Example usage for java.util TreeSet isEmpty

Introduction

On this page you can find example usage for java.util TreeSet.isEmpty().

Prototype

public boolean isEmpty() 

Document

Returns true if this set contains no elements.
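
Before the project snippets below, here is a minimal, self-contained sketch (our own illustration, not code from any of the listed projects). It shows what isEmpty() returns and the guard pattern the examples share: first() throws NoSuchElementException on an empty set, so check isEmpty() before reading the first element (pollFirst(), by contrast, returns null when the set is empty).

import java.util.TreeSet;

public class TreeSetIsEmptyDemo {
    public static void main(String[] args) {
        TreeSet<String> names = new TreeSet<String>();
        System.out.println(names.isEmpty()); // true: the set contains no elements

        names.add("beta");
        names.add("alpha");
        System.out.println(names.isEmpty()); // false

        // Guard pattern used throughout the examples below:
        // first() would throw NoSuchElementException on an empty set.
        if (!names.isEmpty()) {
            System.out.println(names.first()); // "alpha" (elements are kept sorted)
        }

        // Drain the set: pollFirst() removes and returns the smallest element,
        // or returns null once the set is empty.
        while (!names.isEmpty()) {
            names.pollFirst();
        }
        System.out.println(names.isEmpty()); // true again
    }
}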

Usage

From source file:ch.unil.genescore.pathway.GeneSetLibrary.java

public void computeApproxPathwayCorrelation() {

    DenseMatrix corMat = new DenseMatrix(geneSets_.size(), geneSets_.size());
    for (int i = 0; i < geneSets_.size(); i++) {
        GeneSet leftSet = geneSets_.get(i);
        double leftSize = leftSet.genes_.size();
        for (int j = 0; j < geneSets_.size(); j++) {
            GeneSet rightSet = geneSets_.get(j);
            double rightSize = rightSet.genes_.size();
            HashSet<Gene> unpackedMetaGenes = new HashSet<Gene>();
            HashSet<Gene> allRightGenes = new HashSet<Gene>();
            if (null != rightSet.getMetaGenes()) {
                for (MetaGene mg : rightSet.getMetaGenes()) {
                    unpackedMetaGenes.addAll(mg.getGenes());
                }
            }

            allRightGenes.addAll(unpackedMetaGenes);
            allRightGenes.addAll(rightSet.genes_);
            // Mirror the null check above: removeAll would throw a
            // NullPointerException if getMetaGenes() returned null.
            if (null != rightSet.getMetaGenes()) {
                allRightGenes.removeAll(rightSet.getMetaGenes());
            }

            HashSet<Gene> copiedLeftGenes = new HashSet<Gene>(leftSet.genes_);
            copiedLeftGenes.retainAll(allRightGenes);
            double count = copiedLeftGenes.size();
            if (null != leftSet.getMetaGenes())
                for (MetaGene mg : leftSet.getMetaGenes()) {
                    TreeSet<Gene> mgSetCopy = new TreeSet<Gene>(mg.getGenes());
                    mgSetCopy.retainAll(allRightGenes);
                    if (!mgSetCopy.isEmpty()) {
                        count++;
                    }
                }
            double corr = count / Math.sqrt(leftSize * rightSize);
            corMat.set(i, j, corr);
            //corMat.set(j, i, corr);
        }
    }
    pathwayCorMat_ = corMat;
}

From source file:org.wso2.andes.kernel.slot.SlotManager.java

/**
 * Get an overlapped slot by nodeId and queue name. These are slots
 * which overlap with slots already acquired by the given node.
 *
 * @param nodeId    id of the node
 * @param queueName name of the queue for which a slot is required
 * @return slot or null if not found
 */
private Slot getOverlappedSlot(String nodeId, String queueName) {
    Slot slotToBeAssigned = null;
    TreeSet<Slot> currentSlotList;
    HashMap<String, TreeSet<Slot>> queueToSlotMap;
    HashmapStringTreeSetWrapper wrapper = overLappedSlotMap.get(nodeId);

    String lockKey = nodeId + SlotManager.class;

    synchronized (lockKey.intern()) {
        if (null != wrapper) {
            queueToSlotMap = wrapper.getStringListHashMap();
            currentSlotList = queueToSlotMap.get(queueName);
            if (null != currentSlotList && !currentSlotList.isEmpty()) {
                //get and remove slot
                slotToBeAssigned = currentSlotList.pollFirst();
                queueToSlotMap.put(queueName, currentSlotList);
                //update hazelcast map
                wrapper.setStringListHashMap(queueToSlotMap);
                overLappedSlotMap.set(nodeId, wrapper);
                if (log.isDebugEnabled()) {
                    log.debug("Slot Manager - giving a slot from overlapped slot pool. Slot= "
                            + slotToBeAssigned);
                }
            }
        }
    }
    return slotToBeAssigned;
}

From source file:org.callimachusproject.management.CalliServer.java

@Override
public synchronized void setWebappOrigins(String[] origins) throws Exception {
    Map<String, String> previously = conf.getOriginRepositoryIDs();
    final Map<String, String> newOrigins = new LinkedHashMap<String, String>();
    for (String origin : origins) {
        if (!previously.containsKey(origin)) {
            verifyOrigin(origin);
            newOrigins.put(origin, getRepositoryId(origin));
        }
    }
    final Map<String, String> oldOrigins = new LinkedHashMap<String, String>();
    oldOrigins.putAll(previously);
    oldOrigins.keySet().removeAll(Arrays.asList(origins));
    final Map<String, String> subsequently = new LinkedHashMap<String, String>(previously);
    subsequently.keySet().removeAll(oldOrigins.keySet());
    conf.setOriginRepositoryIDs(subsequently);
    submit(new Callable<Void>() {
        public Void call() throws Exception {
            for (Map.Entry<String, String> e : oldOrigins.entrySet()) {
                String id = e.getValue();
                if (!subsequently.values().contains(id) && manager.hasRepositoryConfig(id)) {
                    String prefix = id + "/";
                    TreeSet<String> subset = new TreeSet<String>();
                    for (String other : manager.getRepositoryIDs()) {
                        if (other.startsWith(prefix)) {
                            subset.add(other);
                        }
                    }
                    while (!subset.isEmpty()) {
                        manager.removeRepository(subset.pollLast());
                    }
                    manager.removeRepository(id);
                }
            }
            for (Map.Entry<String, String> e : newOrigins.entrySet()) {
                String origin = e.getKey();
                String id = e.getValue();
                CalliRepository repository = getSetupRepository(id, origin);
                try {
                    SetupTool tool = new SetupTool(id, repository, conf);
                    tool.setupWebappOrigin(origin);
                } finally {
                    refreshRepository(id);
                }
                serveRealm(origin, id);
            }
            return null;
        }
    });
}

From source file:odcplot.OdcPlot.java

/**
 * Using previously set-up object members, verify the channel and get the needed info.
 * @return true if we got what we needed; false after the errors have been printed.
 */
private boolean getChanInfo() throws SQLException {
    boolean ret;
    long strt = System.currentTimeMillis();

    {
        int n;
        if (channelName == null || channelName.isEmpty()) {
            throw new IllegalArgumentException("No Channel specified");
        }
        if (server == null || server.isEmpty()) {
            n = chanTbl.getBestMatch(channelName);
            chanInfo = chanTbl.getChanInfo(n);
        } else {
            TreeSet<ChanInfo> chSet = chanTbl.getAsSet(server, channelName, "raw", 10);
            if (!chSet.isEmpty()) {
                chanInfo = chSet.first();
            }
        }
        if (chanInfo == null) {
            System.err.println("Channel requested was not found: " + channelName);
            // Bail out here: dereferencing chanInfo below would throw a NullPointerException.
            return false;
        }
        sampleRate = chanInfo.getRate();
        String dtyp = chanInfo.getdType();

        if (dtyp.equalsIgnoreCase("INT-16")) {
            bytesPerSample = 2;
        } else if (dtyp.equalsIgnoreCase("INT-32") || dtyp.equalsIgnoreCase("UINT-32")) {
            bytesPerSample = 4;
        } else if (dtyp.equalsIgnoreCase("INT-64")) {
            bytesPerSample = 8;
        } else if (dtyp.equalsIgnoreCase("FLT-32")) {
            bytesPerSample = 4;
        } else if (dtyp.equalsIgnoreCase("FLT-64")) {
            bytesPerSample = 8;
        } else if (dtyp.equalsIgnoreCase("CPX-64")) {
            bytesPerSample = 8;
        }
        if (server == null || server.isEmpty()) {
            server = chanInfo.getServer();
        }
        ret = true;
        String sRateStr = sampleRate < 1 ? String.format("%1$.3f", sampleRate)
                : String.format("%1$.0f", sampleRate);
        System.out.format("\nChan: %1$s, sample rate: %2$s, bytes per sample: %3$d\n", channelName, sRateStr,
                bytesPerSample);
        float dur = (System.currentTimeMillis() - strt) / 1000.f;
        System.out.format("Get channel info took %1$.1f sec.\n", dur);
    }
    return ret;
}

From source file:org.apache.hadoop.hbase.backup.impl.RestoreClientImpl.java

/**
 * Restore operation. Stage 2: resolve Backup Image dependencies.
 * @param backupManifestMap map from tableName to its Manifest
 * @param sTableArray The array of tables to be restored
 * @param tTableArray The array of mapping tables to restore to
 * @param isOverwrite whether existing tables should be truncated before restore
 * @throws IOException exception
 */
private void restoreStage(HashMap<TableName, BackupManifest> backupManifestMap, TableName[] sTableArray,
        TableName[] tTableArray, boolean isOverwrite) throws IOException {
    TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>();
    boolean truncateIfExists = isOverwrite;
    try {
        for (int i = 0; i < sTableArray.length; i++) {
            TableName table = sTableArray[i];
            BackupManifest manifest = backupManifestMap.get(table);
            // Get the image list of this backup for restore in time order from old
            // to new.
            List<BackupImage> list = new ArrayList<BackupImage>();
            list.add(manifest.getBackupImage());
            List<BackupImage> depList = manifest.getDependentListByTable(table);
            list.addAll(depList);
            TreeSet<BackupImage> restoreList = new TreeSet<BackupImage>(list);
            LOG.debug("need to clear merged Image. to be implemented in future jira");
            restoreImages(restoreList.iterator(), table, tTableArray[i], truncateIfExists);
            restoreImageSet.addAll(restoreList);

            if (!restoreImageSet.isEmpty()) {
                LOG.info("Restore includes the following image(s):");
                for (BackupImage image : restoreImageSet) {
                    LOG.info("Backup: " + image.getBackupId() + " " + HBackupFileSystem
                            .getTableBackupDir(image.getRootDir(), image.getBackupId(), table));
                }
            }
        }
    } catch (Exception e) {
        LOG.error("Failed", e);
        throw new IOException(e);
    }
    LOG.debug("restoreStage finished");

}

From source file:org.unitime.timetable.onlinesectioning.OnlineSectioningServerImpl.java

@Override
public CourseInfo getCourseInfo(String course) {
    iLock.readLock().lock();
    try {
        if (course.indexOf('-') >= 0) {
            String courseName = course.substring(0, course.indexOf('-')).trim();
            String title = course.substring(course.indexOf('-') + 1).trim();
            TreeSet<CourseInfo> infos = iCourseForName.get(courseName.toLowerCase());
            if (infos != null && !infos.isEmpty())
                for (CourseInfo info : infos)
                    if (title.equalsIgnoreCase(info.getTitle()))
                        return info;
            return null;
        } else {
            TreeSet<CourseInfo> infos = iCourseForName.get(course.toLowerCase());
            if (infos != null && !infos.isEmpty())
                return infos.first();
            return null;
        }
    } finally {
        iLock.readLock().unlock();
    }
}

From source file:org.apache.cassandra.concurrent.LongSharedExecutorPoolTest.java

private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    for (int i = 0; i < executors.length; i++) {
        executors[i] = SharedExecutorPool.SHARED.newExecutor(threadCount, maxQueued, "test" + i, "test" + i);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f,
                    events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE)
            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        //            System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}

From source file:net.sourceforge.fenixedu.presentationTier.Action.administrativeOffice.scholarship.utl.report.StudentLine.java

public LocalDate getFirstEnrolmentOnCurrentExecutionYear() {
    if (getRegistration() == null) {
        return null;
    }

    if (getRegistration().isInMobilityState()) {
        return getForExecutionYear().getBeginDateYearMonthDay().toLocalDate();
    }

    TreeSet<Enrolment> orderedEnrolmentSet = new TreeSet<Enrolment>(
            Collections.reverseOrder(CurriculumModule.COMPARATOR_BY_CREATION_DATE));
    orderedEnrolmentSet.addAll(getStudentCurricularPlan().getEnrolmentsByExecutionYear(getForExecutionYear()));

    return orderedEnrolmentSet.isEmpty() ? null
            : orderedEnrolmentSet.iterator().next().getCreationDateDateTime().toLocalDate();
}

From source file:bes.injector.InjectorBurnTest.java

private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException, TimeoutException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    final Injector injector = new Injector("");
    for (int i = 0; i < executors.length; i++) {
        executors[i] = injector.newExecutor(threadCount, maxQueued);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<Batch>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f,
                    events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
            }
        }

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<Result>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        //            System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}

From source file:org.commonjava.maven.galley.filearc.internal.ZipListing.java

@Override
public ListingResult call() {
    final File src = getArchiveFile(resource.getLocationUri());
    if (!src.canRead() || src.isDirectory()) {
        return null;
    }

    final boolean isJar = isJar(resource.getLocationUri());

    final TreeSet<String> filenames = new TreeSet<String>();

    ZipFile zf = null;
    try {
        if (isJar) {
            zf = new JarFile(src);
        } else {
            zf = new ZipFile(src);
        }

        final String path = resource.getPath();
        final int pathLen = path.length();
        for (final ZipEntry entry : Collections.list(zf.entries())) {
            String name = entry.getName();
            if (name.startsWith(path)) {
                name = name.substring(pathLen);

                if (name.startsWith("/") && name.length() > 1) {
                    name = name.substring(1);

                    if (name.indexOf("/") < 0) {
                        filenames.add(name);
                    }
                }
            }
        }

    } catch (final IOException e) {
        error = new TransferException("Failed to get listing for: %s to: %s. Reason: %s", e, resource,
                e.getMessage());
    } finally {
        if (zf != null) {
            try {
                zf.close();
            } catch (final IOException e) {
            }
        }
    }

    if (!filenames.isEmpty()) {
        OutputStream stream = null;
        try {
            stream = target.openOutputStream(TransferOperation.DOWNLOAD);
            stream.write(join(filenames, "\n").getBytes("UTF-8"));

            return new ListingResult(resource, filenames.toArray(new String[filenames.size()]));
        } catch (final IOException e) {
            error = new TransferException("Failed to write listing to: %s. Reason: %s", e, target,
                    e.getMessage());
        } finally {
            closeQuietly(stream);
        }
    }

    return null;
}