Example usage for java.util.Deque.add

List of usage examples for java.util.Deque.add

Introduction

On this page you can find example usages of java.util.Deque.add.

Prototype

boolean add(E e);

Document

Inserts the specified element into the queue represented by this deque (in other words, at the tail of this deque) if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
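
Before the full project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed under Usage; the class name DequeAddDemo is made up for illustration) contrasting add with offer on an unbounded ArrayDeque and a capacity-restricted LinkedBlockingDeque:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.LinkedBlockingDeque;

public class DequeAddDemo {
    public static void main(String[] args) {
        // Unbounded deque: add(e) appends at the tail and always returns true.
        Deque<String> tasks = new ArrayDeque<>();
        tasks.add("first");
        tasks.add("second");
        System.out.println(tasks); // [first, second]

        // Capacity-restricted deque: add(e) throws IllegalStateException when the
        // deque is full, whereas offer(e) reports failure by returning false.
        Deque<String> bounded = new LinkedBlockingDeque<>(1);
        bounded.add("only");
        try {
            bounded.add("overflow");
        } catch (IllegalStateException e) {
            System.out.println("add() rejected the element: " + e);
        }
        System.out.println(bounded.offer("overflow")); // prints: false
    }
}

Running this prints the deque contents, the caught IllegalStateException, and false, which is the behavior the prototype and description above specify.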

Usage

From source file:edu.stanford.cfuller.colocalization3d.correction.PositionCorrector.java

/**
 * Determines the target registration error for a correction by successively leaving out each ImageObject in a set used to make a correction,
 * calculating a correction from the remaining objects, and assessing the error in correcting the object left out.
 *
 * @param imageObjects                  A Vector containing all the ImageObjects to be used for the correction
 *                                      or in the order it appears in a multiwavelength image file.
 * @return                              The average value of the error over all objects.
 */
public double determineTRE(java.util.List<ImageObject> imageObjects) {

    int referenceChannel = this.parameters.getIntValueForKey(REF_CH_PARAM);

    int channelToCorrect = this.parameters.getIntValueForKey(CORR_CH_PARAM);

    RealVector treVector = new ArrayRealVector(imageObjects.size(), 0.0);
    RealVector treXYVector = new ArrayRealVector(imageObjects.size(), 0.0);

    java.util.Deque<TREThread> startedThreads = new java.util.LinkedList<TREThread>();
    int maxThreads = 1;
    if (this.parameters.hasKey(THREAD_COUNT_PARAM)) {
        maxThreads = this.parameters.getIntValueForKey(THREAD_COUNT_PARAM);
    }
    final int threadWaitTime_ms = 1000;

    for (int removeIndex = 0; removeIndex < imageObjects.size(); removeIndex++) {

        if (removeIndex % 10 == 0) {

            java.util.logging.Logger
                    .getLogger(edu.stanford.cfuller.colocalization3d.Colocalization3DMain.LOGGER_NAME)
                    .finer("calculating TRE: point " + (removeIndex + 1) + " of " + imageObjects.size());
        }

        TREThread nextFit = new TREThread(imageObjects, referenceChannel, channelToCorrect, removeIndex, this);

        if (startedThreads.size() < maxThreads) {
            startedThreads.add(nextFit);
            nextFit.start();
        } else {
            TREThread next = startedThreads.poll();
            try {

                next.join(threadWaitTime_ms);

            } catch (InterruptedException e) {
                e.printStackTrace();
            }

            while (next.isAlive()) {
                startedThreads.add(next);
                next = startedThreads.poll();

                try {

                    next.join(threadWaitTime_ms);

                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }

            treVector.setEntry(next.getRemoveIndex(), next.getTre());

            treXYVector.setEntry(next.getRemoveIndex(), next.getTreXY());

            startedThreads.add(nextFit);
            nextFit.start();
        }

    }

    java.util.List<Integer> unsuccessful_TRE = new java.util.ArrayList<Integer>();

    while (startedThreads.size() > 0) {
        TREThread next = startedThreads.poll();
        try {
            next.join();
            if (next.getSuccess()) {
                treVector.setEntry(next.getRemoveIndex(), next.getTre());
            } else {
                unsuccessful_TRE.add(next.getRemoveIndex());
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    RealVector treVector_mod = new ArrayRealVector(treVector.getDimension() - unsuccessful_TRE.size());
    RealVector treXYVector_mod = new ArrayRealVector(treVector_mod.getDimension());

    int c = 0;

    //unsuccessful TRE calculation results when there is incomplete coverage in the correction dataset
    for (int i = 0; i < treVector.getDimension(); ++i) {
        if (!unsuccessful_TRE.contains(i)) {
            treVector_mod.setEntry(c, treVector.getEntry(i));
            treXYVector_mod.setEntry(c, treXYVector.getEntry(i));
            ++c;
        }
    }

    treVector = treVector_mod;
    treXYVector = treXYVector_mod;

    double tre = treVector.getL1Norm() / treVector.getDimension();
    double xy_tre = (treXYVector.getL1Norm() / treXYVector.getDimension());

    java.util.logging.Logger.getLogger(edu.stanford.cfuller.colocalization3d.Colocalization3DMain.LOGGER_NAME)
            .info("TRE: " + tre);
    java.util.logging.Logger.getLogger(edu.stanford.cfuller.colocalization3d.Colocalization3DMain.LOGGER_NAME)
            .info("x-y TRE: " + xy_tre);

    return tre;

}

From source file:org.nuxeo.ecm.core.storage.sql.PersistenceContext.java

/**
 * Removes a property node and its children.
 * <p>
 * There's less work to do than when we have to remove a generic document
 * node (less selections, and we can assume the depth is small so recurse).
 */
public void removePropertyNode(SimpleFragment hierFragment) throws StorageException {
    // collect children
    Deque<SimpleFragment> todo = new LinkedList<SimpleFragment>();
    List<SimpleFragment> children = new LinkedList<SimpleFragment>();
    todo.add(hierFragment);
    while (!todo.isEmpty()) {
        SimpleFragment fragment = todo.removeFirst();
        todo.addAll(getChildren(fragment.getId(), null, true)); // complex
        children.add(fragment);
    }
    Collections.reverse(children);
    // iterate on children depth first
    for (SimpleFragment fragment : children) {
        // remove from context
        boolean primary = fragment == hierFragment;
        removeFragmentAndDependents(fragment, primary);
        // remove from selections
        // removed from its parent selection
        hierComplex.recordRemoved(fragment);
        // no children anymore
        hierComplex.recordRemovedSelection(fragment.getId());
    }
}

From source file:org.openscore.lang.compiler.utils.ExecutableBuilder.java

private Workflow compileWorkFlow(LinkedHashMap<String, Map<String, Object>> workFlowRawData,
        Map<String, String> imports, Workflow onFailureWorkFlow, boolean onFailureSection) {

    Deque<Task> tasks = new LinkedList<>();

    Validate.notEmpty(workFlowRawData, "Flow must have tasks in its workflow");

    PeekingIterator<Map.Entry<String, Map<String, Object>>> iterator = new PeekingIterator<>(
            workFlowRawData.entrySet().iterator());

    boolean isOnFailureDefined = onFailureWorkFlow != null;

    String defaultFailure = isOnFailureDefined ? onFailureWorkFlow.getTasks().getFirst().getName()
            : FAILURE_RESULT;

    while (iterator.hasNext()) {
        Map.Entry<String, Map<String, Object>> taskRawData = iterator.next();
        Map.Entry<String, Map<String, Object>> nextTaskData = iterator.peek();
        String taskName = taskRawData.getKey();
        Map<String, Object> taskRawDataValue;
        try {
            taskRawDataValue = taskRawData.getValue();
        } catch (ClassCastException ex) {
            throw new RuntimeException("Task: " + taskName
                    + " syntax is illegal.\nBelow task name, there should be a map of values in the format:\ndo:\n\top_name:");
        }

        String defaultSuccess;
        if (nextTaskData != null) {
            defaultSuccess = nextTaskData.getKey();
        } else {
            defaultSuccess = onFailureSection ? FAILURE_RESULT : SUCCESS_RESULT;
        }
        Task task = compileTask(taskName, taskRawDataValue, defaultSuccess, imports, defaultFailure);
        tasks.add(task);
    }

    if (isOnFailureDefined) {
        tasks.addAll(onFailureWorkFlow.getTasks());
    }

    return new Workflow(tasks);
}

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * Walk the given directory for all HFiles, and return a Queue containing all such files.
 */
private void discoverLoadQueue(Deque<LoadQueueItem> ret, Path hfofDir) throws IOException {
    FileSystem fs = hfofDir.getFileSystem(getConf());

    if (!fs.exists(hfofDir)) {
        throw new FileNotFoundException("HFileOutputFormat dir " + hfofDir + " not found");
    }

    FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
    if (familyDirStatuses == null) {
        throw new FileNotFoundException("No families found in " + hfofDir);
    }

    for (FileStatus stat : familyDirStatuses) {
        if (!stat.isDir()) {
            LOG.warn("Skipping non-directory " + stat.getPath());
            continue;
        }
        Path familyDir = stat.getPath();
        // Skip _logs and .index, etc
        if (familyDir.getName().startsWith("_")
                || familyDir.getName().startsWith(IndexMapReduceUtil.INDEX_DATA_DIR))
            continue;
        byte[] family = Bytes.toBytes(familyDir.getName());
        Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
        for (Path hfile : hfiles) {
            if (hfile.getName().startsWith("_"))
                continue;
            ret.add(new LoadQueueItem(family, hfile));
        }
    }
}

From source file:org.apache.giraph.comm.flow_control.CreditBasedFlowControl.java

@Override
public void sendRequest(int destTaskId, WritableRequest request) {
    Pair<AdjustableSemaphore, Integer> pair = perWorkerOpenRequestMap.get(destTaskId);
    // Check if this is the first time sending a request to this worker. If so, we
    // should add the worker id to the necessary bookkeeping data structures.
    if (pair == null) {
        pair = new MutablePair<>(new AdjustableSemaphore(maxOpenRequestsPerWorker), -1);
        Pair<AdjustableSemaphore, Integer> temp = perWorkerOpenRequestMap.putIfAbsent(destTaskId, pair);
        perWorkerUnsentRequestMap.putIfAbsent(destTaskId, new ArrayDeque<WritableRequest>());
        resumeRequestsId.putIfAbsent(destTaskId, Sets.<Long>newConcurrentHashSet());
        if (temp != null) {
            pair = temp;
        }
    }
    AdjustableSemaphore openRequestPermit = pair.getLeft();
    // Try to reserve a spot for the request amongst the open requests of
    // the destination worker.
    boolean shouldSend = openRequestPermit.tryAcquire();
    boolean shouldCache = false;
    while (!shouldSend) {
        // We should not send the request, and should cache the request instead.
        // It may be possible that the unsent message cache is also full, so we
        // should try to acquire a space on the cache, and if there is no extra
        // space in unsent request cache, we should wait until some space
        // become available. However, it is possible that during the time we are
        // waiting on the unsent messages cache, actual buffer for open requests
        // frees up space.
        try {
            shouldCache = unsentRequestPermit.tryAcquire(unsentWaitMsecs, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            throw new IllegalStateException(
                    "shouldSend: failed " + "while waiting on the unsent request cache to have some more "
                            + "room for extra unsent requests!");
        }
        if (shouldCache) {
            break;
        }
        // We may have an open spot in the meantime that we were waiting on the
        // unsent requests.
        shouldSend = openRequestPermit.tryAcquire();
        if (shouldSend) {
            break;
        }
        // The current thread will be at this point only if it could not make
        // space amongst open requests for the destination worker and has been
        // timed-out in trying to acquire a space amongst unsent messages. So,
        // we should report logs, report progress, and check for request
        // failures.
        nettyClient.logAndSanityCheck();
    }
    // Either shouldSend == true or shouldCache == true
    if (shouldCache) {
        Deque<WritableRequest> unsentRequests = perWorkerUnsentRequestMap.get(destTaskId);
        // This synchronize block is necessary for the following reason:
        // Once we are at this point, it means there was no room for this
        // request to become an open request, hence we have to put it into
        // unsent cache. Consider the case that since last time we checked if
        // there is any room for an additional open request so far, all open
        // requests are delivered and their acknowledgements are also processed.
        // Now, if we put this request in the unsent cache, it is not being
        // considered to become an open request, as the only one who checks
        // on this matter would be the one who receives an acknowledgment for an
        // open request for the destination worker. So, a lock is necessary
        // to forcefully serialize the execution if this scenario is about to
        // happen.
        synchronized (unsentRequests) {
            shouldSend = openRequestPermit.tryAcquire();
            if (!shouldSend) {
                aggregateUnsentRequests.getAndIncrement();
                unsentRequests.add(request);
                return;
            }
        }
        // We found a spot amongst open requests to send this request. So, this
        // request won't be cached anymore.
        unsentRequestPermit.release();
    }
    nettyClient.doSend(destTaskId, request);
}

From source file:org.springframework.xd.dirt.stream.zookeeper.ZooKeeperStreamRepository.java

@Override
public void delete(String id) {
    logger.info("Undeploying stream {}", id);

    String streamDeploymentPath = Paths.build(Paths.STREAM_DEPLOYMENTS, id);
    String streamModuleDeploymentPath = Paths.build(streamDeploymentPath, Paths.MODULES);
    CuratorFramework client = zkConnection.getClient();
    Deque<String> paths = new ArrayDeque<String>();

    try {
        client.setData().forPath(Paths.build(Paths.STREAM_DEPLOYMENTS, id, Paths.STATUS), ZooKeeperUtils
                .mapToBytes(new DeploymentUnitStatus(DeploymentUnitStatus.State.undeploying).toMap()));
    } catch (Exception e) {
        logger.warn("Exception while transitioning stream {} state to {}", id,
                DeploymentUnitStatus.State.undeploying, e);
    }

    // Place all module deployments into a tree keyed by the
    // ZK transaction id. The ZK transaction id maintains
    // total ordering of all changes. This allows the
    // undeployment of modules in the reverse order in
    // which they were deployed.
    Map<Long, String> txMap = new TreeMap<Long, String>();
    try {
        List<String> deployments = client.getChildren().forPath(streamModuleDeploymentPath);
        for (String deployment : deployments) {
            String path = new StreamDeploymentsPath(Paths.build(streamModuleDeploymentPath, deployment))
                    .build();
            Stat stat = client.checkExists().forPath(path);
            Assert.notNull(stat);
            txMap.put(stat.getCzxid(), path);
        }
    } catch (Exception e) {
        //NoNodeException - nothing to delete
        ZooKeeperUtils.wrapAndThrowIgnoring(e, KeeperException.NoNodeException.class);
    }

    for (String deployment : txMap.values()) {
        paths.add(deployment);
    }

    for (Iterator<String> iterator = paths.descendingIterator(); iterator.hasNext();) {
        try {
            String path = iterator.next();
            logger.trace("removing path {}", path);
            client.delete().deletingChildrenIfNeeded().forPath(path);
        } catch (Exception e) {
            ZooKeeperUtils.wrapAndThrowIgnoring(e, KeeperException.NoNodeException.class);
        }
    }

    try {
        client.delete().deletingChildrenIfNeeded().forPath(streamDeploymentPath);
    } catch (KeeperException.NotEmptyException e) {
        List<String> children = new ArrayList<String>();
        try {
            children.addAll(client.getChildren().forPath(streamModuleDeploymentPath));
        } catch (Exception ex) {
            children.add("Could not load list of children due to " + ex);
        }
        throw new IllegalStateException(String.format("The following children were not deleted from %s: %s",
                streamModuleDeploymentPath, children), e);
    } catch (Exception e) {
        ZooKeeperUtils.wrapAndThrowIgnoring(e, KeeperException.NoNodeException.class);
    }
}

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

/**
 * Walk the given directory for all HFiles, and return a Queue containing all such files.
 */
private void discoverLoadQueue(final Deque<LoadQueueItem> ret, final Path hfofDir, final boolean validateHFile)
        throws IOException {
    visitBulkHFiles(hfofDir.getFileSystem(getConf()), hfofDir, new BulkHFileVisitor<byte[]>() {
        @Override
        public byte[] bulkFamily(final byte[] familyName) {
            return familyName;
        }

        @Override
        public void bulkHFile(final byte[] family, final FileStatus hfile) throws IOException {
            long length = hfile.getLen();
            if (length > getConf().getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE)) {
                LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length
                        + " bytes can be problematic as it may lead to oversplitting.");
            }
            ret.add(new LoadQueueItem(family, hfile.getPath()));
        }
    }, validateHFile);
}

From source file:com.facebook.litho.MountState.java

/**
 * Clears and re-populates the test item map if we are in e2e test mode.
 */
private void processTestOutputs(LayoutState layoutState) {
    if (mTestItemMap == null) {
        return;
    }

    for (Collection<TestItem> items : mTestItemMap.values()) {
        for (TestItem item : items) {
            ComponentsPools.release(item);
        }
    }
    mTestItemMap.clear();

    for (int i = 0, size = layoutState.getTestOutputCount(); i < size; i++) {
        final TestOutput testOutput = layoutState.getTestOutputAt(i);
        final long hostMarker = testOutput.getHostMarker();
        final long layoutOutputId = testOutput.getLayoutOutputId();
        final MountItem mountItem = layoutOutputId == -1 ? null : mIndexToItemMap.get(layoutOutputId);
        final TestItem testItem = ComponentsPools.acquireTestItem();
        testItem.setHost(hostMarker == -1 ? null : mHostsByMarker.get(hostMarker));
        testItem.setBounds(testOutput.getBounds());
        testItem.setTestKey(testOutput.getTestKey());
        testItem.setContent(mountItem == null ? null : mountItem.getContent());

        final Deque<TestItem> items = mTestItemMap.get(testOutput.getTestKey());
        final Deque<TestItem> updatedItems = items == null ? new LinkedList<TestItem>() : items;
        updatedItems.add(testItem);
        mTestItemMap.put(testOutput.getTestKey(), updatedItems);
    }
}

From source file:de.uni_potsdam.hpi.asg.logictool.mapping.SequenceBasedAndGateDecomposer.java

public boolean decomposeAND(NetlistTerm term) {

    logger.info("Decomposition of " + term.toString());

    Set<Signal> signals = netlist.getDrivenSignalsTransitive(term);
    if (signals.isEmpty()) {
        logger.warn("No signal(s) for term " + term + " found");
        return false;
    } else if (signals.size() > 1) {
        logger.warn("Term " + term + " drives more than one signal. This is not supported yet");
        return false;
    }
    Signal origsig = signals.iterator().next();
    if (!isAOC(term, origsig)) {
        logger.warn("Algorithm not applicable for non-AOC architectures");
        return false;
    }

    int startgatesize = BDDHelper.numberOfVars(term.getBdd());

    BDD bdd = term.getBdd();
    Set<Signal> origrelevant = findRelevantSigs(bdd);
    if (origrelevant == null) {
        return false;
    }

    StateGraph sg2 = sghelper.getNewStateGraph(origrelevant, origsig);
    if (sg2 == null) {
        logger.warn("Failed to generate new SG. Using the original one.");
        sg2 = origsg;
    }

    BiMap<Signal, Signal> sigmap = HashBiMap.create();
    Set<Signal> relevant = new HashSet<>();
    boolean found;
    for (Signal oldSig : origrelevant) {
        found = false;
        for (Signal newSig : sg2.getAllSignals()) {
            if (oldSig.getName().equals(newSig.getName())) {
                sigmap.put(oldSig, newSig);
                found = true;
                break;
            }
        }
        if (!found) {
            logger.error("Signal " + oldSig.getName() + " not found");
            return false;
        }
        relevant.add(sigmap.get(oldSig));
    }
    found = false;
    for (Signal newSig : sg2.getAllSignals()) {
        if (origsig.getName().equals(newSig.getName())) {
            sigmap.put(origsig, newSig);
            found = true;
            break;
        }
    }
    if (!found) {
        logger.error("Signal " + origsig.getName() + " not found");
        return false;
    }
    Signal sig = sigmap.get(origsig);

    Map<Signal, Boolean> posnegmap = getInputsPosOrNeg(term, sigmap);
    BDD newbdd = factory.one();
    for (Entry<Signal, Boolean> entry : posnegmap.entrySet()) {
        if (entry.getValue()) {
            newbdd = newbdd.andWith(getPosBDD(entry.getKey()));
        } else {
            newbdd = newbdd.andWith(getNegBDD(entry.getKey()));
        }
        if (entry.getKey() instanceof QuasiSignal) {
            relevant.add(entry.getKey());
        }
    }

    Set<State> startStates = new HashSet<>();
    for (State s : sg2.getStates()) {
        for (Entry<Transition, State> entry2 : s.getNextStates().entrySet()) {
            if (entry2.getKey().getSignal() == sig) {
                startStates.add(entry2.getValue());
            }
        }
    }

    List<List<Signal>> fallingPartitions = new ArrayList<>();
    for (Signal sig2 : relevant) {
        List<Signal> tmp = new ArrayList<>();
        tmp.add(sig2);
        fallingPartitions.add(tmp);
    }

    SortedSet<IOBehaviour> sequencesFront = new TreeSet<>(new SequenceFrontCmp());
    SortedSet<IOBehaviour> sequencesBack = new TreeSet<>(new SequenceBackCmp());
    Set<IOBehaviour> newSequences = new HashSet<>();
    Set<IOBehaviour> rmSequences = new HashSet<>();
    Deque<IOBehaviourSimulationStep> steps = new ArrayDeque<>();

    pool = new IOBehaviourSimulationStepPool(new IOBehaviourSimulationStepFactory());
    pool.setMaxTotal(-1);

    try {
        root = pool.borrowObject();
    } catch (Exception e) {
        e.printStackTrace();
        logger.error("Could not borrow object");
        return false;
    }

    IOBehaviourSimulationStep newStep;
    for (State s : startStates) {
        try {
            newStep = pool.borrowObject();
        } catch (Exception e) {
            e.printStackTrace();
            logger.error("Could not borrow object");
            return false;
        }
        root.getNextSteps().add(newStep);
        newStep.setPrevStep(root);
        newStep.setStart(s);
        newStep.setNextState(s);
        steps.add(newStep);
    }

    if (steps.isEmpty()) {
        return false;
    }

    final long checkThreshold = 100;

    long stepsEvaledTotal = 0;
    IOBehaviourSimulationStep step = null;
    while (!steps.isEmpty()) {
        step = steps.removeLast();
        //         System.out.println("#Step: " + step.toString());
        getNewSteps(step, sig, newSequences, steps, relevant);
        stepsEvaledTotal++;
        if (newSequences.size() >= checkThreshold) {
            removeCandidates(sequencesFront, sequencesBack, newSequences, rmSequences);
        }
    }
    removeCandidates(sequencesFront, sequencesBack, newSequences, rmSequences);
    logger.debug("Sequences: " + sequencesFront.size() + " - Tmp Sequences: " + newSequences.size()
            + " - Steps to evaluate: " + steps.size() + " - Steps evaluated: " + stepsEvaledTotal);
    logger.debug("Pool: " + "Created: " + pool.getCreatedCount() + ", Borrowed: " + pool.getBorrowedCount()
            + ", Returned: " + pool.getReturnedCount() + ", Active: " + pool.getNumActive() + ", Idle: "
            + pool.getNumIdle());
    logger.debug("RmSub: " + rmSub + " // RmFall: " + rmFall);

    SortedSet<IOBehaviour> sequences = new TreeSet<>(sequencesFront);
    sequencesFront.clear();
    sequencesBack.clear();
    //      System.out.println(sequences.toString());

    List<IOBehaviour> falling = new ArrayList<>();
    List<IOBehaviour> rising = new ArrayList<>();
    List<IOBehaviour> constant = new ArrayList<>();
    if (!categoriseSequences(newbdd, sequences, falling, rising, constant)) {
        return false;
    }
    //      System.out.println("Falling:");
    //      for(IOBehaviour beh : falling) {
    //         System.out.println(beh.toString());
    //      }
    //      System.out.println("Rising:");
    //      for(IOBehaviour beh : rising) {
    //         System.out.println(beh.toString());
    //      }
    //      System.out.println("Constant:");
    //      for(IOBehaviour beh : constant) {
    //         System.out.println(beh.toString());
    //      }

    fallingPartitions = getPossiblePartitionsFromFalling(falling, relevant);
    //      System.out.println("FallingPartitions: " + fallingPartitions.toString());

    Map<Integer, List<Partition>> partitions = getPartitions(relevant, startgatesize);
    if (partitions == null) {
        logger.error("There was a problem while creating partitions for signal " + sig.getName());
        return false;
    }

    //      System.out.println("Init:");
    //      for(Entry<Integer, List<Partition>> entry : partitions.entrySet()) {
    //         System.out.println(entry.getKey());
    //         for(Partition p : entry.getValue()) {
    //            System.out.println("\t" + p.toString());
    //         }
    //      }

    filterPartitions(partitions, fallingPartitions);
    if (partitions.isEmpty()) {
        logger.error("No suitable partitions found");
        return false;
    }

    //      System.out.println("After filter Falling:");
    //      for(Entry<Integer, List<Partition>> entry : partitions.entrySet()) {
    //         System.out.println(entry.getKey());
    //         for(Partition p : entry.getValue()) {
    //            System.out.println("\t" + p.toString());
    //         }
    //      }

    //      System.out.println("posneg: " + posnegmap.toString());

    setPartitionBDDs(partitions, posnegmap);

    if (!checkRising(rising, partitions)) {
        logger.error("Check rising failed");
        return false;
    }
    if (partitions.isEmpty()) {
        logger.error("No suitable partitions found");
        return false;
    }

    //      System.out.println("After filter Rising:");
    //      for(Entry<Integer, List<Partition>> entry : partitions.entrySet()) {
    //         System.out.println(entry.getKey());
    //         for(Partition p : entry.getValue()) {
    //            System.out.println("\t" + p.toString());
    //         }
    //      }

    if (!checkConstant(constant, partitions)) {
        logger.error("Check constant failed");
        return false;
    }
    if (partitions.isEmpty()) {
        logger.error("No suitable partitions found");
        return false;
    }

    //      System.out.println("After filter Constant:");
    //      for(Entry<Integer, List<Partition>> entry : partitions.entrySet()) {
    //         System.out.println(entry.getKey());
    //         for(Partition p : entry.getValue()) {
    //            System.out.println("\t" + p.toString());
    //         }
    //      }

    applyDecoResult(term, partitions, posnegmap, sigmap);
    return true;
}

From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java

/**
 * Walk the given directory for all HFiles, and return a Queue
 * containing all such files.
 */
private void discoverLoadQueue(Deque<LoadQueueItem> ret, Path hfofDir) throws IOException {
    fs = hfofDir.getFileSystem(getConf());

    if (!fs.exists(hfofDir)) {
        throw new FileNotFoundException("HFileOutputFormat dir " + hfofDir + " not found");
    }

    FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
    if (familyDirStatuses == null) {
        throw new FileNotFoundException("No families found in " + hfofDir);
    }

    for (FileStatus stat : familyDirStatuses) {
        if (!stat.isDirectory()) {
            LOG.warn("Skipping non-directory " + stat.getPath());
            continue;
        }
        Path familyDir = stat.getPath();
        // Skip _logs, etc
        if (familyDir.getName().startsWith("_"))
            continue;
        byte[] family = familyDir.getName().getBytes();
        Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
        for (Path hfile : hfiles) {
            // Skip "_", reference, HFileLink
            String fileName = hfile.getName();
            if (fileName.startsWith("_"))
                continue;
            if (StoreFileInfo.isReference(fileName)) {
                LOG.warn("Skipping reference " + fileName);
                continue;
            }
            if (HFileLink.isHFileLink(fileName)) {
                LOG.warn("Skipping HFileLink " + fileName);
                continue;
            }
            ret.add(new LoadQueueItem(family, hfile));
        }
    }
}