Example usage for java.util Deque isEmpty

List of usage examples for java.util Deque isEmpty

Introduction

On this page you can find example usage for java.util Deque isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this collection contains no elements.
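
Before the full examples, here is a minimal, self-contained sketch of the most common pattern: using isEmpty() as the guard when draining a deque used as a work list. The class name and queue contents are illustrative only, not taken from any of the examples below.

import java.util.ArrayDeque;
import java.util.Deque;

public class DequeIsEmptyExample {
    public static void main(String[] args) {
        Deque<String> queue = new ArrayDeque<>();
        System.out.println(queue.isEmpty()); // true: nothing has been added yet

        queue.add("first");
        queue.add("second");

        // isEmpty() is the usual loop guard when draining a deque used as a work list
        while (!queue.isEmpty()) {
            System.out.println(queue.remove());
        }

        System.out.println(queue.isEmpty()); // true again once the deque is drained
    }
}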

Usage

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java
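
In this example, isEmpty() drives the drain loop: while (!queue.isEmpty()), items are removed from the LoadQueueItem deque and a grouping/splitting task is submitted to the thread pool for each.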

/**
 * @param table the table to load into
 * @param pool the ExecutorService
 * @param queue the queue for LoadQueueItem
 * @param startEndKeys start and end keys
 * @return A map that groups LQI by likely bulk load region targets and Set of missing hfiles.
 */
private Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> groupOrSplitPhase(final Table table,
        ExecutorService pool, Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys)
        throws IOException {
    // <region start key, LQI> needs to be synchronized only within the scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
    Set<String> missingHFiles = new HashSet<>();
    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = new Pair<>(regionGroups, missingHFiles);

    // drain LQIs and figure out bulk load groups
    Set<Future<Pair<List<LoadQueueItem>, String>>> splittingFutures = new HashSet<>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<Pair<List<LoadQueueItem>, String>> call = new Callable<Pair<List<LoadQueueItem>, String>>() {
            @Override
            public Pair<List<LoadQueueItem>, String> call() throws Exception {
                Pair<List<LoadQueueItem>, String> splits = groupOrSplit(regionGroups, item, table,
                        startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<Pair<List<LoadQueueItem>, String>> lqis : splittingFutures) {
        try {
            Pair<List<LoadQueueItem>, String> splits = lqis.get();
            if (splits != null) {
                if (splits.getFirst() != null) {
                    queue.addAll(splits.getFirst());
                } else {
                    missingHFiles.add(splits.getSecond());
                }
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
    return pair;
}

From source file:com.google.uzaygezen.core.Pow2LengthBitSetRangeFactory.java
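
Here two ArrayDeques act as parallel stacks for an iterative tree traversal; outputStack.isEmpty() is checked as a postcondition to verify the output stack was fully consumed along with the input stack.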

@Override
public Map<Pow2LengthBitSetRange, NodeValue<V>> apply(MapNode<BitVector, V> from) {
    if (from == null) {
        return ImmutableMap.of();
    }
    Deque<MapNode<BitVector, V>> inputStack = new ArrayDeque<MapNode<BitVector, V>>();
    Deque<BitVectorWithIterationLevelAndValue> outputStack = new ArrayDeque<BitVectorWithIterationLevelAndValue>();
    inputStack.push(from);
    int n = elementLengthSums.length;
    int bitCount = n == 0 ? 0 : elementLengthSums[n - 1];
    outputStack.push(new BitVectorWithIterationLevelAndValue(BitVectorFactories.OPTIMAL.apply(bitCount), n,
            from.getValue()));
    MapNode<BitVector, V> inputNode;
    Map<Pow2LengthBitSetRange, NodeValue<V>> map = Maps.newHashMap();
    while ((inputNode = inputStack.poll()) != null) {
        BitVectorWithIterationLevelAndValue outputElement = outputStack.poll();
        map.put(new Pow2LengthBitSetRange(outputElement.bitVector,
                outputElement.level == 0 ? 0 : elementLengthSums[outputElement.level - 1]),
                NodeValue.of(outputElement.value, inputNode.getChildren().isEmpty()));
        Preconditions.checkArgument(
                outputElement.level > 0 || (inputNode.getChildren().isEmpty() && outputElement.level >= 0));
        for (Entry<BitVector, MapNode<BitVector, V>> entry : inputNode.getChildren().entrySet()) {
            inputStack.push(entry.getValue());
            BitVector childBitSet = outputElement.bitVector.clone();
            BitVector key = entry.getKey();
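            // walk the set bits of key, mapping each one to a bit index in the child bit set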
            for (int i = key.size() == 0 ? -1 : key.nextSetBit(0); i != -1; i = i == key.size() - 1 ? -1
                    : key.nextSetBit(i + 1)) {
                int bitIndex = (outputElement.level == 1 ? 0 : elementLengthSums[outputElement.level - 2]) + i;
                Preconditions.checkState(bitIndex < bitCount, "bitIndex is too high");
                Preconditions.checkState(!childBitSet.get(bitIndex));
                childBitSet.set(bitIndex);
            }
            outputStack.push(new BitVectorWithIterationLevelAndValue(childBitSet, outputElement.level - 1,
                    entry.getValue().getValue()));
        }
    }
    Preconditions.checkState(outputStack.isEmpty() && !map.isEmpty());
    return map;
}

From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java
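
This bulk-load driver uses isEmpty() three times: to warn and return early when no HFiles are discovered, as the condition of the retry loop that regroups and loads the queue, and in the finally block to report files left unloaded.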

/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = getConf().getInt("hbase.loadincremental.threads.max",
            Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        // check whether there is invalid family name in HFiles to be bulkloaded
        Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
        ArrayList<String> familyNames = new ArrayList<String>();
        for (HColumnDescriptor family : families) {
            familyNames.add(family.getNameAsString());
        }
        ArrayList<String> unmatchedFamilies = new ArrayList<String>();
        for (LoadQueueItem lqi : queue) {
            String familyNameInHFile = Bytes.toString(lqi.family);
            if (!familyNames.contains(familyNameInHFile)) {
                unmatchedFamilies.add(familyNameInHFile);
            }
        }
        if (!unmatchedFamilies.isEmpty()) {
            String msg = "Unmatched family names found in HFiles to be bulkloaded: " + unmatchedFamilies
                    + "; valid family names of table " + Bytes.toString(table.getTableName()) + " are: "
                    + familyNames;
            LOG.error(msg);
            throw new IOException(msg);
        }
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        //If using secure bulk load, get source delegation token, and
        //prepare staging directory and token
        if (userProvider.isHBaseSecurityEnabled()) {
            // fs is the source filesystem
            fsDelegationToken.acquireDelegationToken(fs);

            bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getName());
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = getConf().getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
                // Error is logged inside checkHFilesCountPerRegionPerFamily.
                throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
                        + " hfiles to one family of one region");
            }

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        if (userProvider.isHBaseSecurityEnabled()) {
            fsDelegationToken.releaseDelegationToken();

            if (bulkToken != null) {
                new SecureBulkLoadClient(table).cleanupBulkLoad(bulkToken);
            }
        }
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }

    if (queue != null && !queue.isEmpty()) {
        throw new RuntimeException(
                "Bulk load aborted with some files not yet loaded." + "Please check log for more details.");
    }
}

From source file:de.uni_potsdam.hpi.asg.logictool.mapping.SequenceBasedAndGateDecomposer.java
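
isEmpty() serves both as an early-out check (no simulation steps to start from) and as the condition of the worklist loop that pops and evaluates steps until none remain.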

public boolean decomposeAND(NetlistTerm term) {

    logger.info("Decomposition of " + term.toString());

    Set<Signal> signals = netlist.getDrivenSignalsTransitive(term);
    if (signals.isEmpty()) {
        logger.warn("No signal(s) for term " + term + " found");
        return false;
    } else if (signals.size() > 1) {
        logger.warn("Term " + term + " drives more than one signal. This is not supported yet");
        return false;
    }
    Signal origsig = signals.iterator().next();
    if (!isAOC(term, origsig)) {
        logger.warn("Algorithm not applicable for non-AOC architectures");
        return false;
    }

    int startgatesize = BDDHelper.numberOfVars(term.getBdd());

    BDD bdd = term.getBdd();
    Set<Signal> origrelevant = findRelevantSigs(bdd);
    if (origrelevant == null) {
        return false;
    }

    StateGraph sg2 = sghelper.getNewStateGraph(origrelevant, origsig);
    if (sg2 == null) {
        logger.warn("Failed to generate new SG. Using the original one.");
        sg2 = origsg;
    }

    BiMap<Signal, Signal> sigmap = HashBiMap.create();
    Set<Signal> relevant = new HashSet<>();
    boolean found;
    for (Signal oldSig : origrelevant) {
        found = false;
        for (Signal newSig : sg2.getAllSignals()) {
            if (oldSig.getName().equals(newSig.getName())) {
                sigmap.put(oldSig, newSig);
                found = true;
                break;
            }
        }
        if (!found) {
            logger.error("Signal " + oldSig.getName() + " not found");
            return false;
        }
        relevant.add(sigmap.get(oldSig));
    }
    found = false;
    for (Signal newSig : sg2.getAllSignals()) {
        if (origsig.getName().equals(newSig.getName())) {
            sigmap.put(origsig, newSig);
            found = true;
            break;
        }
    }
    if (!found) {
        logger.error("Signal " + origsig.getName() + " not found");
        return false;
    }
    Signal sig = sigmap.get(origsig);

    Map<Signal, Boolean> posnegmap = getInputsPosOrNeg(term, sigmap);
    BDD newbdd = factory.one();
    for (Entry<Signal, Boolean> entry : posnegmap.entrySet()) {
        if (entry.getValue()) {
            newbdd = newbdd.andWith(getPosBDD(entry.getKey()));
        } else {
            newbdd = newbdd.andWith(getNegBDD(entry.getKey()));
        }
        if (entry.getKey() instanceof QuasiSignal) {
            relevant.add(entry.getKey());
        }
    }

    Set<State> startStates = new HashSet<>();
    for (State s : sg2.getStates()) {
        for (Entry<Transition, State> entry2 : s.getNextStates().entrySet()) {
            if (entry2.getKey().getSignal() == sig) {
                startStates.add(entry2.getValue());
            }
        }
    }

    List<List<Signal>> fallingPartitions = new ArrayList<>();
    for (Signal sig2 : relevant) {
        List<Signal> tmp = new ArrayList<>();
        tmp.add(sig2);
        fallingPartitions.add(tmp);
    }

    SortedSet<IOBehaviour> sequencesFront = new TreeSet<>(new SequenceFrontCmp());
    SortedSet<IOBehaviour> sequencesBack = new TreeSet<>(new SequenceBackCmp());
    Set<IOBehaviour> newSequences = new HashSet<>();
    Set<IOBehaviour> rmSequences = new HashSet<>();
    Deque<IOBehaviourSimulationStep> steps = new ArrayDeque<>();

    pool = new IOBehaviourSimulationStepPool(new IOBehaviourSimulationStepFactory());
    pool.setMaxTotal(-1);

    try {
        root = pool.borrowObject();
    } catch (Exception e) {
        e.printStackTrace();
        logger.error("Could not borrow object");
        return false;
    }

    IOBehaviourSimulationStep newStep;
    for (State s : startStates) {
        try {
            newStep = pool.borrowObject();
        } catch (Exception e) {
            e.printStackTrace();
            logger.error("Could not borrow object");
            return false;
        }
        root.getNextSteps().add(newStep);
        newStep.setPrevStep(root);
        newStep.setStart(s);
        newStep.setNextState(s);
        steps.add(newStep);
    }

    if (steps.isEmpty()) {
        return false;
    }

    final long checkThreshold = 100;

    long stepsEvaledTotal = 0;
    IOBehaviourSimulationStep step = null;
    while (!steps.isEmpty()) {
        step = steps.removeLast();
        //         System.out.println("#Step: " + step.toString());
        getNewSteps(step, sig, newSequences, steps, relevant);
        stepsEvaledTotal++;
        if (newSequences.size() >= checkThreshold) {
            removeCandidates(sequencesFront, sequencesBack, newSequences, rmSequences);
        }
    }
    removeCandidates(sequencesFront, sequencesBack, newSequences, rmSequences);
    logger.debug("Sequences: " + sequencesFront.size() + " - Tmp Sequences: " + newSequences.size()
            + " - Steps to evaluate: " + steps.size() + " - Steps evaluated: " + stepsEvaledTotal);
    logger.debug("Pool: " + "Created: " + pool.getCreatedCount() + ", Borrowed: " + pool.getBorrowedCount()
            + ", Returned: " + pool.getReturnedCount() + ", Active: " + pool.getNumActive() + ", Idle: "
            + pool.getNumIdle());
    logger.debug("RmSub: " + rmSub + " // RmFall: " + rmFall);

    SortedSet<IOBehaviour> sequences = new TreeSet<>(sequencesFront);
    sequencesFront.clear();
    sequencesBack.clear();

    List<IOBehaviour> falling = new ArrayList<>();
    List<IOBehaviour> rising = new ArrayList<>();
    List<IOBehaviour> constant = new ArrayList<>();
    if (!categoriseSequences(newbdd, sequences, falling, rising, constant)) {
        return false;
    }
    //      System.out.println("Falling:");
    //      for(IOBehaviour beh : falling) {
    //         System.out.println(beh.toString());
    //      }
    //      System.out.println("Rising:");
    //      for(IOBehaviour beh : rising) {
    //         System.out.println(beh.toString());
    //      }
    //      System.out.println("Constant:");
    //      for(IOBehaviour beh : constant) {
    //         System.out.println(beh.toString());
    //      }

    fallingPartitions = getPossiblePartitionsFromFalling(falling, relevant);
    //      System.out.println("FallingPartitions: " + fallingPartitions.toString());

    Map<Integer, List<Partition>> partitions = getPartitions(relevant, startgatesize);
    if (partitions == null) {
        logger.error("There was a problem while creating partions for signal " + sig.getName());
        return false;
    }

    //      System.out.println("Init:");
    //      for(Entry<Integer, List<Partition>> entry : partitions.entrySet()) {
    //         System.out.println(entry.getKey());
    //         for(Partition p : entry.getValue()) {
    //            System.out.println("\t" + p.toString());
    //         }
    //      }

    filterPartitions(partitions, fallingPartitions);
    if (partitions.isEmpty()) {
        logger.error("No suitable partions found");
        return false;
    }

    //      System.out.println("After filter Falling:");
    //      for(Entry<Integer, List<Partition>> entry : partitions.entrySet()) {
    //         System.out.println(entry.getKey());
    //         for(Partition p : entry.getValue()) {
    //            System.out.println("\t" + p.toString());
    //         }
    //      }

    //      System.out.println("posneg: " + posnegmap.toString());

    setPartitionBDDs(partitions, posnegmap);

    if (!checkRising(rising, partitions)) {
        logger.error("Check rising failed");
        return false;
    }
    if (partitions.isEmpty()) {
        logger.error("No suitable partions found");
        return false;
    }

    //      System.out.println("After filter Rising:");
    //      for(Entry<Integer, List<Partition>> entry : partitions.entrySet()) {
    //         System.out.println(entry.getKey());
    //         for(Partition p : entry.getValue()) {
    //            System.out.println("\t" + p.toString());
    //         }
    //      }

    if (!checkConstant(constant, partitions)) {
        logger.error("Check constant failed");
        return false;
    }
    if (partitions.isEmpty()) {
        logger.error("No suitable partions found");
        return false;
    }

    //      System.out.println("After filter Constant:");
    //      for(Entry<Integer, List<Partition>> entry : partitions.entrySet()) {
    //         System.out.println(entry.getKey());
    //         for(Partition p : entry.getValue()) {
    //            System.out.println("\t" + p.toString());
    //         }
    //      }

    applyDecoResult(term, partitions, posnegmap, sigmap);
    return true;
}

From source file:de.escalon.hypermedia.spring.hydra.PagedResourcesSerializer.java
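
Here isEmpty() guards the pop of the JSON-LD context stack after serialization, so an empty stack is never popped.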

@Override
public void serialize(PagedResources pagedResources, JsonGenerator jgen, SerializerProvider serializerProvider)
        throws IOException {

    final SerializationConfig config = serializerProvider.getConfig();
    JavaType javaType = config.constructType(pagedResources.getClass());

    JsonSerializer<Object> serializer = BeanSerializerFactory.instance.createSerializer(serializerProvider,
            javaType);

    // replicate pretty much everything from JacksonHydraSerializer
    // since we must reorganize the internals of pagedResources to get a hydra collection
    // with partial page view, we have to serialize pagedResources with an
    // unwrapping serializer
    Deque<LdContext> contextStack = (Deque<LdContext>) serializerProvider.getAttribute(KEY_LD_CONTEXT);
    if (contextStack == null) {
        contextStack = new ArrayDeque<LdContext>();
        serializerProvider.setAttribute(KEY_LD_CONTEXT, contextStack);
    }

    // TODO: filter next/previous/first/last from link list - maybe create new PagedResources without them?
    List<Link> links = pagedResources.getLinks();
    List<Link> filteredLinks = new ArrayList<Link>();
    for (Link link : links) {
        String rel = link.getRel();
        if (!navigationRels.contains(rel)) {
            filteredLinks.add(link);
        }
    }

    PagedResources toRender = new PagedResources(pagedResources.getContent(), pagedResources.getMetadata(),
            filteredLinks);

    jgen.writeStartObject();

    serializeContext(toRender, jgen, serializerProvider, contextStack);

    jgen.writeStringField(JsonLdKeywords.AT_TYPE, "hydra:Collection");

    // serialize with PagedResourcesMixin
    serializer.unwrappingSerializer(NameTransformer.NOP).serialize(toRender, jgen, serializerProvider);

    PagedResources.PageMetadata metadata = pagedResources.getMetadata();
    jgen.writeNumberField("hydra:totalItems", metadata.getTotalElements());

    // begin hydra:view
    jgen.writeObjectFieldStart("hydra:view");
    jgen.writeStringField(JsonLdKeywords.AT_TYPE, "hydra:PartialCollectionView");
    writeRelLink(pagedResources, jgen, Link.REL_NEXT);
    writeRelLink(pagedResources, jgen, "previous");
    // must also translate prev to its synonym previous
    writeRelLink(pagedResources, jgen, Link.REL_PREVIOUS, "previous");
    writeRelLink(pagedResources, jgen, Link.REL_FIRST);
    writeRelLink(pagedResources, jgen, Link.REL_LAST);
    jgen.writeEndObject();
    // end hydra:view

    jgen.writeEndObject();

    contextStack = (Deque<LdContext>) serializerProvider.getAttribute(KEY_LD_CONTEXT);
    if (!contextStack.isEmpty()) {
        contextStack.pop();
    }

}

From source file:jetbrains.exodus.entitystore.FileSystemBlobVaultOld.java
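
A deque backs this lazy directory walk: when the current file listing is exhausted, hasNext() returns false only if queue.isEmpty(); otherwise it pops the next directory and continues.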

@Override
public BackupStrategy getBackupStrategy() {
    return new BackupStrategy() {

        @Override
        public Iterable<FileDescriptor> listFiles() {
            return new Iterable<FileDescriptor>() {
                @Override
                public Iterator<FileDescriptor> iterator() {
                    final Deque<FileDescriptor> queue = new LinkedList<>();
                    queue.add(new FileDescriptor(location, blobsDirectory + File.separator));
                    return new Iterator<FileDescriptor>() {
                        int i = 0;
                        int n = 0;
                        File[] files;
                        FileDescriptor next;
                        String currentPrefix;

                        @Override
                        public boolean hasNext() {
                            if (next != null) {
                                return true;
                            }
                            while (i < n) {
                                final File file = files[i++];
                                final String name = file.getName();
                                if (file.isDirectory()) {
                                    queue.push(new FileDescriptor(file,
                                            currentPrefix + file.getName() + File.separator));
                                } else if (file.isFile()) {
                                    final long fileSize = file.length();
                                    if (fileSize == 0)
                                        continue;
                                    if (name.endsWith(blobExtension) || name.equalsIgnoreCase(VERSION_FILE)) {
                                        next = new FileDescriptor(file, currentPrefix, fileSize);
                                        return true;
                                    }
                                } else {
                                    // something strange with filesystem
                                    throw new EntityStoreException(
                                            "File or directory expected: " + file.toString());
                                }
                            }
                            if (queue.isEmpty()) {
                                return false;
                            }
                            final FileDescriptor fd = queue.pop();
                            files = IOUtil.listFiles(fd.getFile());
                            currentPrefix = fd.getPath();
                            i = 0;
                            n = files.length;
                            next = fd;
                            return true;
                        }

                        @Override
                        public FileDescriptor next() {
                            if (!hasNext()) {
                                throw new NoSuchElementException();
                            }
                            final FileDescriptor result = next;
                            next = null;
                            return result;
                        }

                        @Override
                        public void remove() {
                            throw new UnsupportedOperationException();
                        }
                    };
                }
            };
        }
    };
}

From source file:com.datastax.loader.CqlDelimLoad.java
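
When the input is a directory, its files are pushed onto a deque, and while (!fileList.isEmpty()) pops one file per load task submitted to the executor.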

public boolean run(String[] args)
        throws IOException, ParseException, InterruptedException, ExecutionException, KeyStoreException,
        NoSuchAlgorithmException, KeyManagementException, CertificateException, UnrecoverableKeyException {
    if (!parseArgs(args)) {
        System.err.println("Bad arguments");
        System.err.println(usage());
        return false;
    }

    // Setup
    if (!setup())
        return false;

    // open file
    Deque<File> fileList = new ArrayDeque<File>();
    File infile = null;
    File[] inFileList = null;
    boolean onefile = true;
    if (STDIN.equalsIgnoreCase(filename)) {
        infile = null;
    } else {
        infile = new File(filename);
        if (!infile.isFile()) {
            inFileList = infile.listFiles();
            if (inFileList == null || inFileList.length < 1)
                throw new IOException("directory is empty or could not be listed");
            onefile = false;
            Arrays.sort(inFileList, new Comparator<File>() {
                public int compare(File f1, File f2) {
                    return f1.getName().compareTo(f2.getName());
                }
            });
            for (int i = 0; i < inFileList.length; i++)
                fileList.push(inFileList[i]);
        }
    }

    // Launch Threads
    ExecutorService executor;
    long total = 0;
    if (onefile) {
        // One file/stdin to process
        executor = Executors.newSingleThreadExecutor();
        Callable<Long> worker = new CqlDelimLoadTask(cqlSchema, delimiter, charsPerColumn, nullString,
                commentString, dateFormatString, localDateFormatString, boolStyle, locale, maxErrors, skipRows,
                skipCols, maxRows, badDir, infile, session, consistencyLevel, numFutures, batchSize, numRetries,
                queryTimeout, maxInsertErrors, successDir, failureDir, nullsUnset, format, keyspace, table);
        Future<Long> res = executor.submit(worker);
        total = res.get();
        executor.shutdown();
    } else {
        executor = Executors.newFixedThreadPool(numThreads);
        Set<Future<Long>> results = new HashSet<Future<Long>>();
        while (!fileList.isEmpty()) {
            File tFile = fileList.pop();
            Callable<Long> worker = new CqlDelimLoadTask(cqlSchema, delimiter, charsPerColumn, nullString,
                    commentString, dateFormatString, localDateFormatString, boolStyle, locale, maxErrors,
                    skipRows, skipCols, maxRows, badDir, tFile, session, consistencyLevel, numFutures,
                    batchSize, numRetries, queryTimeout, maxInsertErrors, successDir, failureDir, nullsUnset,
                    format, keyspace, table);
            results.add(executor.submit(worker));
        }
        executor.shutdown();
        for (Future<Long> res : results)
            total += res.get();
    }

    // Cleanup
    cleanup();
    //System.err.println("Total rows inserted: " + total);

    return true;
}

From source file:org.betaconceptframework.astroboa.engine.definition.visitor.CmsPropertyVisitor.java
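
The collected supertypes are drained with while (!elementTypes.isEmpty()), visiting the sub-properties of each complex type in turn.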

private void populateDefinition(XSComponent component) {
    try {

        if (isDefinitionComplex()) {
            //Do not process a complex property if it already exists
            if (component instanceof XSComplexType) {
                if (name != null
                        && cmsDefinitionVisitor.getDefinitionCacheRegion().hasComplexTypeDefinition(name)) {
                    logger.debug("Complex Definition '{}' has already been processed.", name);

                    createDefinition = false;
                    return;
                }
            }
        }

        //Set namespaceURI
        retrieveElementNamespace(component);

        //Set URI
        retrieveSchemaURI(component);

        setBasicProperties(component);

        //process further in case of ComplexProperty or ContentType
        if (ValueType.ContentType == valueType || isDefinitionComplex()) {

            if (component instanceof XSComplexType) {
                visitSubProperties(((XSComplexType) component), false);
            } else if (component instanceof XSElementDecl) {

                final XSElementDecl element = ((XSElementDecl) component);
                if (element.getType() != null) {
                    if (ValueType.ContentType == valueType) {
                        //XML element represents a ContentType.
                        //First we have to visit the inherited elements (at least from complexType contentObjectType
                        //found in astroboa-model.xsd)

                        if (!element.getType().isComplexType()) {
                            throw new CmsException("Element " + element.getName()
                                    + " is Content Type but it does not refer to a complex type");
                        }

                        //Create a stack of all parent elements
                        Deque<XSType> elementTypes = collectAllSuperTypesIncludingProvidedElement(element);

                        if (superTypes == null) {
                            superTypes = new ArrayList<String>();
                        }

                        //Now visit properties for each element stored
                        boolean elementExtendsBaseContentObjectType = false;

                        while (!elementTypes.isEmpty()) {

                            //Retrieve and remove the first item in the deque
                            final XSComplexType complexType = elementTypes.pollFirst().asComplexType();

                            final boolean complexTypeRepresentsContentObjectType = complexType.getName() != null
                                    && CmsDefinitionItem.contentObjectType.equals(ItemUtils.createNewItem(null,
                                            complexType.getTargetNamespace(), complexType.getName()));

                            elementExtendsBaseContentObjectType = elementExtendsBaseContentObjectType
                                    || complexTypeRepresentsContentObjectType;

                            if (!complexTypeRepresentsContentObjectType && complexType.getName() != null) {

                                logger.debug("Adding superType {} for definition {}", complexType.getName(),
                                        name);

                                superTypes.add(complexType.getName());
                            }

                            visitSubProperties(complexType, complexTypeRepresentsContentObjectType);
                        }

                        if (!elementExtendsBaseContentObjectType) {
                            //Element is a global one and does not extend base contentObjectType.
                            //In this case we should dynamically load contentObjectType properties

                            XSComplexType contentObjectType = cmsDefinitionVisitor
                                    .getComplexType("contentObjectType");

                            if (contentObjectType == null) {
                                throw new CmsException("Could not locate complex type contentObjectType.");
                            }

                            visitSubProperties(contentObjectType, true);

                        }
                    } else {

                        if (element.getType().isLocal()) {
                            logger.debug(
                                    "Definition '{}' represents a complex element which is defined inside an element.",
                                    name);

                            visitSubProperties(element.getType().asComplexType(), false);
                        } else {
                            logger.debug(
                                    "Definition '{}' represents a complex element which refers to another complex type",
                                    name);

                            setComplexReference(element);
                        }
                    }
                } else
                    throw new CmsException("Element " + element.getName() + " is " + valueType
                            + "but it does not refer to a type");
            }
        }

    } catch (Throwable e) {
        logger.error("", e);
        createDefinition = false;
    }
}

From source file:io.undertow.server.handlers.proxy.LoadBalancingProxyHTTP2TestCase.java
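
The test queues one FutureResult per request, then drains the deque with while (!futures.isEmpty()), awaiting and asserting each response.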

@Test
public void testHttp2ClientMultipleStreamsThreadSafety()
        throws IOException, URISyntaxException, ExecutionException, InterruptedException, TimeoutException {
    //not actually a proxy test,
    //but convenient to put it here
    UndertowXnioSsl ssl = new UndertowXnioSsl(DefaultServer.getWorker().getXnio(), OptionMap.EMPTY,
            DefaultServer.SSL_BUFFER_POOL, DefaultServer.createClientSslContext());
    final UndertowClient client = UndertowClient.getInstance();
    final ClientConnection connection = client.connect(
            new URI("https", null, DefaultServer.getHostAddress(), DefaultServer.getHostPort() + 1, "/", null,
                    null),
            DefaultServer.getWorker(), ssl, DefaultServer.getBufferPool(),
            OptionMap.create(UndertowOptions.ENABLE_HTTP2, true)).get();
    final ExecutorService service = Executors.newFixedThreadPool(10);
    try {
        Deque<FutureResult<String>> futures = new ArrayDeque<>();
        for (int i = 0; i < 100; ++i) {
            final FutureResult<String> future = new FutureResult<>();
            futures.add(future);
            service.submit(new Callable<String>() {

                @Override
                public String call() throws Exception {
                    ClientRequest cr = new ClientRequest().setMethod(Methods.GET).setPath("/path")
                            .setProtocol(Protocols.HTTP_1_1);
                    connection.sendRequest(cr, new ClientCallback<ClientExchange>() {
                        @Override
                        public void completed(ClientExchange result) {
                            result.setResponseListener(new ClientCallback<ClientExchange>() {
                                @Override
                                public void completed(ClientExchange result) {
                                    new StringReadChannelListener(DefaultServer.getBufferPool()) {
                                        @Override
                                        protected void stringDone(String string) {
                                            future.setResult(string);
                                        }

                                        @Override
                                        protected void error(IOException e) {
                                            future.setException(e);
                                        }
                                    }.setup(result.getResponseChannel());
                                }

                                @Override
                                public void failed(IOException e) {
                                    future.setException(e);
                                }
                            });
                        }

                        @Override
                        public void failed(IOException e) {
                            future.setException(e);
                        }
                    });
                    return null;
                }
            });
        }
        while (!futures.isEmpty()) {
            FutureResult<String> future = futures.poll();
            Assert.assertNotEquals(IoFuture.Status.WAITING,
                    future.getIoFuture().awaitInterruptibly(10, TimeUnit.SECONDS));
            Assert.assertEquals("/path", future.getIoFuture().get());
        }
    } finally {
        service.shutdownNow();
    }
}

From source file:jetbrains.exodus.entitystore.PersistentEntityStoreImpl.java
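
Here isEmpty() determines whether the per-thread transaction stack can be removed from the map once its last transaction has been popped.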

void deregisterTransaction(@NotNull final PersistentStoreTransaction txn) {
    final Thread thread = Thread.currentThread();
    final Deque<PersistentStoreTransaction> stack = txns.get(thread);
    if (stack == null) {
        throw new EntityStoreException("Transaction was already finished");
    }
    if (txn != stack.peek()) {
        throw new EntityStoreException("Can't finish transaction: nested transaction is not finished");
    }
    stack.pop();
    if (stack.isEmpty()) {
        txns.remove(thread);
    }
}