Example usage for java.util LinkedList addAll

Introduction

This page collects example usages of java.util.LinkedList#addAll from open source projects.

Prototype

public boolean addAll(Collection<? extends E> c) 

Document

Appends all of the elements in the specified collection to the end of this list, in the order that they are returned by the specified collection's iterator.
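
Before the project examples, here is a minimal, self-contained sketch of the call itself (the class name and list contents are made up for illustration):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class AddAllDemo {
    public static void main(String[] args) {
        LinkedList<String> list = new LinkedList<String>(Arrays.asList("a", "b"));
        List<String> more = Arrays.asList("c", "d");

        // Appends "c" and "d" after "b"; returns true because the list changed.
        boolean changed = list.addAll(more);

        System.out.println(changed); // true
        System.out.println(list);    // [a, b, c, d]
    }
}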

Usage

From source file:org.commoncrawl.rpc.base.internal.AsyncClientChannel.java

private synchronized void cancelOutgoingMessages() {

    LinkedList<AsyncRequest<RPCStruct, RPCStruct>> tempList = new LinkedList<AsyncRequest<RPCStruct, RPCStruct>>();

    tempList.addAll(_sendQueue);

    _sendQueue.clear();

    for (AsyncRequest<RPCStruct, RPCStruct> request : tempList) {
        request.setStatus(AsyncRequest.Status.Error_RPCFailed);
        if (request.getCallback() != null) {
            request.getCallback().requestComplete(request);
        }
    }
    _sendQueue.clear();
}
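
Copying the pending requests into tempList with addAll before clearing gives the failure callbacks a private snapshot to iterate over, so a callback that touches _sendQueue cannot disturb the loop.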

From source file:org.trnltk.apps.morphology.contextless.parser.CachingMorphologicParserApp.java

@App("Parse sample TBMM Journal w/o bulk parse")
public void parseTbmmJournal_b0241h_noBulkParse() throws Exception {
    final File tokenizedFile = new File("core/src/test/resources/tokenizer/tbmm_b0241h_tokenized.txt");
    final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
    final LinkedList<String> words = new LinkedList<String>();
    final HashSet<String> uniqueWords = new HashSet<String>();
    for (String line : lines) {
        final ArrayList<String> strings = Lists
                .newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line));
        words.addAll(strings);
        uniqueWords.addAll(strings);
    }

    final int initialL1CacheSize = uniqueWords.size();
    final int maxL1CacheSize = initialL1CacheSize;

    final MorphologicParserCache l1Cache = new LRUMorphologicParserCache(NUMBER_OF_THREADS, initialL1CacheSize,
            maxL1CacheSize);

    final ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final MorphologicParser[] parsers = new MorphologicParser[NUMBER_OF_THREADS];
    for (int i = 0; i < parsers.length; i++) {
        parsers[i] = new CachingMorphologicParser(new TwoLevelMorphologicParserCache(BULK_SIZE, l1Cache),
                contextlessMorphologicParser, true);
    }

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (int i = 0; i < words.size(); i++) {
        final MorphologicParser parser = parsers[i % NUMBER_OF_THREADS];
        final String word = words.get(i);
        final int wordIndex = i;
        pool.execute(new SingleParseCommand(parser, word, wordIndex, false));
    }

    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(500, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
}

From source file:at.illecker.hama.hybrid.examples.onlinecf.OnlineCF.java

public List<KeyValuePair<Long, Double>> getMostSimilarItems(long item, int count) {

    Comparator<KeyValuePair<Long, Double>> similarityComparator = new Comparator<KeyValuePair<Long, Double>>() {

        @Override
        public int compare(KeyValuePair<Long, Double> arg0, KeyValuePair<Long, Double> arg1) {
            // Scaling and truncating to int collapses differences smaller than
            // 1e-5 to "equal"; Double.compare would be the more robust choice.
            double difference = arg0.getValue().doubleValue() - arg1.getValue().doubleValue();
            return (int) (100000 * difference);
        }
    };
    PriorityQueue<KeyValuePair<Long, Double>> queue = new PriorityQueue<KeyValuePair<Long, Double>>(count,
            similarityComparator);
    LinkedList<KeyValuePair<Long, Double>> results = new LinkedList<KeyValuePair<Long, Double>>();
    for (Long candidateItem : m_modelItemFactorizedValues.keySet()) {
        double similarity = calculateItemSimilarity(item, candidateItem);
        KeyValuePair<Long, Double> targetItem = new KeyValuePair<Long, Double>(candidateItem, similarity);
        queue.add(targetItem);
    }
    results.addAll(queue);
    return results;
}
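
A caveat about the final addAll above: PriorityQueue's iterator does not return its elements in priority order, so results ends up in the queue's internal heap order rather than sorted by similarity, and the count argument only sets the initial capacity, not a bound. A minimal sketch of draining the queue in order with poll() instead, using plain Double values in place of KeyValuePair:

import java.util.LinkedList;
import java.util.PriorityQueue;

public class DrainDemo {
    public static void main(String[] args) {
        PriorityQueue<Double> queue = new PriorityQueue<Double>();
        queue.add(0.7);
        queue.add(0.1);
        queue.add(0.4);

        // addAll(queue) would copy the queue's internal heap order, which is
        // unspecified; poll() always removes the smallest element, so
        // draining produces a properly sorted list.
        LinkedList<Double> ordered = new LinkedList<Double>();
        while (!queue.isEmpty()) {
            ordered.add(queue.poll());
        }
        System.out.println(ordered); // [0.1, 0.4, 0.7]
    }
}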

From source file:library.memorymonitor.ProcfsBasedProcessTree.java

/**
 * Get the process-tree with the latest state. If the root process is not
 * alive, an empty tree will be returned.
 *
 * @return the process-tree with the latest state.
 */
public ProcfsBasedProcessTree getProcessTree() {
    if (!pid.equals(deadPid)) {
        // Get the list of processes
        List<String> processList = getProcessList();

        Map<String, ProcessInfo> allProcessInfo = new HashMap<String, ProcessInfo>();

        // cache the processTree to get the age for processes
        Map<String, ProcessInfo> oldProcs = new HashMap<String, ProcessInfo>(processTree);
        processTree.clear();

        ProcessInfo me = null;
        for (String proc : processList) {
            // Get information for each process
            ProcessInfo pInfo = new ProcessInfo(proc);
            if (constructProcessInfo(pInfo, procfsDir) != null) {
                allProcessInfo.put(proc, pInfo);
                if (proc.equals(this.pid)) {
                    me = pInfo; // cache 'me'
                    processTree.put(proc, pInfo);
                }
            }
        }

        if (me == null) {
            return this;
        }

        // Add each process to its parent.
        for (Map.Entry<String, ProcessInfo> entry : allProcessInfo.entrySet()) {
            String pID = entry.getKey();
            if (!pID.equals("1")) {
                ProcessInfo pInfo = entry.getValue();
                ProcessInfo parentPInfo = allProcessInfo.get(pInfo.getPpid());
                if (parentPInfo != null) {
                    parentPInfo.addChild(pInfo);
                }
            }
        }

        // now start constructing the process-tree
        LinkedList<ProcessInfo> pInfoQueue = new LinkedList<ProcessInfo>();
        pInfoQueue.addAll(me.getChildren());
        while (!pInfoQueue.isEmpty()) {
            ProcessInfo pInfo = pInfoQueue.remove();
            if (!processTree.containsKey(pInfo.getPid())) {
                processTree.put(pInfo.getPid(), pInfo);
            }
            pInfoQueue.addAll(pInfo.getChildren());
        }

        // update age values and compute the number of jiffies since last update
        for (Map.Entry<String, ProcessInfo> procs : processTree.entrySet()) {
            ProcessInfo oldInfo = oldProcs.get(procs.getKey());
            if (procs.getValue() != null) {
                procs.getValue().updateJiffy(oldInfo);
                if (oldInfo != null) {
                    procs.getValue().updateAge(oldInfo);
                }
            }
        }

        if (LOG.isDebugEnabled()) {
            // Log.debug the ProcfsBasedProcessTree
            LOG.debug(this.toString());
        }
    }
    return this;
}
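
getProcessTree uses the LinkedList as a FIFO work queue: addAll enqueues each process's children at the tail while remove() dequeues from the head, giving a breadth-first walk of the tree. A minimal sketch of the same pattern, with an invented Node type standing in for ProcessInfo:

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

public class BfsDemo {
    static class Node {
        final String name;
        final List<Node> children = new ArrayList<Node>();
        Node(String name) { this.name = name; }
    }

    public static void main(String[] args) {
        Node root = new Node("1");
        Node left = new Node("2");
        Node right = new Node("3");
        root.children.add(left);
        root.children.add(right);
        left.children.add(new Node("4"));

        // addAll appends at the tail, remove() pops the head: FIFO order,
        // so nodes print level by level.
        LinkedList<Node> queue = new LinkedList<Node>();
        queue.addAll(root.children);
        while (!queue.isEmpty()) {
            Node current = queue.remove();
            System.out.println(current.name); // 2, 3, 4
            queue.addAll(current.children);
        }
    }
}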

From source file:org.apache.maven.doxia.linkcheck.DefaultLinkCheck.java

/**
 * Gets the comma separated list of effective exclude patterns.
 *
 * @return The comma separated list of effective exclude patterns, never <code>null</code>.
 */
private String getExcludedPages() {
    @SuppressWarnings("unchecked")
    LinkedList<String> patternList = new LinkedList<String>(FileUtils.getDefaultExcludesAsList());

    if (excludedPages != null) {
        patternList.addAll(Arrays.asList(excludedPages));
    }

    return StringUtils.join(patternList.iterator(), ",");
}
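
Here addAll simply appends the user-supplied patterns after Maven's default excludes; the LinkedList(Collection) copy constructor already fills the list in the defaults' iteration order, so the joined result lists the defaults first.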

From source file:org.trnltk.apps.morphology.contextless.parser.CachingMorphologicParserApp.java

@App("Parse sample TBMM Journal with bulk parse")
public void parseTbmmJournal_b0241h_withBulkParse() throws Exception {
    final File tokenizedFile = new File("core/src/test/resources/tokenizer/tbmm_b0241h_tokenized.txt");
    final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
    final LinkedList<String> words = new LinkedList<String>();
    final HashSet<String> uniqueWords = new HashSet<String>();
    for (String line : lines) {
        final ArrayList<String> strings = Lists
                .newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line));
        words.addAll(strings);
        uniqueWords.addAll(strings);
    }

    final int initialL1CacheSize = uniqueWords.size();
    final int maxL1CacheSize = initialL1CacheSize;

    final MorphologicParserCache l1Cache = new LRUMorphologicParserCache(NUMBER_OF_THREADS, initialL1CacheSize,
            maxL1CacheSize);

    final ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final MorphologicParser[] parsers = new MorphologicParser[NUMBER_OF_THREADS];
    for (int i = 0; i < parsers.length; i++) {
        parsers[i] = new CachingMorphologicParser(new TwoLevelMorphologicParserCache(BULK_SIZE, l1Cache),
                contextlessMorphologicParser, true);
    }

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (int i = 0; i < words.size(); i = i + BULK_SIZE) {
        final MorphologicParser parser = parsers[(i / BULK_SIZE) % NUMBER_OF_THREADS];
        int start = i;
        int end = i + BULK_SIZE < words.size() ? i + BULK_SIZE : words.size();
        final int wordIndex = i;

        final List<String> subWordList = words.subList(start, end);
        pool.execute(new BulkParseCommand(parser, subWordList, wordIndex, false));
    }

    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(500, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
}
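
Note that subList returns views backed by words rather than copies, so building the batches allocates nothing per word; this is safe here because words is not structurally modified while the batch commands run.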

From source file:cross.io.misc.WorkflowZipper.java

/**
 * Saves the currently assigned workflow elements that match the currently
 * assigned FileFilter to the given file. Marks all files for deletion on
 * exit.
 *
 * @param f the file to save to
 * @return true if the workflow was zipped, false otherwise
 * @throws RuntimeException if IOExceptions are encountered
 */
public boolean save(final File f) {
    if (this.zipWorkflow) {
        HashSet<String> zipEntries = new HashSet<>();
        final int bufsize = 1024;
        final File zipFile = f;
        ZipOutputStream zos;
        try {
            final FileOutputStream fos = new FileOutputStream(zipFile);
            zos = new ZipOutputStream(new BufferedOutputStream(fos));
            log.info("Created zip output stream");
            final byte[] input_buffer = new byte[bufsize];
            File basedir = FileTools.prependDefaultDirsWithPrefix("", null, this.iw.getStartupDate());
            if (this.deleteOnExit) {
                log.info("marked basedir for deletion on exit: {}", basedir);
                basedir.deleteOnExit();
            }
            if (flatten) {
                log.info("setting basedir to parent file: {}", basedir.getParentFile());
                basedir = basedir.getParentFile();
                final Iterator<IWorkflowResult> iter = this.iw.getResults();
                while (iter.hasNext()) {
                    final IWorkflowResult iwr = iter.next();
                    if (iwr instanceof IWorkflowFileResult) {
                        final IWorkflowFileResult iwfr = (IWorkflowFileResult) iwr;
                        final File file = iwfr.getFile();
                        log.info("Retrieving file result {}", file);
                        // mark file for deletion
                        final File parent = file.getParentFile();
                        log.info("Retrieving parent of file result {}", parent);
                        // Also delete the parent directory in which file was
                        // contained,
                        // unless it is the base directory + possibly additional
                        // defaultDirs
                        if (parent.getAbsolutePath().startsWith(basedir.getAbsolutePath())
                                && !parent.getAbsolutePath().equals(basedir.getAbsolutePath())) {
                            log.info("Marking file and parent for deletion");
                            if (this.deleteOnExit) {
                                parent.deleteOnExit();
                                file.deleteOnExit();
                            }
                        }
                        if (file.getAbsolutePath().startsWith(basedir.getAbsolutePath())) {
                            log.info("Marking file for deletion");
                            if (this.deleteOnExit) {
                                file.deleteOnExit();
                            }
                        }
                        if ((this.ff != null) && !this.ff.accept(file)) {
                            // Skip file if file filter does not accept it
                            continue;
                        } else {
                            log.info("Adding zip entry!");
                            addZipEntry(bufsize, zos, input_buffer, file, zipEntries);
                        }
                    }

                }
            } else {
                LinkedList<File> files = new LinkedList<>(Arrays.asList(basedir.listFiles(ff)));
                File archiveBase = basedir.getParentFile();
                while (!files.isEmpty()) {
                    File currentFile = files.removeFirst();
                    if (currentFile.isDirectory()) {
                        files.addAll(Arrays.asList(currentFile.listFiles(ff)));
                    } else {
                        try {
                            String relativePath = FileTools.getRelativeFile(archiveBase, currentFile).getPath()
                                    .replaceAll("\\\\", "/");
                            log.info("Adding zip entry for {} below {}", relativePath, archiveBase);
                            addRelativeZipEntry(bufsize, zos, input_buffer, relativePath, currentFile,
                                    zipEntries);
                        } catch (Exception ex) {
                            log.warn("Caught exception while retrieving relative path:", ex);
                        }
                    }
                    if (this.deleteOnExit) {
                        log.info("Marking file for deletion");
                        currentFile.deleteOnExit();
                    }
                }
            }

            try {
                zos.flush();
                zos.close();
            } catch (final IOException e) {
                throw new RuntimeException(e);
            }
        } catch (final IOException e) {
            throw new RuntimeException(e);
        }
        return true;
    } else {
        log.debug("Configured to not zip Workflow results!");
        return false;
    }
}
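
The non-flattened branch above drives the same worklist pattern: removeFirst() pops the next entry and addAll enqueues a directory's children. One detail worth guarding in code like this is that File.listFiles can return null on an I/O error, which would make Arrays.asList throw; a minimal sketch of the traversal with that check (the starting path is illustrative):

import java.io.File;
import java.util.Arrays;
import java.util.LinkedList;

public class WalkDemo {
    public static void main(String[] args) {
        // Start at the working directory; the path is illustrative.
        LinkedList<File> files = new LinkedList<File>();
        files.add(new File("."));
        while (!files.isEmpty()) {
            File current = files.removeFirst();
            System.out.println(current.getPath());
            if (current.isDirectory()) {
                File[] children = current.listFiles();
                // listFiles() can return null on I/O error, which would make
                // Arrays.asList throw, so guard before enqueueing.
                if (children != null) {
                    files.addAll(Arrays.asList(children));
                }
            }
        }
    }
}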

From source file:at.illecker.hama.hybrid.examples.onlinecf.OnlineCF.java

public List<KeyValuePair<Long, Double>> getMostSimilarUsers(long user, int count) {

    Comparator<KeyValuePair<Long, Double>> similarityComparator = new Comparator<KeyValuePair<Long, Double>>() {

        @Override
        public int compare(KeyValuePair<Long, Double> arg0, KeyValuePair<Long, Double> arg1) {
            // As in getMostSimilarItems: truncating the scaled difference to
            // int treats gaps under 1e-5 as ties; Double.compare is safer.
            double difference = arg0.getValue().doubleValue() - arg1.getValue().doubleValue();
            return (int) (100000 * difference);
        }
    };

    PriorityQueue<KeyValuePair<Long, Double>> queue = new PriorityQueue<KeyValuePair<Long, Double>>(count,
            similarityComparator);

    LinkedList<KeyValuePair<Long, Double>> results = new LinkedList<KeyValuePair<Long, Double>>();
    for (Long candidateUser : m_modelUserFactorizedValues.keySet()) {
        double similarity = calculateUserSimilarity(user, candidateUser);
        KeyValuePair<Long, Double> targetUser = new KeyValuePair<Long, Double>(candidateUser, similarity);
        queue.add(targetUser);
    }
    results.addAll(queue);
    return results;
}
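
The heap-order caveat noted for getMostSimilarItems above applies here as well: the returned list follows the PriorityQueue's internal order, not descending similarity.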

From source file:org.martin.ftp.net.FTPLinker.java

/**
 * Returns a list of the files in the specified directory.
 * @param directory Path of the directory whose file listing is wanted
 * @return List of files and folders built from the parameter, with the
 * folders placed first and the files after them
 * @throws IOException 
 */

public LinkedList<FTPFile> getOrderedFiles(String directory) throws IOException {

    LinkedList<FTPFile> files = new LinkedList<>();
    files.addAll(Arrays.asList(getDirectories(directory)));
    files.addAll(Arrays.asList(getFiles(directory)));
    return files;
}

From source file:org.martin.ftp.net.FTPLinker.java

/**
 * Returns a list of the files and folders in the current path, ordered
 * with directories first and files after them.
 * @return List containing the files and folders of the current path,
 * with the folders placed first and the files after them
 * @throws IOException 
 */

public LinkedList<FTPFile> getOrderedFiles() throws IOException {

    LinkedList<FTPFile> files = new LinkedList<>();
    files.addAll(toList(getDirectories()));
    files.addAll(toList(getFiles()));
    return files;
}
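
Both getOrderedFiles overloads rely on addAll appending in the source collection's iteration order, so two successive calls amount to a concatenation with the directories kept in front. A minimal sketch of the same idea with plain strings:

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class ConcatDemo {
    public static void main(String[] args) {
        List<String> dirs = Arrays.asList("docs/", "src/");
        List<String> files = Arrays.asList("README", "pom.xml");

        // addAll appends in the source collection's iteration order, so two
        // successive calls are a simple concatenation: directories first.
        LinkedList<String> ordered = new LinkedList<String>();
        ordered.addAll(dirs);
        ordered.addAll(files);
        System.out.println(ordered); // [docs/, src/, README, pom.xml]
    }
}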