Example usage for java.util Queue poll

List of usage examples for java.util Queue poll

Introduction

This page lists example usages of java.util Queue poll.

Prototype

E poll();

Document

Retrieves and removes the head of this queue, or returns null if this queue is empty.
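
The following is a minimal, self-contained sketch (not drawn from the projects below) illustrating this contract: poll() removes elements in FIFO order and returns null once the queue is empty, so it can drive a drain loop without extra isEmpty() checks.

import java.util.LinkedList;
import java.util.Queue;

public class QueuePollExample {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<String>();
        queue.add("first");
        queue.add("second");

        // Drain the queue: poll() retrieves and removes the head on each call
        String head;
        while ((head = queue.poll()) != null) {
            System.out.println(head); // prints "first", then "second"
        }

        // Once the queue is empty, poll() returns null instead of throwing
        System.out.println(queue.poll()); // prints "null"
    }
}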

Usage

From source file:org.apache.pdfbox.pdfparser.COSParser.java

/**
 * Will parse every object necessary to load a single page from the pdf document. We try our
 * best to order objects according to offset in file before reading to minimize seek operations.
 *
 * @param dict the COSObject from the parent pages.
 * @param excludeObjects dictionary object reference entries with these names will not be parsed
 *
 * @throws IOException if something went wrong
 */
protected void parseDictObjects(COSDictionary dict, COSName... excludeObjects) throws IOException {
    // ---- create queue for objects waiting for further parsing
    final Queue<COSBase> toBeParsedList = new LinkedList<COSBase>();
    // offset ordered object map
    final TreeMap<Long, List<COSObject>> objToBeParsed = new TreeMap<Long, List<COSObject>>();
    // in case of compressed objects offset points to stmObj
    final Set<Long> parsedObjects = new HashSet<Long>();
    final Set<Long> addedObjects = new HashSet<Long>();

    addExcludedToList(excludeObjects, dict, parsedObjects);
    addNewToList(toBeParsedList, dict.getValues(), addedObjects);

    // ---- go through objects to be parsed
    while (!(toBeParsedList.isEmpty() && objToBeParsed.isEmpty())) {
        // -- first get all COSObject from other kind of objects and
        // put them in objToBeParsed; afterwards toBeParsedList is empty
        COSBase baseObj;
        while ((baseObj = toBeParsedList.poll()) != null) {
            if (baseObj instanceof COSDictionary) {
                addNewToList(toBeParsedList, ((COSDictionary) baseObj).getValues(), addedObjects);
            } else if (baseObj instanceof COSArray) {
                final Iterator<COSBase> arrIter = ((COSArray) baseObj).iterator();
                while (arrIter.hasNext()) {
                    addNewToList(toBeParsedList, arrIter.next(), addedObjects);
                }
            } else if (baseObj instanceof COSObject) {
                COSObject obj = (COSObject) baseObj;
                long objId = getObjectId(obj);
                COSObjectKey objKey = new COSObjectKey(obj.getObjectNumber(), obj.getGenerationNumber());

                if (!parsedObjects.contains(objId)) {
                    Long fileOffset = xrefTrailerResolver.getXrefTable().get(objKey);
                    // it is allowed that object references point to null,
                    // thus we have to test
                    if (fileOffset != null && fileOffset != 0) {
                        if (fileOffset > 0) {
                            objToBeParsed.put(fileOffset, Collections.singletonList(obj));
                        } else {
                            // negative offset means we have a compressed
                            // object within object stream;
                            // get offset of object stream
                            fileOffset = xrefTrailerResolver.getXrefTable()
                                    .get(new COSObjectKey((int) -fileOffset, 0));
                            if ((fileOffset == null) || (fileOffset <= 0)) {
                                throw new IOException("Invalid object stream xref object reference for key '"
                                        + objKey + "': " + fileOffset);
                            }

                            List<COSObject> stmObjects = objToBeParsed.get(fileOffset);
                            if (stmObjects == null) {
                                stmObjects = new ArrayList<COSObject>();
                                objToBeParsed.put(fileOffset, stmObjects);
                            }
                            stmObjects.add(obj);
                        }
                    } else {
                        // NULL object
                        COSObject pdfObject = document.getObjectFromPool(objKey);
                        pdfObject.setObject(COSNull.NULL);
                    }
                }
            }
        }

        // ---- read first COSObject with smallest offset
        // resulting object will be added to toBeParsedList
        if (objToBeParsed.isEmpty()) {
            break;
        }

        for (COSObject obj : objToBeParsed.remove(objToBeParsed.firstKey())) {
            COSBase parsedObj = parseObjectDynamically(obj, false);

            obj.setObject(parsedObj);
            addNewToList(toBeParsedList, parsedObj, addedObjects);

            parsedObjects.add(getObjectId(obj));
        }
    }
}

From source file:tachyon.master.MasterInfo.java

/**
 * Get the id of the file at the given path. If recursive, it scans the subdirectories as well.
 *
 * @param path The path to start looking at
 * @param recursive If true, recursively scan the subdirectories at the given path as well
 * @return the list of the inode id's at the path
 * @throws InvalidPathException
 * @throws FileDoesNotExistException
 */
public List<Integer> listFiles(TachyonURI path, boolean recursive)
        throws InvalidPathException, FileDoesNotExistException {
    List<Integer> ret = new ArrayList<Integer>();
    synchronized (mRootLock) {
        Inode inode = getInode(path);
        if (inode == null) {
            throw new FileDoesNotExistException(path.toString());
        }

        if (inode.isFile()) {
            ret.add(inode.getId());
        } else if (recursive) {
            Queue<Inode> queue = new LinkedList<Inode>();
            queue.addAll(((InodeFolder) inode).getChildren());

            while (!queue.isEmpty()) {
                Inode qinode = queue.poll();
                if (qinode.isDirectory()) {
                    queue.addAll(((InodeFolder) qinode).getChildren());
                } else {
                    ret.add(qinode.getId());
                }
            }
        } else {
            for (Inode child : ((InodeFolder) inode).getChildren()) {
                ret.add(child.getId());
            }
        }
    }

    return ret;
}

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Save partitions. To speed up, this operation runs in multiple threads.
 */
private void storeCheckpointVertices() {
    final int numPartitions = getPartitionStore().getNumPartitions();
    int numThreads = Math.min(GiraphConstants.NUM_CHECKPOINT_IO_THREADS.get(getConfiguration()), numPartitions);

    final Queue<Integer> partitionIdQueue = (numPartitions == 0) ? new LinkedList<Integer>()
            : new ArrayBlockingQueue<Integer>(numPartitions);
    Iterables.addAll(partitionIdQueue, getPartitionStore().getPartitionIds());

    final CompressionCodec codec = new CompressionCodecFactory(getConfiguration())
            .getCodec(new Path(GiraphConstants.CHECKPOINT_COMPRESSION_CODEC.get(getConfiguration())));

    long t0 = System.currentTimeMillis();

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }
                        Path path = createCheckpointFilePathSafe(
                                "_" + partitionId + CheckpointingUtils.CHECKPOINT_VERTICES_POSTFIX);

                        FSDataOutputStream uncompressedStream = getFs().create(path);

                        DataOutputStream stream = codec == null ? uncompressedStream
                                : new DataOutputStream(codec.createOutputStream(uncompressedStream));

                        Partition<I, V, E> partition = getPartitionStore().getOrCreatePartition(partitionId);

                        partition.write(stream);

                        getPartitionStore().putPartition(partition);

                        stream.close();
                        uncompressedStream.close();
                    }
                    return null;
                }

            };
        }
    };

    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "checkpoint-vertices-%d",
            getContext());

    LOG.info("Save checkpoint in " + (System.currentTimeMillis() - t0) + " ms, using " + numThreads
            + " threads");
}

From source file:org.lockss.repository.RepositoryNodeImpl.java

private List enumerateEncodedChildren(File[] children, CachedUrlSetSpec filter, boolean includeInactive) {
    // holds fully decoded immediate children
    List<File> expandedDirectories = new ArrayList<File>();

    // holds immediate children that still need to be decoded, and may
    // yield more than one expanded child
    Queue<File> unexpandedDirectories = new LinkedList<File>();

    // add initial set of unexpanded directories
    for (File file : children) {
        if (file.getName().endsWith("\\")) {
            unexpandedDirectories.add(file);
        } else {
            expandedDirectories.add(file);
        }
    }

    // keep expanding directories until no more unexpanded directories exist
    // core algorithm: BFS
    while (!unexpandedDirectories.isEmpty()) {
        File child = unexpandedDirectories.poll();
        if (child.getName().endsWith("\\")) {
            File[] newChildren = child.listFiles();
            for (File newChild : newChildren) {
                unexpandedDirectories.add(newChild);
            }
        } else {
            expandedDirectories.add(child);
        }
    }

    // using iterator to traverse safely
    Iterator<File> iter = expandedDirectories.iterator();
    while (iter.hasNext()) {
        File child = iter.next();
        if ((child.getName().equals(CONTENT_DIR)) || (!child.isDirectory())) {
            // iter remove instead of list.remove
            iter.remove();
        }
    }

    // normalization needed?
    CheckUnnormalizedMode unnormMode = RepositoryManager.getCheckUnnormalizedMode();

    // We switch to using a sorted set, this time we hold strings
    // representing the url
    List<String> subUrls = new ArrayList<String>();
    for (File child : expandedDirectories) {
        try {
            // http://root/child -> /child
            String location = child.getCanonicalPath()
                    .substring(nodeRootFile.getCanonicalFile().toString().length());
            location = decodeUrl(location);
            String oldLocation = location;
            switch (unnormMode) {
            case Log:
            case Fix:
                // Normalization done here against the url string, instead of
                // against the file in the repository. This alleviates us from
                // dealing with edge conditions where the file split occurs
                // around an encoding. e.g. %/5C is special in file, but decoded
                // URL string is %5C and we handle it correctly.
                location = normalizeTrailingQuestion(location);
                location = normalizeUrlEncodingCase(location);
                if (!oldLocation.equals(location)) {
                    switch (unnormMode) {
                    case Fix:
                        // most dangerous part done here, where we copy and
                        // delete. Maybe we should move to a lost in found instead? :)
                        String newRepoLocation = LockssRepositoryImpl
                                .mapUrlToFileLocation(repository.getRootLocation(), url + location);
                        logger.debug("Fixing unnormalized " + oldLocation + " => " + location);
                        FileUtils.copyDirectory(child, new File(newRepoLocation));
                        FileUtils.deleteDirectory(child);
                        break;
                    case Log:
                        logger.debug("Detected unnormalized " + oldLocation + ", s.b. " + location);
                        break;
                    }
                }
                break;
            }
            location = url + location;
            subUrls.add(location);
        } catch (IOException e) {
            logger.error("Normalizing (" + unnormMode + ") " + child, e);
        } catch (NullPointerException ex) {
            logger.error("Normalizing (" + unnormMode + ") " + child, ex);
        }
    }

    int listSize;
    if (filter == null) {
        listSize = subUrls.size();
    } else {
        // give a reasonable minimum since, if it's filtered, the array size
        // may be much smaller than the total children, particularly in very
        // flat trees
        listSize = Math.min(40, subUrls.size());
    }

    // generate the arraylist with urls and return
    ArrayList childL = new ArrayList();
    for (String childUrl : subUrls) {
        if ((filter == null) || (filter.matches(childUrl))) {
            try {
                RepositoryNode node = repository.getNode(childUrl);
                if (node == null)
                    continue;
                // add all nodes which are internal or active leaves
                // deleted nodes never included
                // boolean activeInternal = !node.isLeaf() && !node.isDeleted();
                // boolean activeLeaf = node.isLeaf() && !node.isDeleted() &&
                // (!node.isContentInactive() || includeInactive);
                // if (activeInternal || activeLeaf) {
                if (!node.isDeleted() && (!node.isContentInactive() || (includeInactive || !node.isLeaf()))) {
                    childL.add(node);
                }
            } catch (MalformedURLException ignore) {
                // this can safely skip bad files because they will
                // eventually be trimmed by the repository integrity checker
                // and the content will be replaced by a poll repair
                logger.error("Malformed child url: " + childUrl);
            }
        }
    }
    return childL;
}

From source file:org.stem.db.compaction.CompactionManager.java

private void performSinglePassCompaction(MountPoint mp) throws IOException {
    // TODO: lock?
    if (!exceedThreshold(mp))
        return;

    // Get FULL files ready for compaction
    Collection<FatFile> scanReadyFFs = mp.findReadyForCompaction();

    if (!exceedCandidatesThreshold(scanReadyFFs))
        return;

    Queue<FatFile> originalFFs = new LinkedList<FatFile>();

    FatFile temporaryFF = null;
    int iterated = 0;
    int omitted = 0;
    for (FatFile currentFF : scanReadyFFs) {
        FFScanner scanner = new FFScanner(currentFF);

        while (scanner.hasNext()) {
            iterated += 1;
            Blob blob = scanner.next();
            String blobKey = Hex.encodeHexString(blob.key());
            if (blob.deleted()) {
                omitted += 1;
                mp.getDataTracker().removeDeletes(blob.key(), blob.size(), currentFF.id);
                logger.info("key 0x{} omitted as deleted", Hex.encodeHexString(blob.key()));
                continue;
            }

            ExtendedBlobDescriptor localDescriptor = new ExtendedBlobDescriptor(blob.key(), blob.size(),
                    mp.uuid, blob.getDescriptor());
            ExtendedBlobDescriptor remoteDescriptor = client.readMeta(blob.key(), mp.uuid);
            if (null == remoteDescriptor) {
                omitted += 1;
                logger.info("key 0x{} omitted as no meta info", Hex.encodeHexString(blob.key()));
                continue;
            }
            // As we eventual consistent then: if blob.hasInvalidOffset -> continue
            if (!descriptorsAreConsistent(localDescriptor, remoteDescriptor)) {
                logger.info("key 0x{} omitted as inconsistent meta", Hex.encodeHexString(blob.key()));
                continue;
            }

            if (null == temporaryFF) {
                temporaryFF = createTemporaryFF(currentFF.id);
            }

            if (temporaryFF.hasSpaceFor(blob)) {
                BlobDescriptor descriptor = temporaryFF.writeBlob(blob); // TODO: hold descriptors for a subsequent MetaStore updates
                logger.info("key 0x{} is written to temporaryFF", Hex.encodeHexString(blob.key()));
                continue;
            }

            // If we are here then we can't write blob to temporary file because the temporaryFF is full

            // mark temporaryFF FULL
            temporaryFF.writeIndex();
            temporaryFF.writeFullMarker();

            // Replace original FF with temporary FF
            FatFile originalFF = originalFFs.poll();
            replaceFF(originalFF, temporaryFF);
            updateMeta(originalFF, mp);
            markAllOriginalFFsAsBlank(originalFFs, mp);

            temporaryFF.close(); // TODO: this must be strictly synchronized
            FileUtils.forceDelete(new File(temporaryFF.getPath())); // remove file
            temporaryFF = null;

            // Once temporary file exceeded its capacity create another one
            temporaryFF = createTemporaryFF(currentFF.id);
            // And write blob to it
            BlobDescriptor descriptor = temporaryFF.writeBlob(blob); // TODO: hold descriptors for a subsequent MetaStore updates
        }

        originalFFs.add(currentFF); // When compaction finish this file would be marked as BLANK
    }

    // All candidates are iterated
    // Write the rest of TMP FatFile to StorageNode as usual and mark iterated FFs as BLANK
    if (null != temporaryFF) {
        FFScanner scanner = new FFScanner(temporaryFF);
        int restBlobs = 0;
        while (scanner.hasNext()) {

            restBlobs += 1;
            Blob blob = scanner.next();
            WriteBlobMessage message = new WriteBlobMessage(mp.uuid, blob.key(), blob.data());// TODO: direct access to fields?
            mp.getDataTracker().remove(blob.key(), blob.size());

            // TODO: too heterogeneous. Should be Blob.Descriptor or something like that
            StorageService.instance.write(message);
            logger.info("key 0x{} moved", Hex.encodeHexString(blob.key()));
        }
        temporaryFF.close();
        FileUtils.forceDelete(new File(temporaryFF.getPath())); // remove file
        temporaryFF = null;
    }

    // Mark the rest of files as BLANK
    markAllOriginalFFsAsBlank(originalFFs, mp);
    if (null != temporaryFF) {
        FileUtils.forceDelete(new File(temporaryFF.getPath())); // remove file
        temporaryFF = null;
    }

    // TODO: delete temporary file
}

From source file:com.stehno.sanctuary.core.archive.DefaultFileArchiver.java

@Override
public MessageSet archiveChanges(final ChangeSet changeSet) {
    if (log.isDebugEnabled())
        log.debug("Archiving changes for " + changeSet.getRootDirectory() + "...");

    final MessageSet messageSet = new MessageSet(changeSet.getRootDirectory().getPath());

    final Queue<Future> futures = new LinkedList<Future>();

    int count = 0;
    for (final File file : changeSet.listFiles(FileStatus.NEW)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.addFile(changeSet.getRootDirectory(), file);
                    localStore.storeFile(file);
                    messageSet.addMessage(file.getPath(), "Added successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Add failed: " + ex.getMessage());
                }
            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Adds: " + count);

    count = 0;
    for (final File file : changeSet.listFiles(FileStatus.MODIFIED)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.updateFile(changeSet.getRootDirectory(), file);
                    localStore.storeFile(file);
                    messageSet.addMessage(file.getPath(), "Updated successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Update failed: " + ex.getMessage());
                }

            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Updates: " + count);

    count = 0;
    for (final File file : changeSet.listFiles(FileStatus.DELETED)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.deleteFile(changeSet.getRootDirectory(), file);
                    localStore.removeFile(file);
                    messageSet.addMessage(file.getPath(), "Deleted successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Delete failed: " + ex.getMessage());
                }

            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Deletes: " + count);

    do {
        while (!futures.isEmpty()) {
            if (futures.peek().isDone()) {
                futures.poll();
            }
        }

        if (collectionWaitTime > 0) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException ie) {
            }
        }

    } while (!futures.isEmpty());

    return messageSet;
}

From source file:org.zkoss.ganttz.data.GanttDiagramGraph.java

List<Recalculation> getRecalculationsNeededFrom(V task) {
    List<Recalculation> result = new ArrayList<>();
    Set<Recalculation> parentRecalculationsAlreadyDone = new HashSet<>();
    Recalculation first = recalculationFor(allPointsPotentiallyModified(task));
    first.couldHaveBeenModifiedBeforehand();

    result.addAll(getParentsRecalculations(parentRecalculationsAlreadyDone, first.taskPoint));
    result.add(first);

    Queue<Recalculation> pendingOfVisit = new LinkedList<>();
    pendingOfVisit.offer(first);

    Map<Recalculation, Recalculation> alreadyVisited = new HashMap<>();
    alreadyVisited.put(first, first);

    while (!pendingOfVisit.isEmpty()) {

        Recalculation current = pendingOfVisit.poll();

        for (TaskPoint each : current.taskPoint.getImmediateSuccessors()) {

            if (each.isImmediatelyDerivedFrom(current.taskPoint)) {
                continue;
            }

            Recalculation recalculationToAdd = getRecalcualtionToAdd(each, alreadyVisited);
            recalculationToAdd.comesFromPredecessor(current);

            if (!alreadyVisited.containsKey(recalculationToAdd)) {
                result.addAll(getParentsRecalculations(parentRecalculationsAlreadyDone, each));
                result.add(recalculationToAdd);
                pendingOfVisit.offer(recalculationToAdd);
                alreadyVisited.put(recalculationToAdd, recalculationToAdd);
            }
        }
    }

    return topologicalSorter.sort(result);
}

From source file:de.hybris.platform.test.ThreadPoolTest.java

/**
 * CORE-66PLA-10816 Potential chance to fetch a PoolableThread with pending transaction from previous run
 *
 * together with setting logger level for a log4j.logger.de.hybris.platform.util.threadpool=DEBUG prints out
 * information who/where started the stale transaction
 */
@Test
public void testTransactionCleanUp() throws Exception {
    final Queue<Transaction> recordedTransactions = new ConcurrentLinkedQueue<Transaction>();

    final boolean flagBefore = Config.getBoolean("transaction.monitor.begin", false);
    Config.setParameter("transaction.monitor.begin", "true");
    ThreadPool pool = null;

    try {
        // create own pool since we don't want to mess up the system
        pool = new ThreadPool(Registry.getCurrentTenantNoFallback().getTenantID(), MAX_THREADS);

        final GenericObjectPool.Config config = new GenericObjectPool.Config();
        config.maxActive = MAX_THREADS;
        config.maxIdle = 1;
        config.maxWait = -1;
        config.whenExhaustedAction = GenericObjectPool.WHEN_EXHAUSTED_BLOCK;
        config.testOnBorrow = true;
        config.testOnReturn = true;
        config.timeBetweenEvictionRunsMillis = 30 * 1000; // keep idle threads for at most 30 sec
        pool.setConfig(config);

        final int maxSize = pool.getMaxActive();
        final int activeBefore = pool.getNumActive();
        final List<NoClosingTransactionProcess> started = new ArrayList<NoClosingTransactionProcess>(maxSize);
        for (int i = activeBefore; i < maxSize; i++) {
            final PoolableThread poolableThread = pool.borrowThread();
            final NoClosingTransactionProcess noClosingTransactionProcess = new NoClosingTransactionProcess();
            started.add(noClosingTransactionProcess);
            poolableThread.execute(noClosingTransactionProcess);
        }
        Thread.sleep(1000);

        transacationStartingBarrier.await(); // await for all transactions to start

        //record all started  transactions
        for (final NoClosingTransactionProcess singleStarted : started) {
            recordedTransactions.add(singleStarted.getStartedTransaction());
        }

        finishedStaleTransactionLatch.await(180, TimeUnit.SECONDS);
        Thread.sleep(1000);//give them 1 second to finish

        final List<HasNoCurrentRunningTransactionProcess> ranAfter = new ArrayList<HasNoCurrentRunningTransactionProcess>(
                maxSize);
        Transaction recordedTransaction = recordedTransactions.poll();
        do {
            final PoolableThread poolableThread = pool.borrowThread();
            final HasNoCurrentRunningTransactionProcess hasNoCurrentRunningTransactionProcess = new HasNoCurrentRunningTransactionProcess(
                    recordedTransaction);
            ranAfter.add(hasNoCurrentRunningTransactionProcess);
            poolableThread.execute(hasNoCurrentRunningTransactionProcess);
            recordedTransaction = recordedTransactions.poll();
        } while (recordedTransaction != null);
        //still can borrow
        Assert.assertNotNull(pool.borrowThread());
        Thread.sleep(1000);

        //verify if really Thread had a non started transaction on the enter
        for (final HasNoCurrentRunningTransactionProcess singleRanAfter : ranAfter) {
            if (singleRanAfter.getException() != null) {
                singleRanAfter.getException().printException();
                Assert.fail("Some of the thread(s) captured a not finished transaction in the pool");
            }
        }
    } finally {
        if (pool != null) {
            try {
                pool.close();
            } catch (final Exception e) {
                // can't help it
            }
        }
        Config.setParameter("transaction.monitor.begin", BooleanUtils.toStringTrueFalse(flagBefore));

    }
}

From source file:tachyon.master.MasterInfo.java

/**
 * Get absolute paths of all in memory files.
 *
 * @return absolute paths of all in memory files.
 */
public List<TachyonURI> getInMemoryFiles() {
    List<TachyonURI> ret = new ArrayList<TachyonURI>();
    LOG.info("getInMemoryFiles()");
    Queue<Pair<InodeFolder, TachyonURI>> nodesQueue = new LinkedList<Pair<InodeFolder, TachyonURI>>();
    synchronized (mRootLock) {
        // TODO: Verify we want to use absolute path.
        nodesQueue.add(new Pair<InodeFolder, TachyonURI>(mRoot, new TachyonURI(TachyonURI.SEPARATOR)));
        while (!nodesQueue.isEmpty()) {
            Pair<InodeFolder, TachyonURI> tPair = nodesQueue.poll();
            InodeFolder tFolder = tPair.getFirst();
            TachyonURI curUri = tPair.getSecond();

            Set<Inode> children = tFolder.getChildren();
            for (Inode tInode : children) {
                TachyonURI newUri = curUri.join(tInode.getName());
                if (tInode.isDirectory()) {
                    nodesQueue.add(new Pair<InodeFolder, TachyonURI>((InodeFolder) tInode, newUri));
                } else if (((InodeFile) tInode).isFullyInMemory()) {
                    ret.add(newUri);
                }
            }
        }
    }
    return ret;
}

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Save the edges using the user-defined EdgeOutputFormat from our
 * vertexArray based on the split.
 *
 * @throws InterruptedException
 */
private void saveEdges() throws IOException, InterruptedException {
    final ImmutableClassesGiraphConfiguration<I, V, E> conf = getConfiguration();

    if (conf.getEdgeOutputFormatClass() == null) {
        LOG.warn("saveEdges: " + GiraphConstants.EDGE_OUTPUT_FORMAT_CLASS
                + "Make sure that the EdgeOutputFormat is not required.");
        return;
    }

    final int numPartitions = getPartitionStore().getNumPartitions();
    int numThreads = Math.min(conf.getNumOutputThreads(), numPartitions);
    LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
            "saveEdges: Starting to save the edges using " + numThreads + " threads");
    final EdgeOutputFormat<I, V, E> edgeOutputFormat = conf.createWrappedEdgeOutputFormat();

    final Queue<Integer> partitionIdQueue = (numPartitions == 0) ? new LinkedList<Integer>()
            : new ArrayBlockingQueue<Integer>(numPartitions);
    Iterables.addAll(partitionIdQueue, getPartitionStore().getPartitionIds());

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    EdgeWriter<I, V, E> edgeWriter = edgeOutputFormat.createEdgeWriter(getContext());
                    edgeWriter.setConf(conf);
                    edgeWriter.initialize(getContext());

                    long nextPrintVertices = 0;
                    long nextPrintMsecs = System.currentTimeMillis() + 15000;
                    int partitionIndex = 0;
                    int numPartitions = getPartitionStore().getNumPartitions();
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }

                        Partition<I, V, E> partition = getPartitionStore().getOrCreatePartition(partitionId);
                        long vertices = 0;
                        long edges = 0;
                        long partitionEdgeCount = partition.getEdgeCount();
                        for (Vertex<I, V, E> vertex : partition) {
                            for (Edge<I, E> edge : vertex.getEdges()) {
                                edgeWriter.writeEdge(vertex.getId(), vertex.getValue(), edge);
                                ++edges;
                            }
                            ++vertices;

                            // Update status at most every 250k vertices or 15 seconds
                            if (vertices > nextPrintVertices && System.currentTimeMillis() > nextPrintMsecs) {
                                LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
                                        "saveEdges: Saved " + edges + " edges out of " + partitionEdgeCount
                                                + " partition edges, on partition " + partitionIndex
                                                + " out of " + numPartitions);
                                nextPrintMsecs = System.currentTimeMillis() + 15000;
                                nextPrintVertices = vertices + 250000;
                            }
                        }
                        getPartitionStore().putPartition(partition);
                        ++partitionIndex;
                    }
                    edgeWriter.close(getContext()); // the temp results are saved now
                    return null;
                }
            };
        }
    };
    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "save-vertices-%d", getContext());

    LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO, "saveEdges: Done saving edges.");
    // YARN: must complete the commit the "task" output, Hadoop isn't there.
    if (conf.isPureYarnJob() && conf.getVertexOutputFormatClass() != null) {
        try {
            OutputCommitter outputCommitter = edgeOutputFormat.getOutputCommitter(getContext());
            if (outputCommitter.needsTaskCommit(getContext())) {
                LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
                        "OutputCommitter: committing task output.");
                // transfer from temp dirs to "task commit" dirs to prep for
                // the master's OutputCommitter#commitJob(context) call to finish.
                outputCommitter.commitTask(getContext());
            }
        } catch (InterruptedException ie) {
            LOG.error("Interrupted while attempting to obtain " + "OutputCommitter.", ie);
        } catch (IOException ioe) {
            LOG.error("Master task's attempt to commit output has " + "FAILED.", ioe);
        }
    }
}