Example usage for java.util Queue size

Introduction

On this page you can find example usages of java.util.Queue#size().

Prototype

int size();

Document

Returns the number of elements in this collection.
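
To make the contract concrete, here is a minimal, self-contained sketch (class and variable names are illustrative only):

import java.util.LinkedList;
import java.util.Queue;

public class QueueSizeDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        System.out.println(queue.size()); // 0: the queue starts empty

        queue.add("a");
        queue.add("b");
        System.out.println(queue.size()); // 2: both elements are counted

        queue.poll();                     // removes the head ("a")
        System.out.println(queue.size()); // 1
    }
}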

Usage

From source file: org.jboss.errai.ioc.rebind.ioc.graph.impl.DependencyGraphBuilderImpl.java

private void removeUnreachableConcreteInjectables() {
    final Set<String> reachableNames = new HashSet<String>();
    final Queue<Injectable> processingQueue = new LinkedList<Injectable>();
    for (final Injectable injectable : concretesByName.values()) {
        if (!injectable.getWiringElementTypes().contains(WiringElementType.Simpleton)
                && !reachableNames.contains(injectable.getFactoryName())) {
            processingQueue.add(injectable);
            do {
                final Injectable processedInjectable = processingQueue.poll();
                reachableNames.add(processedInjectable.getFactoryName());
                for (final Dependency dep : processedInjectable.getDependencies()) {
                    final Injectable resolvedDep = getResolvedDependency(dep, processedInjectable);
                    if (!reachableNames.contains(resolvedDep.getFactoryName())) {
                        processingQueue.add(resolvedDep);
                    }
                }
            } while (processingQueue.size() > 0);
        }
    }

    concretesByName.keySet().retainAll(reachableNames);
}
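
The method above drains the queue with a do/while guarded by processingQueue.size() > 0, which is equivalent to !processingQueue.isEmpty(). A standalone sketch of the same size()-guarded reachability traversal, over a hypothetical adjacency map:

import java.util.*;

public class ReachabilityDemo {

    static Set<String> reachableFrom(String start, Map<String, List<String>> edges) {
        Set<String> reachable = new HashSet<>();
        Queue<String> queue = new LinkedList<>();
        queue.add(start);
        while (queue.size() > 0) { // same guard as above; !queue.isEmpty() reads the same
            String node = queue.poll();
            if (reachable.add(node)) { // Set.add returns false for already-visited nodes
                queue.addAll(edges.getOrDefault(node, Collections.emptyList()));
            }
        }
        return reachable;
    }

    public static void main(String[] args) {
        Map<String, List<String>> edges = Map.of(
                "a", List.of("b", "c"),
                "b", List.of("c"),
                "d", List.of("a"));
        System.out.println(reachableFrom("a", edges)); // a, b, c (d is unreachable)
    }
}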

From source file: org.eobjects.analyzer.result.AnalyzerResultFutureTest.java

public void testMultiThreadedListenerScenario() throws Exception {
    final int threadCount = 10;

    final Thread[] threads = new Thread[threadCount];
    @SuppressWarnings({ "unchecked" })
    final Listener<NumberResult>[] listeners = new Listener[threadCount];
    final Queue<Object> resultQueue = new ArrayBlockingQueue<>(threadCount);

    for (int i = 0; i < listeners.length; i++) {
        listeners[i] = new Listener<NumberResult>() {
            @Override
            public void onSuccess(NumberResult result) {
                resultQueue.add(result);
            }

            @Override
            public void onError(RuntimeException error) {
                resultQueue.add(error);
            }
        };
    }

    final Ref<NumberResult> resultRef = new LazyRef<NumberResult>() {
        @Override
        protected NumberResult fetch() throws Throwable {
            long randomSleepTime = (long) (1000 * Math.random());
            Thread.sleep(randomSleepTime);
            return new NumberResult(43);
        }
    };

    final AnalyzerResultFuture<NumberResult> future = new AnalyzerResultFuture<>("foo", resultRef);

    for (int i = 0; i < threads.length; i++) {
        final Listener<NumberResult> listener = listeners[i];
        threads[i] = new Thread() {
            @Override
            public void run() {
                future.addListener(listener);
            }
        };
    }

    final int halfOfTheThreads = threads.length / 2;
    for (int i = 0; i < halfOfTheThreads; i++) {
        threads[i].start();
    }
    for (int i = 0; i < halfOfTheThreads; i++) {
        threads[i].join();
    }

    future.get();

    assertEquals("[43, 43, 43, 43, 43]", resultQueue.toString());
    assertEquals(halfOfTheThreads, resultQueue.size());

    for (int i = halfOfTheThreads; i < threads.length; i++) {
        threads[i].start();
    }
    for (int i = halfOfTheThreads; i < threads.length; i++) {
        threads[i].join();
    }

    assertEquals("[43, 43, 43, 43, 43, 43, 43, 43, 43, 43]", resultQueue.toString());
    assertEquals(threads.length, resultQueue.size());
}
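
The test relies on size() being safe to call from any thread on the java.util.concurrent queues, so the assertions after join() see every add(). A minimal sketch of that property (thread count and values are illustrative):

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class ConcurrentSizeDemo {
    public static void main(String[] args) throws InterruptedException {
        final int threadCount = 4;
        final Queue<Integer> results = new ArrayBlockingQueue<>(threadCount);

        Thread[] threads = new Thread[threadCount];
        for (int i = 0; i < threadCount; i++) {
            final int value = i;
            threads[i] = new Thread(() -> results.add(value));
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join(); // after join(), each thread's add() is visible here
        }
        System.out.println(results.size()); // always 4
    }
}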

From source file: candr.yoclip.Parser.java

/**
 * Creates a {@code ParsedOptionParameter} for the option parameter at the head of the queue. The parsed option will not contain an error if an
 * option value is missing. The parsed option will contain an error if the option appears to have an associated value and does not take a value.
 *
 * @param parameters The current queue of command parameters.
 * @return a parsed option parameter or {@code null} in the following cases.
 * <ul>
 * <li>The parameters queue is empty.</li>
 * <li>The head of the parameters queue is not an option.</li>
 * </ul>
 */
protected ParsedOption<T> getParsedOption(final Queue<String> parameters) {

    ParsedOption<T> parsedOptionParameter = null;

    final String parameter = parameters.peek();
    if (!StringUtils.isEmpty(parameter) && isOption(parameter)) {

        final String prefix = getParserOptions().getPrefix();
        final String separator = getParserOptions().getSeparator();
        final boolean isSeparatorWhitespace = StringUtils.isWhitespace(separator);

        final int separatorIndex = isSeparatorWhitespace ? -1 : parameter.indexOf(separator);
        final String optionParameterKey = parameter.substring(prefix.length(),
                separatorIndex < 0 ? parameter.length() : separatorIndex);
        final ParserOption<T> optionParameter = getParserOptions().get(optionParameterKey);
        if (null != optionParameter) {

            parameters.remove();

            // get the value if the option takes one
            if (optionParameter.hasValue()) {

                String value = null;
                if (isSeparatorWhitespace) {

                    if (parameters.size() > 0 && !isOption(parameters.peek())) {

                        // remove the value from the queue
                        value = parameters.remove();
                    }

                } else if (separatorIndex != -1) {

                    final int valueIndex = separatorIndex + 1;
                    if (valueIndex < parameter.length()) {
                        value = parameter.substring(valueIndex);
                    }
                }

                // The value can be null here, without it being an error condition, to facilitate actions later on
                // such as using a default.
                parsedOptionParameter = new ParsedOption<T>(optionParameter, value);

            } else if (separatorIndex > 1) {

                // if the separator is not white space and a value was present with the option parameter
                parsedOptionParameter = new ParsedOption<T>(optionParameter, null);
                parsedOptionParameter.setError("Does not take a value.");

            } else {

                // If the option does not take a value it must be a boolean so force it true
                parsedOptionParameter = new ParsedOption<T>(optionParameter, Boolean.TRUE.toString());
            }
        }
    }

    return parsedOptionParameter;
}
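
In the whitespace-separator branch, parameters.size() > 0 is checked before peek() so that a missing value is never consumed. A cut-down sketch of that look-ahead pattern (the option syntax and helper names here are hypothetical):

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

public class LookAheadDemo {

    static boolean isOption(String s) {
        return s.startsWith("-"); // hypothetical option prefix
    }

    /** Consumes and returns the value following an option, if one is present. */
    static String takeValue(Queue<String> parameters) {
        if (parameters.size() > 0 && !isOption(parameters.peek())) {
            return parameters.remove(); // safe: the queue is known to be non-empty
        }
        return null; // no value supplied; the caller may fall back to a default
    }

    public static void main(String[] args) {
        Queue<String> params = new ArrayDeque<>(List.of("value", "-next"));
        System.out.println(takeValue(params)); // value
        System.out.println(takeValue(params)); // null ("-next" is another option)
    }
}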

From source file: org.shaman.terrain.polygonal.PolygonalMapGenerator.java

private void findOceans() {
    for (Graph.Center c : graph.centers) {
        c.ocean = false;
        c.water = false;
    }
    for (Graph.Corner c : graph.corners) {
        c.ocean = false;
    }
    //set water parameter of centers
    float LAKE_THRESHOLD = 0.3f;
    Queue<Graph.Center> queue = new ArrayDeque<>();
    for (Graph.Center p : graph.centers) {
        int numWater = 0;
        for (Graph.Corner c : p.corners) {
            if (c.border || c.ocean) {
                p.border = true;
                p.water = true;
                p.ocean = true;
                queue.add(p);
                break;
            }
            if (c.water) {
                numWater++;
            }
        }
        p.water = (p.ocean || numWater >= p.corners.size() * LAKE_THRESHOLD);
    }
    LOG.info("border cells: " + queue.size());
    //flood fill borders to distinguish between oceans and lakes
    while (!queue.isEmpty()) {
        Graph.Center c = queue.poll();
        for (Graph.Center r : c.neighbors) {
            if (r.water && !r.ocean) {
                r.ocean = true;
                queue.add(r);
            }
        }
    }
    //assign coast tag
    for (Graph.Corner q : graph.corners) {
        q.coast = false;
    }
    for (Graph.Center c : graph.centers) {
        if (c.ocean) {
            for (Graph.Corner q : c.corners) {
                if (!q.water) {
                    q.coast = true;
                } else {
                    q.ocean = true;
                }
            }
        }
    }
    //assign basic biomes
    int oceanCount = 0;
    int lakeCount = 0;
    int landCount = 0;
    for (Graph.Center c : graph.centers) {
        if (c.ocean) {
            c.biome = Biome.OCEAN;
            oceanCount++;
        } else if (c.water) {
            c.biome = Biome.LAKE;
            lakeCount++;
        } else {
            c.biome = Biome.BEACH;
            landCount++;
        }
    }
    LOG.log(Level.INFO, "ocean cells: {0}, lake cells: {1}, land cells: {2}",
            new Object[] { oceanCount, lakeCount, landCount });
}

From source file: io.uploader.drive.drive.DriveOperations.java

private static void uploadFiles(OperationResult operationResult, Map<Path, File> localPathDriveFileMapping,
        Drive client, Path srcDir, boolean overwrite, final StopRequester stopRequester,
        final HasStatusReporter statusReporter) throws IOException {

    Queue<Path> filesQueue = io.uploader.drive.util.FileUtils.getAllFilesPath(srcDir,
            FileFinderOption.FILE_ONLY);

    int count = 0;
    for (Path path : filesQueue) {
        try {
            if (statusReporter != null) {
                BasicFileAttributes attr = io.uploader.drive.util.FileUtils.getFileAttr(path);
                StringBuilder sb = new StringBuilder();
                sb.append("Transfering files (");
                sb.append(path.getFileName().toString());
                if (attr != null) {
                    sb.append(" - size: ");
                    sb.append(io.uploader.drive.util.FileUtils.humanReadableByteCount(attr.size(), true));
                }
                sb.append(")");
                statusReporter.setStatus(sb.toString());
            }

            if (hasStopBeenRequested(stopRequester)) {
                if (statusReporter != null) {
                    statusReporter.setStatus("Stopped!");
                }
                operationResult.setStatus(OperationCompletionStatus.STOPPED);
                return;
            }

            final File driveParent = localPathDriveFileMapping.get(path.getParent());
            if (driveParent == null) {
                throw new IllegalStateException(
                        "The path " + path.toString() + " does not have any parent in the drive (parent path "
                                + path.getParent().toString() + ")...");
            }

            InputStreamProgressFilter.StreamProgressCallback progressCallback = null;
            if (statusReporter != null) {
                progressCallback = new InputStreamProgressFilter.StreamProgressCallback() {

                    @Override
                    public void onStreamProgress(double progress) {
                        if (statusReporter != null) {
                            statusReporter.setCurrentProgress(progress);
                        }
                    }
                };
            }
            uploadFile(operationResult, client, driveParent, path, overwrite, progressCallback);

            ++count;
            if (statusReporter != null) {
                double p = ((double) count) / filesQueue.size();
                statusReporter.setTotalProgress(p);
                statusReporter.setStatus("Transfering files...");
            }
        } catch (Throwable e) {
            logger.error("Error occurred while transfering the file " + path.toString(), e);
            operationResult.setStatus(OperationCompletionStatus.ERROR);
            operationResult.addError(path, e);
        }
    }
}
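
Because the loop iterates filesQueue with for-each instead of polling it, filesQueue.size() stays constant and serves as a stable denominator for the total-progress calculation. A minimal sketch of that idea (the per-item work is a stand-in):

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

public class ProgressDemo {
    public static void main(String[] args) {
        Queue<String> work = new ArrayDeque<>(List.of("a.txt", "b.txt", "c.txt"));

        int count = 0;
        for (String item : work) { // iteration does not remove elements,
            ++count;               // so size() remains the total throughout
            double progress = ((double) count) / work.size();
            System.out.printf("%s done, progress %.2f%n", item, progress);
        }
    }
}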

From source file: org.apache.gobblin.example.wikipedia.WikipediaExtractor.java

private Queue<JsonElement> retrievePageRevisions(Map<String, String> query)
        throws IOException, URISyntaxException {

    Queue<JsonElement> retrievedRevisions = new LinkedList<>();

    JsonElement jsonElement = performHttpQuery(this.rootUrl, query);

    if (jsonElement == null || !jsonElement.isJsonObject()) {
        return retrievedRevisions;
    }

    JsonObject jsonObj = jsonElement.getAsJsonObject();
    if (jsonObj == null || !jsonObj.has(JSON_MEMBER_QUERY)) {
        return retrievedRevisions;
    }

    JsonObject queryObj = jsonObj.getAsJsonObject(JSON_MEMBER_QUERY);
    if (!queryObj.has(JSON_MEMBER_PAGES)) {
        return retrievedRevisions;
    }

    JsonObject pagesObj = queryObj.getAsJsonObject(JSON_MEMBER_PAGES);
    if (pagesObj.entrySet().isEmpty()) {
        return retrievedRevisions;
    }

    JsonObject pageIdObj = pagesObj.getAsJsonObject(pagesObj.entrySet().iterator().next().getKey());
    if (!pageIdObj.has(JSON_MEMBER_REVISIONS)) {
        return retrievedRevisions;
    }

    //retrieve revisions of the current pageTitle
    JsonArray jsonArr = pageIdObj.getAsJsonArray(JSON_MEMBER_REVISIONS);
    for (JsonElement revElement : jsonArr) {
        JsonObject revObj = revElement.getAsJsonObject();

        /*'pageid' and 'title' are associated with the parent object
         * of all revisions. Add them to each individual revision.
         */
        if (pageIdObj.has(JSON_MEMBER_PAGEID)) {
            revObj.add(JSON_MEMBER_PAGEID, pageIdObj.get(JSON_MEMBER_PAGEID));
        }
        if (pageIdObj.has(JSON_MEMBER_TITLE)) {
            revObj.add(JSON_MEMBER_TITLE, pageIdObj.get(JSON_MEMBER_TITLE));
        }
        retrievedRevisions.add(revObj);
    }

    LOG.info(retrievedRevisions.size() + " record(s) retrieved for title " + this.requestedTitle);
    return retrievedRevisions;
}

From source file: net.cellcloud.talk.TalkService.java

/**
 * Resends queued primitives to the client identified by the target tag, skipping any
 * entries older than the given start time.
 */
protected void noticeResume(Cellet cellet, String targetTag, Queue<Long> timestampQueue,
        Queue<Primitive> primitiveQueue, long startTime) {
    TalkSessionContext context = this.tagContexts.get(targetTag);
    if (null == context) {
        if (Logger.isDebugLevel()) {
            Logger.d(TalkService.class, "Not find session by remote tag");
        }
        return;
    }

    Message message = null;

    synchronized (context) {
        // Get the tracker for this session context
        TalkTracker tracker = context.getTracker();
        // Only resend if the given Cellet matches the tracked Cellet
        if (tracker.getCellet(cellet.getFeature().getIdentifier()) == cellet) {
            Session session = context.getLastSession();

            // Drain both queues in lockstep, resending entries at or after startTime
            for (int i = 0, size = timestampQueue.size(); i < size; ++i) {
                Long timestamp = timestampQueue.poll();
                Primitive primitive = primitiveQueue.poll();
                if (timestamp.longValue() >= startTime) {
                    message = this.packetResume(targetTag, timestamp, primitive);
                    if (null != message) {
                        session.write(message);
                    }
                }
            }
        }
    }
}
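
Note how the loop captures timestampQueue.size() once in the for initializer: every poll() shrinks the queue, so re-reading size() in the loop condition would stop the loop halfway through. A minimal sketch of this snapshot pattern:

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

public class SnapshotSizeDemo {
    public static void main(String[] args) {
        Queue<Long> timestamps = new ArrayDeque<>(List.of(1L, 2L, 3L, 4L));

        // Snapshot size() once: polling inside the loop shrinks the queue,
        // so a condition of "i < timestamps.size()" would exit after two passes.
        for (int i = 0, size = timestamps.size(); i < size; ++i) {
            Long t = timestamps.poll();
            System.out.println("resending entry with timestamp " + t);
        }
        System.out.println(timestamps.size()); // 0: fully drained
    }
}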

From source file: org.codice.ddf.opensearch.source.OpenSearchSource.java

/**
 * Method to combine spatial searches into either a geometry collection or a bounding box.
 * OpenSearch endpoints and the query framework allow for multiple spatial query parameters. This
 * method has been refactored out and is protected so that downstream projects may try to
 * implement another algorithm (e.g. best-effort) to combine searches.
 *
 * @return null if there is no search specified, or a {@link SpatialSearch} with one search that is
 *     the combination of all of the spatial criteria
 */
@Nullable
protected SpatialSearch createCombinedSpatialSearch(final Queue<PointRadius> pointRadiusSearches,
        final Queue<Geometry> geometrySearches, final int numMultiPointRadiusVertices,
        final int distanceTolerance) {
    Geometry geometrySearch = null;
    BoundingBox boundingBox = null;
    PointRadius pointRadius = null;
    SpatialSearch spatialSearch = null;

    Set<Geometry> combinedGeometrySearches = new HashSet<>(geometrySearches);

    if (CollectionUtils.isNotEmpty(pointRadiusSearches)) {
        if (shouldConvertToBBox) {
            for (PointRadius search : pointRadiusSearches) {
                BoundingBox bbox = BoundingBoxUtils.createBoundingBox(search);
                List bboxCoordinate = BoundingBoxUtils.getBoundingBoxCoordinatesList(bbox);
                List<List> coordinates = new ArrayList<>();
                coordinates.add(bboxCoordinate);
                combinedGeometrySearches.add(ddf.geo.formatter.Polygon.buildPolygon(coordinates));
                LOGGER.trace(
                        "Point radius searches are converted to a (rough approximation) square using Vincenty's formula (direct)");
            }
        } else {
            if (pointRadiusSearches.size() == 1) {
                pointRadius = pointRadiusSearches.remove();
            } else {
                for (PointRadius search : pointRadiusSearches) {
                    Geometry circle = GeospatialUtil.createCirclePolygon(search.getLat(), search.getLon(),
                            search.getRadius(), numMultiPointRadiusVertices, distanceTolerance);
                    combinedGeometrySearches.add(circle);
                    LOGGER.trace("Point radius searches are converted to a polygon with a max of {} vertices.",
                            numMultiPointRadiusVertices);
                }
            }
        }
    }

    if (CollectionUtils.isNotEmpty(combinedGeometrySearches)) {
        // if there is more than one geometry, create a geometry collection
        if (combinedGeometrySearches.size() > 1) {
            geometrySearch = GEOMETRY_FACTORY
                    .createGeometryCollection(combinedGeometrySearches.toArray(new Geometry[0]));
        } else {
            geometrySearch = combinedGeometrySearches.iterator().next();
        }

        /**
         * If conversion to a bounding box is enabled, extract the approximate envelope. In the case
         * of multiple geometries, a single large envelope encompassing all of them is returned. The
         * area between the geometries is also included in this spatial search, hence widening the
         * search area.
         */
        if (shouldConvertToBBox) {
            if (combinedGeometrySearches.size() > 1) {
                LOGGER.trace(
                        "An approximate envelope encompassing all the geometry is returned. Area between the geometries are also included in this spatial search. Hence widen the search area.");
            }
            boundingBox = BoundingBoxUtils.createBoundingBox((Polygon) geometrySearch.getEnvelope());
            geometrySearch = null;
        }
    }

    if (geometrySearch != null || boundingBox != null || pointRadius != null) {
        // Geo Draft 2 defaults to geometry instead of polygon
        spatialSearch = new SpatialSearch(geometrySearch, boundingBox, null, pointRadius);
    }
    return spatialSearch;
}

From source file: org.glassfish.jersey.examples.sseitemstore.jaxrs.JaxrsItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link JaxrsItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<>(MAX_LISTENERS);
    final SseEventSource[] sources = new SseEventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final SseEventSource es = SseEventSource.target(itemsTarget.path("events")).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        indexQueues.add(indexes);

        es.register(inboundEvent -> {
            try {
                if (null == inboundEvent.getName()) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId() + " data="
                            + data);
                    indexes.add(items.indexOf(data));
                } else if ("size".equals(inboundEvent.getName())) {
                    sizeEventsCount.incrementAndGet();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                indexes.add(-999);
            } finally {
                latch.countDown();
            }
        });
    }

    try {
        open(sources);
        items.forEach((item) -> postItem(itemsTarget, item));

        assertTrue("Waiting to receive all events has timed out.",
                latch.await((1000 + MAX_LISTENERS * RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    items.forEach(
            (item) -> assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item)));

    final AtomicInteger queueId = new AtomicInteger(0);
    indexQueues.forEach((indexes) -> {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId.get(),
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId.get(), items.size(),
                indexes.size());
        queueId.incrementAndGet();
    });

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}

From source file: org.deeplearning4j.patent.TrainPatentClassifier.java

/**
 * JCommander entry point
 */
protected void entryPoint(String[] args) throws Exception {
    JCommanderUtils.parseArgs(this, args);

    //Azure storage account naming rules: https://blogs.msdn.microsoft.com/jmstall/2014/06/12/azure-storage-naming-rules/
    //The default exceptions aren't helpful, so we'll validate this here
    if (!azureStorageAcct.matches("^[a-z0-9]+$") || azureStorageAcct.length() < 3
            || azureStorageAcct.length() > 24) {
        throw new IllegalStateException("Invalid storage account name: must be alphanumeric, lowercase, "
                + "3 to 24 characters. Got option azureStorageAcct=\"" + azureStorageAcct + "\"");
    }
    if (!azureContainerPreproc.matches("^[a-z0-9-]+$") || azureContainerPreproc.length() < 3
            || azureContainerPreproc.length() > 63) {
        throw new IllegalStateException(
                "Invalid Azure container name: must be alphanumeric or dash, lowercase, "
                        + "3 to 63 characters. Got option azureContainerPreproc=\"" + azureContainerPreproc
                        + "\"");
    }

    StringBuilder results = new StringBuilder(); //To store results/timing - will be written to disk on completion

    long startTime = System.currentTimeMillis();

    // Prepare neural net
    ComputationGraph net = new ComputationGraph(NetworkConfiguration.getConf());
    net.init();
    log.info("Parameters: {}", net.params().length());

    // Configure Spark
    SparkConf sparkConf = new SparkConf();
    sparkConf.setAppName(sparkAppName);
    JavaSparkContext sc = new JavaSparkContext(sparkConf);
    int numWorkers = this.numNodes * this.numWorkersPerNode;

    //Prepare dataset RDDs
    String dirName = "seqLength" + maxSequenceLength + "_mb" + minibatch;
    String containerRoot = "wasbs://" + azureContainerPreproc + "@" + azureStorageAcct
            + ".blob.core.windows.net/";
    String baseOutPath = containerRoot + dirName;
    String trainDataPathRootDir = baseOutPath + "/train/";
    String testDataPathRootDir = baseOutPath + "/test/";
    JavaRDD<String> trainDataPaths = SparkUtils.listPaths(sc, trainDataPathRootDir);
    JavaRDD<String> testDataPaths = totalExamplesTest <= 0 ? null
            : listPathsSubset(sc, testDataPathRootDir, totalExamplesTest, 12345);
    trainDataPaths.cache();
    if (testDataPaths != null)
        testDataPaths.cache();

    //If only doing evaluation: perform it here and exit
    if (evalOnly) {
        evaluateOnly(sc, net, testDataPaths);
        return;
    }

    //Write configuration to output directory. Also determine output base directory for results
    writeConfig(sc);

    //Set up TrainingMaster for gradient sharing training
    VoidConfiguration voidConfiguration = VoidConfiguration.builder().unicastPort(port) // Should be open for IN/OUT communications on all Spark nodes
            .networkMask(networkMask) // Local network mask
            .controllerAddress(masterIP).build();
    TrainingMaster tm = new SharedTrainingMaster.Builder(voidConfiguration, minibatch).rngSeed(12345)
            .collectTrainingStats(false).batchSizePerWorker(minibatch) // Minibatch size for each worker
            .workersPerNode(numWorkersPerNode) // Workers per node
            .thresholdAlgorithm(new AdaptiveThresholdAlgorithm(gradientThreshold)).build();
    tm.setCollectTrainingStats(false);

    //If continueTraining==true and checkpoints are available: load a checkpoint to continue training
    int firstSubsetIdx = 0;
    if (continueTraining) {
        Pair<Integer, ComputationGraph> p = loadCheckpoint();
        if (p != null) {
            firstSubsetIdx = p.getFirst();
            net = p.getSecond();
        }
    }

    //Setup saving of parameter snapshots. This is so we can calculate accuracy vs. time
    final AtomicBoolean isTraining = new AtomicBoolean(false);
    final File baseParamSaveDir = new File(outputPath, "paramSnapshots");
    if (!baseParamSaveDir.exists())
        baseParamSaveDir.mkdirs();

    //Prepare Spark version of neural net
    SparkComputationGraph sparkNet = new SparkComputationGraph(sc, net, tm);
    sparkNet.setCollectTrainingStats(tm.getIsCollectTrainingStats());

    // Add listeners
    sparkNet.setListeners(new PerformanceListener(listenerFrequency, true));

    // Time setup
    long endTimeMs = System.currentTimeMillis();
    double elapsedSec = (endTimeMs - startTime) / MILLISEC_PER_SEC;
    log.info("Setup timing: {} s", elapsedSec);
    results.append("Setup timing: ").append(elapsedSec).append(" sec\n");

    String resultsFile = FilenameUtils.concat(outputPath, "results.txt");
    if (new File(resultsFile).exists()) {
        String str = "\n\n\n============================================================================"
                + results.toString();
        FileUtils.writeStringToFile(new File(resultsFile), str, Charset.forName("UTF-8"), true);
    } else {
        FileUtils.writeStringToFile(new File(resultsFile), results.toString(), Charset.forName("UTF-8"));
    }

    //Random split into RDDs of exactly "convNumBatches" objects
    long countTrain = trainDataPaths.count();
    JavaRDD<String>[] trainSubsets;
    if (batchesBtwCheckpoints > 1) {
        trainSubsets = SparkUtils.balancedRandomSplit((int) countTrain, batchesBtwCheckpoints, trainDataPaths);
    } else {
        trainSubsets = (JavaRDD<String>[]) new JavaRDD[] { trainDataPaths };
    }

    DataSetLoader datasetLoader = new LoadDataSetsFunction(wordVectorsPath,
            PatentLabelGenerator.classLabelFilteredCounts().size(), 300);

    //Before training starts: start the thread to track convergence. This thread asynchronously saves params periodically for later evaluation
    AtomicInteger currentSubset = new AtomicInteger(0);
    Queue<ToEval> toEvalQueue = ConvergenceRunnable.startConvergenceThread(baseParamSaveDir, currentSubset,
            isTraining, saveFreqSec, sparkNet.getNetwork().params());
    log.info("Network saving thread started: saving copy every {} sec", saveFreqSec);

    boolean firstSave = true;
    long startTrain = System.currentTimeMillis();
    for (int epoch = 0; epoch < numEpochs; epoch++) {
        for (int i = firstSubsetIdx; i < trainSubsets.length; i++) {
            currentSubset.set(i);
            log.info("Starting training: epoch {} of {}, subset {} of {} ({} minibatches)", (epoch + 1),
                    numEpochs, (i + 1), trainSubsets.length, batchesBtwCheckpoints);
            long start = System.currentTimeMillis();
            isTraining.set(true);
            sparkNet.fitPaths(trainSubsets[i], datasetLoader);
            isTraining.set(false);
            long end = System.currentTimeMillis();
            log.info("Finished training: epoch {} of {}, subset {} of {} ({} minibatches) in {} sec",
                    (epoch + 1), numEpochs, (i + 1), trainSubsets.length, batchesBtwCheckpoints,
                    (end - start) / 1000);

            String fileName = "netCheckpoint_" + System.currentTimeMillis() + "_epoch" + epoch + "_subset" + i
                    + ".zip";
            String outpath = FilenameUtils.concat(outputPath, "nets/" + fileName);
            File f = new File(outpath);
            if (firstSave) {
                firstSave = false;
                f.getParentFile().mkdirs();
            }
            ModelSerializer.writeModel(sparkNet.getNetwork(), f, true);
            log.info("Saved network checkpoint to {}", outpath);

            //Now, evaluate the saved checkpoint files
            List<ToEval> toEval = new ArrayList<>();
            while (toEvalQueue.size() > 0) {
                toEval.add(toEvalQueue.remove());
            }

            if (totalExamplesTest > 0 && toEval.size() > 0) {
                log.info("Starting evaluation of {} checkpoint files", toEval.size());
                ComputationGraph cgForEval = sparkNet.getNetwork().clone();
                SparkComputationGraph scgForEval = new SparkComputationGraph(sc, cgForEval, null);
                for (ToEval te : toEval) {
                    INDArray params = Nd4j.readBinary(te.getFile());
                    cgForEval.params().assign(params);

                    long startEval = System.currentTimeMillis();
                    IEvaluation[] evals = scgForEval.doEvaluation(testDataPaths, 4, minibatch, datasetLoader,
                            new Evaluation());
                    long endEval = System.currentTimeMillis();

                    StringBuilder sb = new StringBuilder();
                    Evaluation e = (Evaluation) evals[0];
                    sb.append("network ").append(te.getCount()).append(" trainingMs ")
                            .append(te.getDurationSoFar()).append(" evalMS ").append(endEval - startEval)
                            .append(" accuracy ").append(e.accuracy()).append(" f1 ").append(e.f1())
                            .append("\n");

                    FileUtils.writeStringToFile(new File(resultsFile), sb.toString(), Charset.forName("UTF-8"),
                            true); //Append new output to file
                    saveEvaluation(false, evals, sc);
                    log.info("Evaluation: {}", sb.toString());

                }
            }

            if (maxRuntimeSec > 0
                    && (System.currentTimeMillis() - startTrain) / MILLISEC_PER_SEC > maxRuntimeSec) {
                log.info("Terminating due to exceeding max runtime");
                epoch = numEpochs;
                break;
            }
        }
        firstSubsetIdx = 0;
    }

    log.info("----- Example Complete -----");
    sc.stop();
    System.exit(0);
}
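
The checkpoint-evaluation step above drains toEvalQueue with while (toEvalQueue.size() > 0), which is fine for a single consumer. When a producer may still be appending, or when the queue is a ConcurrentLinkedQueue (whose size() traverses the whole queue rather than running in constant time), draining until poll() returns null is the more robust idiom. A minimal sketch contrasting the two (queue contents are illustrative):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class DrainDemo {
    public static void main(String[] args) {
        Queue<String> queue = new ConcurrentLinkedQueue<>();
        queue.add("checkpoint-1");
        queue.add("checkpoint-2");

        // size()-guarded drain: correct with a single consumer, but size() is
        // O(n) on ConcurrentLinkedQueue and is re-evaluated on every pass.
        while (queue.size() > 0) {
            System.out.println("eval " + queue.remove());
        }

        queue.add("checkpoint-3");

        // poll()-guarded drain: one cheap check per element, and it never
        // calls remove() on a queue that has just become empty.
        String item;
        while ((item = queue.poll()) != null) {
            System.out.println("eval " + item);
        }
    }
}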