Example usage for java.util.Queue.add

List of usage examples for java.util.Queue.add

Introduction

On this page you can find example usage for java.util.Queue.add.

Prototype

boolean add(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
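
To illustrate that contract before the longer examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed; the class name is arbitrary) using a capacity-bounded ArrayBlockingQueue: add throws IllegalStateException when the queue is full, whereas offer simply returns false.

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddExample {

    public static void main(String[] args) {
        // A capacity-bounded queue that holds at most one element
        Queue<String> queue = new ArrayBlockingQueue<>(1);

        System.out.println(queue.add("first"));    // true: inserted immediately
        System.out.println(queue.offer("second")); // false: queue is full, offer fails quietly

        try {
            queue.add("second"); // cannot insert immediately, so add throws
        } catch (IllegalStateException ex) {
            System.out.println("add failed: " + ex.getMessage());
        }
    }
}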

Usage

From source file:com.searchcode.app.jobs.IndexGitRepoJob.java

/**
 * Indexes all the documents changed in the repository, effectively performing a delta update.
 * Should only be called when there is a genuine update, i.e. something was indexed previously and
 * has a new commit.
 */
public void indexDocsByDelta(Path path, String repoName, String repoLocations, String repoRemoteLocation,
        RepositoryChanged repositoryChanged) {
    SearchcodeLib scl = Singleton.getSearchCodeLib(); // Should have data object by this point
    Queue<CodeIndexDocument> codeIndexDocumentQueue = Singleton.getCodeIndexQueue();
    String fileRepoLocations = FilenameUtils.separatorsToUnix(repoLocations);

    for (String changedFile : repositoryChanged.getChangedFiles()) {

        while (CodeIndexer.shouldPauseAdding()) {
            Singleton.getLogger().info("Pausing parser.");
            try {
                Thread.sleep(SLEEPTIME);
            } catch (InterruptedException ex) {
            }
        }

        String[] split = changedFile.split("/");
        String fileName = split[split.length - 1];
        changedFile = fileRepoLocations + "/" + repoName + "/" + changedFile;

        String md5Hash = Values.EMPTYSTRING;
        List<String> codeLines = null;

        try {
            codeLines = Helpers.readFileLines(changedFile, this.MAXFILELINEDEPTH);
        } catch (IOException ex) {
            Singleton.getLogger().warning("ERROR - caught a " + ex.getClass() + " in " + this.getClass()
                    + "\n with message: " + ex.getMessage());
            break;
        }

        try {
            FileInputStream fis = new FileInputStream(new File(changedFile));
            md5Hash = org.apache.commons.codec.digest.DigestUtils.md5Hex(fis);
            fis.close();
        } catch (IOException ex) {
            Singleton.getLogger().warning("Unable to generate MD5 for " + changedFile);
        }

        if (scl.isMinified(codeLines)) {
            Singleton.getLogger().info("Appears to be minified will not index  " + changedFile);
            break;
        }

        String languageName = scl.languageGuesser(changedFile, codeLines);
        String fileLocation = changedFile.replace(fileRepoLocations, Values.EMPTYSTRING).replace(fileName,
                Values.EMPTYSTRING);
        String fileLocationFilename = changedFile.replace(fileRepoLocations, Values.EMPTYSTRING);
        String repoLocationRepoNameLocationFilename = changedFile;

        String newString = getBlameFilePath(fileLocationFilename);
        List<CodeOwner> owners;
        if (this.USESYSTEMGIT) {
            owners = getBlameInfoExternal(codeLines.size(), repoName, fileRepoLocations, newString);
        } else {
            owners = getBlameInfo(codeLines.size(), repoName, fileRepoLocations, newString);
        }
        String codeOwner = scl.codeOwner(owners);

        if (codeLines != null) {
            if (this.LOWMEMORY) {
                try {
                    CodeIndexer.indexDocument(new CodeIndexDocument(repoLocationRepoNameLocationFilename,
                            repoName, fileName, fileLocation, fileLocationFilename, md5Hash, languageName,
                            codeLines.size(), StringUtils.join(codeLines, " "), repoRemoteLocation, codeOwner));
                } catch (IOException ex) {
                    Singleton.getLogger().warning("ERROR - caught a " + ex.getClass() + " in " + this.getClass()
                            + "\n with message: " + ex.getMessage());
                }
            } else {
                Singleton.incrementCodeIndexLinesCount(codeLines.size());
                codeIndexDocumentQueue.add(new CodeIndexDocument(repoLocationRepoNameLocationFilename, repoName,
                        fileName, fileLocation, fileLocationFilename, md5Hash, languageName, codeLines.size(),
                        StringUtils.join(codeLines, " "), repoRemoteLocation, codeOwner));
            }
        }
    }

    for (String deletedFile : repositoryChanged.getDeletedFiles()) {
        Singleton.getLogger().info("Missing from disk, removing from index " + deletedFile);
        try {
            CodeIndexer.deleteByFileLocationFilename(deletedFile);
        } catch (IOException ex) {
            Singleton.getLogger().warning("ERROR - caught a " + ex.getClass() + " in " + this.getClass()
                    + "\n with message: " + ex.getMessage());
        }
    }
}

From source file:org.apache.hadoop.hive.ql.QueryPlan.java

/**
 * Populate api.QueryPlan from exec structures. This includes constructing the
 * dependency graphs of stages and operators.
 *
 * @throws IOException
 */
private void populateQueryPlan() throws IOException {
    query.setStageGraph(new org.apache.hadoop.hive.ql.plan.api.Graph());
    query.getStageGraph().setNodeType(NodeType.STAGE);

    Queue<Task<? extends Serializable>> tasksToVisit = new LinkedList<Task<? extends Serializable>>();
    Set<Task<? extends Serializable>> tasksVisited = new HashSet<Task<? extends Serializable>>();
    tasksToVisit.addAll(rootTasks);
    while (tasksToVisit.size() != 0) {
        Task<? extends Serializable> task = tasksToVisit.remove();
        tasksVisited.add(task);
        // populate stage
        org.apache.hadoop.hive.ql.plan.api.Stage stage = new org.apache.hadoop.hive.ql.plan.api.Stage();
        stage.setStageId(task.getId());
        stage.setStageType(task.getType());
        query.addToStageList(stage);

        if (task instanceof ExecDriver) {
            // populate map task
            ExecDriver mrTask = (ExecDriver) task;
            org.apache.hadoop.hive.ql.plan.api.Task mapTask = new org.apache.hadoop.hive.ql.plan.api.Task();
            mapTask.setTaskId(stage.getStageId() + "_MAP");
            mapTask.setTaskType(TaskType.MAP);
            stage.addToTaskList(mapTask);
            populateOperatorGraph(mapTask, mrTask.getWork().getMapWork().getAliasToWork().values());

            // populate reduce task
            if (mrTask.hasReduce()) {
                org.apache.hadoop.hive.ql.plan.api.Task reduceTask = new org.apache.hadoop.hive.ql.plan.api.Task();
                reduceTask.setTaskId(stage.getStageId() + "_REDUCE");
                reduceTask.setTaskType(TaskType.REDUCE);
                stage.addToTaskList(reduceTask);
                Collection<Operator<? extends OperatorDesc>> reducerTopOps = new ArrayList<Operator<? extends OperatorDesc>>();
                reducerTopOps.add(mrTask.getWork().getReduceWork().getReducer());
                populateOperatorGraph(reduceTask, reducerTopOps);
            }
        } else {
            org.apache.hadoop.hive.ql.plan.api.Task otherTask = new org.apache.hadoop.hive.ql.plan.api.Task();
            otherTask.setTaskId(stage.getStageId() + "_OTHER");
            otherTask.setTaskType(TaskType.OTHER);
            stage.addToTaskList(otherTask);
        }
        if (task instanceof ConditionalTask) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency listEntry = new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            listEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE);
            listEntry.setNode(task.getId());
            ConditionalTask t = (ConditionalTask) task;

            for (Task<? extends Serializable> listTask : t.getListTasks()) {
                if (t.getChildTasks() != null) {
                    org.apache.hadoop.hive.ql.plan.api.Adjacency childEntry = new org.apache.hadoop.hive.ql.plan.api.Adjacency();
                    childEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE);
                    childEntry.setNode(listTask.getId());
                    // done processing the task
                    for (Task<? extends Serializable> childTask : t.getChildTasks()) {
                        childEntry.addToChildren(childTask.getId());
                        if (!tasksVisited.contains(childTask)) {
                            tasksToVisit.add(childTask);
                        }
                    }
                    query.getStageGraph().addToAdjacencyList(childEntry);
                }

                listEntry.addToChildren(listTask.getId());
                if (!tasksVisited.contains(listTask)) {
                    tasksToVisit.add(listTask);
                }
            }
            query.getStageGraph().addToAdjacencyList(listEntry);
        } else if (task.getChildTasks() != null) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency entry = new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            entry.setAdjacencyType(AdjacencyType.CONJUNCTIVE);
            entry.setNode(task.getId());
            // done processing the task
            for (Task<? extends Serializable> childTask : task.getChildTasks()) {
                entry.addToChildren(childTask.getId());
                if (!tasksVisited.contains(childTask)) {
                    tasksToVisit.add(childTask);
                }
            }
            query.getStageGraph().addToAdjacencyList(entry);
        }
    }
}

From source file:org.geoserver.wms.legendgraphic.ColorMapLegendCreator.java

private Queue<BufferedImage> createBody() {

    final Queue<BufferedImage> queue = new LinkedList<BufferedImage>();

    //
    // draw the various elements
    //
    // create the boxes for drawing later
    final int rowHeight = (int) Math.round(rowH);
    final int colorWidth = (int) Math.round(colorW);
    final int ruleWidth = (int) Math.round(ruleW);
    final int labelWidth = (int) Math.round(labelW);
    final Rectangle clipboxA = new Rectangle(0, 0, colorWidth, rowHeight);
    final Rectangle clipboxB = new Rectangle(0, 0, ruleWidth, rowHeight);
    final Rectangle clipboxC = new Rectangle(0, 0, labelWidth, rowHeight);

    //
    // Body
    //
    //
    // draw the various bodyCells
    for (ColorMapEntryLegendBuilder row : bodyRows) {

        //
        // row number i
        //
        // get element for color default behavior
        final Cell colorCell = row.getColorManager();
        // draw it
        final BufferedImage colorCellLegend = new BufferedImage(colorWidth, rowHeight,
                BufferedImage.TYPE_INT_ARGB);
        Graphics2D rlg = colorCellLegend.createGraphics();
        rlg.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
        rlg.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_ON);
        colorCell.draw(rlg, clipboxA, border);
        rlg.dispose();

        BufferedImage ruleCellLegend = null;
        if (forceRule) {
            // get element for rule
            final Cell ruleCell = row.getRuleManager();
            // draw it
            ruleCellLegend = new BufferedImage(ruleWidth, rowHeight, BufferedImage.TYPE_INT_ARGB);
            rlg = ruleCellLegend.createGraphics();
            rlg.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
            rlg.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_ON);
            ruleCell.draw(rlg, clipboxB, borderRule);
            rlg.dispose();
        }

        // draw it if it is present
        if (labelWidth > 0) {
            // get element for label
            final Cell labelCell = row.getLabelManager();
            if (labelCell != null) {
                final BufferedImage labelCellLegend = new BufferedImage(labelWidth, rowHeight,
                        BufferedImage.TYPE_INT_ARGB);
                rlg = labelCellLegend.createGraphics();
                rlg.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
                rlg.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING,
                        RenderingHints.VALUE_TEXT_ANTIALIAS_ON);
                labelCell.draw(rlg, clipboxC, borderLabel);
                rlg.dispose();

                //
                // merge the bodyCells for this row
                //
                //

                final Map<Key, Object> hintsMap = new HashMap<Key, Object>();
                queue.add(LegendUtils.hMergeBufferedImages(colorCellLegend, ruleCellLegend, labelCellLegend,
                        hintsMap, transparent, backgroundColor, dx));
            } else {
                final Map<Key, Object> hintsMap = new HashMap<Key, Object>();
                queue.add(LegendUtils.hMergeBufferedImages(colorCellLegend, ruleCellLegend, null, hintsMap,
                        transparent, backgroundColor, dx));
            }

        } else {
            //
            // merge the bodyCells for this row
            //
            //

            final Map<Key, Object> hintsMap = new HashMap<Key, Object>();
            queue.add(LegendUtils.hMergeBufferedImages(colorCellLegend, ruleCellLegend, null, hintsMap,
                    transparent, backgroundColor, dx));

        }

    }

    // return the list of legends
    return queue;// mergeRows(queue);
}

From source file:io.seldon.spark.actions.GroupActionsJob.java

public static void run(CmdLineArgs cmdLineArgs) {
    long unixDays = 0;
    try {
        unixDays = JobUtils.dateToUnixDays(cmdLineArgs.input_date_string);
    } catch (ParseException e) {
        unixDays = 0;
    }
    System.out.println(String.format("--- started GroupActionsJob date[%s] unixDays[%s] ---",
            cmdLineArgs.input_date_string, unixDays));

    System.out.println("Env: " + System.getenv());
    System.out.println("Properties: " + System.getProperties());

    SparkConf sparkConf = new SparkConf().setAppName("GroupActionsJob");

    if (cmdLineArgs.debug_use_local_master) {
        System.out.println("Using 'local' master");
        sparkConf.setMaster("local");
    }

    Tuple2<String, String>[] sparkConfPairs = sparkConf.getAll();
    System.out.println("--- sparkConf ---");
    for (int i = 0; i < sparkConfPairs.length; i++) {
        Tuple2<String, String> kvPair = sparkConfPairs[i];
        System.out.println(String.format("%s:%s", kvPair._1, kvPair._2));
    }
    System.out.println("-----------------");

    JavaSparkContext jsc = new JavaSparkContext(sparkConf);
    { // setup aws access
        Configuration hadoopConf = jsc.hadoopConfiguration();
        hadoopConf.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem");
        if (cmdLineArgs.aws_access_key_id != null && !"".equals(cmdLineArgs.aws_access_key_id)) {
            hadoopConf.set("fs.s3n.awsAccessKeyId", cmdLineArgs.aws_access_key_id);
            hadoopConf.set("fs.s3n.awsSecretAccessKey", cmdLineArgs.aws_secret_access_key);
        }
    }

    // String output_path_dir = "./out/" + input_date_string + "-" + UUID.randomUUID();

    JavaRDD<String> dataSet = jsc.textFile(
            JobUtils.getSourceDirFromDate(cmdLineArgs.input_path_pattern, cmdLineArgs.input_date_string))
            .repartition(4);

    final ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    final String single_client = cmdLineArgs.single_client;
    if (single_client != null) {
        Function<String, Boolean> clientFilter = new Function<String, Boolean>() {

            @Override
            public Boolean call(String t) throws Exception {
                ActionData actionData = JobUtils.getActionDataFromActionLogLine(objectMapper, t);
                return ((actionData.client != null) && (actionData.client.equals(single_client)));
            }
        };
        dataSet = dataSet.filter(clientFilter);
    }

    JavaPairRDD<String, ActionData> pairs = dataSet.mapToPair(new PairFunction<String, String, ActionData>() {

        @Override
        public Tuple2<String, ActionData> call(String t) throws Exception {
            ActionData actionData = JobUtils.getActionDataFromActionLogLine(objectMapper, t);
            // String key = (actionData.userid == 0) ? "__no_userid__" : actionData.client;
            String key = actionData.client;
            return new Tuple2<String, ActionData>(key, actionData);
        }

    }).persist(StorageLevel.MEMORY_AND_DISK());

    List<String> clientList = pairs.keys().distinct().collect();
    Queue<ClientDetail> clientDetailQueue = new PriorityQueue<ClientDetail>(30, new Comparator<ClientDetail>() {

        @Override
        public int compare(ClientDetail o1, ClientDetail o2) {
            if (o1.itemCount > o2.itemCount) {
                return -1;
            } else if (o1.itemCount < o2.itemCount) {
                return 1;
            }
            return 0;
        }
    });
    Queue<ClientDetail> clientDetailZeroQueue = new PriorityQueue<ClientDetail>(30,
            new Comparator<ClientDetail>() {

                @Override
                public int compare(ClientDetail o1, ClientDetail o2) {
                    if (o1.itemCount > o2.itemCount) {
                        return -1;
                    } else if (o1.itemCount < o2.itemCount) {
                        return 1;
                    }
                    return 0;
                }
            });
    System.out.println("Client list " + clientList.toString());
    for (String client : clientList) {
        if (client != null) {
            System.out.println("looking at client " + client);
            final String currentClient = client;

            JavaPairRDD<String, ActionData> filtered_by_client = pairs
                    .filter(new Function<Tuple2<String, ActionData>, Boolean>() {

                        @Override
                        public Boolean call(Tuple2<String, ActionData> v1) throws Exception {
                            if (currentClient.equalsIgnoreCase(v1._1)) {
                                return Boolean.TRUE;
                            } else {
                                return Boolean.FALSE;
                            }
                        }
                    });

            JavaPairRDD<String, ActionData> nonZeroUserIds = filtered_by_client
                    .filter(new Function<Tuple2<String, ActionData>, Boolean>() {

                        @Override
                        public Boolean call(Tuple2<String, ActionData> v1) throws Exception {
                            if (v1._2.userid == 0) {
                                return Boolean.FALSE;
                            } else {
                                return Boolean.TRUE;
                            }
                        }
                    });

            JavaPairRDD<String, Integer> userIdLookupRDD = nonZeroUserIds
                    .mapToPair(new PairFunction<Tuple2<String, ActionData>, String, Integer>() {

                        @Override
                        public Tuple2<String, Integer> call(Tuple2<String, ActionData> t) throws Exception {
                            String key = currentClient + "_" + t._2.client_userid;
                            return new Tuple2<String, Integer>(key, t._2.userid);
                        }
                    });

            Map<String, Integer> userIdLookupMap = userIdLookupRDD.collectAsMap();
            Map<String, Integer> userIdLookupMap_wrapped = new HashMap<String, Integer>(userIdLookupMap);
            final Broadcast<Map<String, Integer>> broadcastVar = jsc.broadcast(userIdLookupMap_wrapped);
            JavaRDD<String> json_only_with_zeros = filtered_by_client
                    .map(new Function<Tuple2<String, ActionData>, String>() {

                        @Override
                        public String call(Tuple2<String, ActionData> v1) throws Exception {
                            Map<String, Integer> m = broadcastVar.getValue();
                            ActionData actionData = v1._2;
                            if (actionData.userid == 0) {
                                String key = currentClient + "_" + actionData.client_userid;
                                if (m.containsKey(key)) {
                                    actionData.userid = m.get(key);
                                } else {
                                    return "";
                                }
                            }
                            String json = JobUtils.getJsonFromActionData(actionData);
                            return json;
                        }
                    });

            JavaRDD<String> json_only = json_only_with_zeros.filter(new Function<String, Boolean>() {

                @Override
                public Boolean call(String v1) throws Exception {
                    return (v1.length() == 0) ? Boolean.FALSE : Boolean.TRUE;
                }
            });

            String outputPath = getOutputPath(cmdLineArgs.output_path_dir, unixDays, client);
            if (cmdLineArgs.gzip_output) {
                json_only.saveAsTextFile(outputPath, org.apache.hadoop.io.compress.GzipCodec.class);
            } else {
                json_only.saveAsTextFile(outputPath);
            }
            long json_only_count = json_only.count();
            clientDetailZeroQueue
                    .add(new ClientDetail(currentClient, json_only_with_zeros.count() - json_only_count));
            clientDetailQueue.add(new ClientDetail(currentClient, json_only_count));
        } else
            System.out.println("Found null client!");
    }

    System.out.println("- Client Action (Zero Userid) Count -");
    while (clientDetailZeroQueue.size() != 0) {
        GroupActionsJob.ClientDetail clientDetail = clientDetailZeroQueue.remove();
        System.out.println(String.format("%s: %d", clientDetail.client, clientDetail.itemCount));
    }

    System.out.println("- Client Action Count -");
    while (clientDetailQueue.size() != 0) {
        GroupActionsJob.ClientDetail clientDetail = clientDetailQueue.remove();
        System.out.println(String.format("%s: %d", clientDetail.client, clientDetail.itemCount));
    }

    jsc.stop();
    System.out.println(String.format("--- finished GroupActionsJob date[%s] unixDays[%s] ---",
            cmdLineArgs.input_date_string, unixDays));

}

From source file:io.openvidu.test.e2e.OpenViduTestAppE2eTest.java

@Test
@DisplayName("Stream property changed event")
void streamPropertyChangedEventTest() throws Exception {

    Queue<Boolean> threadAssertions = new ConcurrentLinkedQueue<Boolean>();

    setupBrowser("chromeAlternateScreenShare");

    log.info("Stream property changed event");

    WebElement oneToManyInput = user.getDriver().findElement(By.id("one2many-input"));
    oneToManyInput.clear();
    oneToManyInput.sendKeys("1");

    user.getDriver().findElement(By.id("one2many-btn")).click();
    user.getDriver().findElement(By.className("screen-radio")).click();

    List<WebElement> joinButtons = user.getDriver().findElements(By.className("join-btn"));
    for (WebElement el : joinButtons) {
        el.sendKeys(Keys.ENTER);
    }

    user.getEventManager().waitUntilEventReaches("connectionCreated", 4);
    user.getEventManager().waitUntilEventReaches("accessAllowed", 1);
    user.getEventManager().waitUntilEventReaches("streamCreated", 2);
    user.getEventManager().waitUntilEventReaches("streamPlaying", 2);

    // Unpublish video
    final CountDownLatch latch1 = new CountDownLatch(2);
    user.getEventManager().on("streamPropertyChanged", (event) -> {
        threadAssertions.add("videoActive".equals(event.get("changedProperty").getAsString()));
        threadAssertions.add(!event.get("newValue").getAsBoolean());
        latch1.countDown();
    });
    user.getDriver().findElement(By.cssSelector("#openvidu-instance-0 .pub-video-btn")).click();
    user.getEventManager().waitUntilEventReaches("streamPropertyChanged", 2);

    if (!latch1.await(5000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail();
        return;
    }

    user.getEventManager().off("streamPropertyChanged");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    // Unpublish audio
    final CountDownLatch latch2 = new CountDownLatch(2);
    user.getEventManager().on("streamPropertyChanged", (event) -> {
        threadAssertions.add("audioActive".equals(event.get("changedProperty").getAsString()));
        threadAssertions.add(!event.get("newValue").getAsBoolean());
        latch2.countDown();
    });
    user.getDriver().findElement(By.cssSelector("#openvidu-instance-0 .pub-audio-btn")).click();
    user.getEventManager().waitUntilEventReaches("streamPropertyChanged", 4);

    if (!latch2.await(5000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail();
        return;
    }

    user.getEventManager().off("streamPropertyChanged");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    // Resize captured window
    final CountDownLatch latch3 = new CountDownLatch(2);
    int newWidth = 1000;
    int newHeight = 700;

    final long[] expectedWidthHeight = new long[2];

    user.getEventManager().on("streamPropertyChanged", (event) -> {
        String expectedDimensions = "{\"width\":" + expectedWidthHeight[0] + ",\"height\":"
                + expectedWidthHeight[1] + "}";
        threadAssertions.add("videoDimensions".equals(event.get("changedProperty").getAsString()));
        threadAssertions.add(expectedDimensions.equals(event.get("newValue").getAsJsonObject().toString()));
        latch3.countDown();
    });

    user.getDriver().manage().window().setSize(new Dimension(newWidth, newHeight));

    String widthAndHeight = user.getEventManager().getDimensionOfViewport();
    JSONObject obj = (JSONObject) new JSONParser().parse(widthAndHeight);

    expectedWidthHeight[0] = (long) obj.get("width");
    expectedWidthHeight[1] = (long) obj.get("height");

    System.out.println("New viewport dimension: " + obj.toJSONString());

    user.getEventManager().waitUntilEventReaches("streamPropertyChanged", 6);

    if (!latch3.await(5000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail();
        return;
    }

    user.getEventManager().off("streamPropertyChanged");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    gracefullyLeaveParticipants(2);
}

From source file:de.tudarmstadt.ukp.dkpro.lexsemresource.graph.EntityGraphJGraphT.java

/**
 * Creates the hyponym map, that maps from nodes to their (recursive) number of hyponyms for
 * each node. "recursive" means that the hyponyms of hyponyms are also taken into account.
 *
 * @throws UnsupportedOperationException
 * @throws LexicalSemanticResourceException
 */
private Map<String, Integer> getHyponymCountMap() throws LexicalSemanticResourceException {
    // do only create hyponymMap, if it was not already computed
    if (hyponymCountMap != null) {
        return hyponymCountMap;
    }

    // work on the lcc, otherwise this is not going to work
    // EntityGraphJGraphT lcc = this;
    EntityGraphJGraphT lcc = this.getLargestConnectedComponent();
    lcc.removeCycles();
    int nrOfNodes = lcc.getNumberOfNodes();

    File hyponymCountMapSerializedFile = new File(
            getGraphId() + "_" + hyponymCountMapFilename + (lexSemRes.getIsCaseSensitive() ? "-cs" : "-cis"));
    hyponymCountMap = new HashMap<String, Integer>();

    if (hyponymCountMapSerializedFile.exists()) {
        logger.info("Loading saved hyponymyCountMap ...");
        hyponymCountMap = EntityGraphUtils.deserializeMap(hyponymCountMapSerializedFile);
        if (hyponymCountMap.size() != nrOfNodes) {
            throw new LexicalSemanticResourceException(
                    "HyponymCountMap does not contain an entry for each node in the graph."
                            + hyponymCountMap.size() + "/" + nrOfNodes);
        }
        logger.info("Done loading saved hyponymyCountMap");
        return hyponymCountMap;
    }

    hyponymCountMap = new HashMap<String, Integer>();

    // a queue holding the nodes to process
    Queue<String> queue = new LinkedList<String>();

    // In the entity graph a node may have more than one father.
    // Thus, we check whether a node was already visited.
    // Then, it is not expanded again.
    Set<String> visited = new HashSet<String>();

    // initialize the queue with all leaf nodes
    Set<String> leafNodes = new HashSet<String>();
    for (Entity leaf : lcc.getLeaves()) {
        leafNodes.add(leaf.getId());
    }
    queue.addAll(leafNodes);

    logger.info(leafNodes.size() + " leaf nodes.");

    ProgressMeter progress = new ProgressMeter(getNumberOfNodes());
    // while the queue is not empty
    while (!queue.isEmpty()) {
        // remove first element from queue
        String currNodeId = queue.poll();
        Entity currNode = lexSemRes.getEntityById(currNodeId);

        // in some rare cases, getEntityById might fail - so better check for nulls and fail
        // gracefully
        if (currNode == null) {
            visited.add(currNodeId);
            hyponymCountMap.put(currNodeId, 0);
        }

        logger.debug(queue.size());

        if (visited.contains(currNodeId)) {
            continue;
        }

        progress.next();

        if (logger.isDebugEnabled()) {
            logger.debug(progress + " - " + queue.size() + " left in queue");
        } else if (logger.isInfoEnabled() && (progress.getCount() % 100 == 0)) {
            logger.info(progress + " - " + queue.size() + " left in queue");
        }

        Set<Entity> children = lcc.getChildren(currNode);
        Set<String> invalidChildIds = new HashSet<String>();
        int validChildren = 0;
        int sumChildHyponyms = 0;
        boolean invalid = false;
        for (Entity child : children) {
            if (lcc.containsVertex(child)) {
                if (hyponymCountMap.containsKey(child.getId())) {
                    sumChildHyponyms += hyponymCountMap.get(child.getId());
                    validChildren++;
                } else {
                    invalid = true;
                    invalidChildIds.add(child.getId());
                }
            }
        }

        // we cannot use continue directly if invalid as this would continue the inner loop not
        // the outer loop
        if (invalid) {
            // One of the children is not in the hyponymCountMap yet.
            // Re-enter the node into the queue and continue with the next node.
            // Also enter all the children that are not in the queue yet.
            queue.add(currNodeId);
            for (String childId : invalidChildIds) {
                if (!visited.contains(childId) && !queue.contains(childId)) {
                    queue.add(childId);
                }
            }
            continue;
        }

        // mark as visited
        visited.add(currNodeId);

        // the number of hyponyms of the current node is the number of its own hyponyms and the sum of
        // the hyponyms of its children.
        int currNodeHyponomyCount = validChildren + sumChildHyponyms;
        hyponymCountMap.put(currNodeId, currNodeHyponomyCount);

        // add parents of current node to queue
        for (Entity parent : lcc.getParents(currNode)) {
            if (lcc.containsVertex(parent)) {
                queue.add(parent.getId());
            }
        }
    } // while queue not empty

    logger.info(visited.size() + " nodes visited");
    if (visited.size() != nrOfNodes) {
        List<Entity> missed = new ArrayList<Entity>();
        for (Entity e : lcc.getNodes()) {
            if (!visited.contains(e.getId())) {
                missed.add(e);
                System.out.println("Missed: [" + e + "]");
            }
        }

        throw new LexicalSemanticResourceException(
                "Visited only " + visited.size() + " out of " + nrOfNodes + " nodes.");
    }
    if (hyponymCountMap.size() != nrOfNodes) {
        throw new LexicalSemanticResourceException(
                "HyponymCountMap does not contain an entry for each node in the graph." + hyponymCountMap.size()
                        + "/" + nrOfNodes);
    }

    /*
     * As an EntityGraph is a graph rather than a tree, the hyponymCount for top nodes can be
     * greater than the number of nodes in the graph. This is due to the multiple counting of nodes
     * having more than one parent. Thus, we have to scale hyponym counts to fall in
     * [0,NumberOfNodes].
     */
    for (String key : hyponymCountMap.keySet()) {
        if (hyponymCountMap.get(key) > hyponymCountMap.size()) {
            // TODO scaling function is not optimal (to say the least :)
            hyponymCountMap.put(key, (hyponymCountMap.size() - 1));
        }
    }

    logger.info("Computed hyponymCountMap");
    EntityGraphUtils.serializeMap(hyponymCountMap, hyponymCountMapSerializedFile);
    logger.info("Serialized hyponymCountMap");

    return hyponymCountMap;
}

From source file:com.searchcode.app.jobs.IndexSvnRepoJob.java

/**
 * Indexes all the documents in the path provided. Will also remove anything from the index if not on disk.
 * Generally this is a slow update used only for the initial clone of a repository.
 * NB this can be used for updates but it will be much slower as it needs to walk the contents of the disk.
 */
public void indexDocsByPath(Path path, String repoName, String repoLocations, String repoRemoteLocation,
        boolean existingRepo) {
    SearchcodeLib scl = Singleton.getSearchCodeLib(); // Should have data object by this point
    List<String> fileLocations = new ArrayList<>();
    Queue<CodeIndexDocument> codeIndexDocumentQueue = Singleton.getCodeIndexQueue();

    // Convert once outside the main loop
    String fileRepoLocations = FilenameUtils.separatorsToUnix(repoLocations);
    boolean lowMemory = this.LOWMEMORY;

    try {
        Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {

                while (CodeIndexer.shouldPauseAdding()) {
                    Singleton.getLogger().info("Pausing parser.");
                    try {
                        Thread.sleep(SLEEPTIME);
                    } catch (InterruptedException ex) {
                    }
                }

                // Convert Path file to unix style that way everything is easier to reason about
                String fileParent = FilenameUtils.separatorsToUnix(file.getParent().toString());
                String fileToString = FilenameUtils.separatorsToUnix(file.toString());
                String fileName = file.getFileName().toString();
                String md5Hash = Values.EMPTYSTRING;

                if (fileParent.endsWith("/.svn") || fileParent.contains("/.svn/")) {
                    return FileVisitResult.CONTINUE;
                }

                List<String> codeLines;
                try {
                    codeLines = Helpers.readFileLines(fileToString, MAXFILELINEDEPTH);
                } catch (IOException ex) {
                    return FileVisitResult.CONTINUE;
                }

                try {
                    FileInputStream fis = new FileInputStream(new File(fileToString));
                    md5Hash = org.apache.commons.codec.digest.DigestUtils.md5Hex(fis);
                    fis.close();
                } catch (IOException ex) {
                    Singleton.getLogger().warning("Unable to generate MD5 for " + fileToString);
                }

                // is the file minified?
                if (scl.isMinified(codeLines)) {
                    Singleton.getLogger().info("Appears to be minified will not index  " + fileToString);
                    return FileVisitResult.CONTINUE;
                }

                String languageName = scl.languageGuesser(fileName, codeLines);
                String fileLocation = fileToString.replace(fileRepoLocations, Values.EMPTYSTRING)
                        .replace(fileName, Values.EMPTYSTRING);
                String fileLocationFilename = fileToString.replace(fileRepoLocations, Values.EMPTYSTRING);
                String repoLocationRepoNameLocationFilename = fileToString;

                String newString = getBlameFilePath(fileLocationFilename);
                String codeOwner = getInfoExternal(codeLines.size(), repoName, fileRepoLocations, newString)
                        .getName();

                // If low memory don't add to the queue, just index it directly
                if (lowMemory) {
                    CodeIndexer.indexDocument(new CodeIndexDocument(repoLocationRepoNameLocationFilename,
                            repoName, fileName, fileLocation, fileLocationFilename, md5Hash, languageName,
                            codeLines.size(), StringUtils.join(codeLines, " "), repoRemoteLocation, codeOwner));
                } else {
                    Singleton.incrementCodeIndexLinesCount(codeLines.size());
                    codeIndexDocumentQueue.add(new CodeIndexDocument(repoLocationRepoNameLocationFilename,
                            repoName, fileName, fileLocation, fileLocationFilename, md5Hash, languageName,
                            codeLines.size(), StringUtils.join(codeLines, " "), repoRemoteLocation, codeOwner));
                }

                fileLocations.add(fileLocationFilename);
                return FileVisitResult.CONTINUE;
            }
        });
    } catch (IOException ex) {
        Singleton.getLogger().warning("ERROR - caught a " + ex.getClass() + " in " + this.getClass()
                + "\n with message: " + ex.getMessage());
    }

    if (existingRepo) {
        CodeSearcher cs = new CodeSearcher();
        List<String> indexLocations = cs.getRepoDocuments(repoName);

        for (String file : indexLocations) {
            if (!fileLocations.contains(file)) {
                Singleton.getLogger().info("Missing from disk, removing from index " + file);
                try {
                    CodeIndexer.deleteByFileLocationFilename(file);
                } catch (IOException ex) {
                    Singleton.getLogger().warning("ERROR - caught a " + ex.getClass() + " in " + this.getClass()
                            + "\n with message: " + ex.getMessage());
                }
            }
        }
    }
}

From source file:it.geosolutions.geobatch.actions.freemarker.FreeMarkerAction.java

/**
 * Removes TemplateModelEvents from the queue and puts the generated output files into the returned event queue.
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    listenerForwarder.started();

    listenerForwarder.setTask("initializing the FreeMarker engine");
    if (!initialized) {
        try {
            initialize();
        } catch (IllegalArgumentException e) {
            throw new ActionException(this, e.getLocalizedMessage(), e.getCause());
        } catch (IOException e) {
            throw new ActionException(this, e.getLocalizedMessage(), e.getCause());
        }
    }

    listenerForwarder.setTask("build the output absolute file name");

    // build the output absolute file name
    File outputDir = computeOutputDir(); // may throw ActionEx

    // return
    final Queue<EventObject> ret = new LinkedList<EventObject>();

    listenerForwarder.setTask("Building/getting the root data structure");
    /*
     * Building/getting the root data structure
     */
    final Map<String, Object> root = conf.getRoot() != null ? conf.getRoot() : new HashMap<String, Object>();

    // list of incoming event to inject into the root datamodel
    final List<TemplateModel> list;
    if (conf.isNtoN()) {
        list = new ArrayList<TemplateModel>(events.size());
    } else {
        list = new ArrayList<TemplateModel>(1);
    }
    // append the list of adapted event objects
    root.put(TemplateModelEvent.EVENT_KEY, list);

    while (!events.isEmpty()) {
        // the adapted event
        final TemplateModelEvent ev;
        final TemplateModel dataModel;
        try {
            if ((ev = adapter(events.remove())) != null) {
                listenerForwarder.setTask("Try to get a Template DataModel from the Adapted event");
                // try to get a Template DataModel from the Adapted event
                dataModel = ev.getModel(processor);

            } else {
                final String message = "Unable to append the event: unrecognized format. SKIPPING...";
                if (LOGGER.isErrorEnabled()) {
                    LOGGER.error(message);
                }
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    final ActionException e = new ActionException(this, message);
                    listenerForwarder.failed(e);
                    throw e;
                }
            }
        } catch (TemplateModelException tme) {
            final String message = "Unable to wrap the passed object: " + tme.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            if (conf.isFailIgnored()) {
                continue;
            } else {
                listenerForwarder.failed(tme);
                throw new ActionException(this, tme.getLocalizedMessage());
            }
        } catch (Exception ioe) {
            final String message = "Unable to produce the output: " + ioe.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            if (conf.isFailIgnored()) {
                continue;
            } else {
                listenerForwarder.failed(ioe);
                throw new ActionException(this, ioe.getLocalizedMessage(), ioe);
            }
        }

        listenerForwarder.setTask("Generating the output");
        /*
         * If getNtoN: For each data incoming event (Template DataModel)
         * build a file. Otherwise the entire queue of incoming object will
         * be transformed in a list of datamodel. In this case only one file
         * is generated.
         */
        if (conf.isNtoN()) {

            if (list.size() > 0) {
                list.remove(0);
            }
            list.add(dataModel);

            final File outputFile;
            // append the incoming data structure
            try {
                outputFile = buildOutput(outputDir, root);
            } catch (ActionException e) {
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(e.getLocalizedMessage(), e);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    listenerForwarder.failed(e);
                    throw e;
                }
            }
            // add the file to the return
            ret.add(new FileSystemEvent(outputFile.getAbsoluteFile(), FileSystemEventType.FILE_ADDED));
        } else {
            list.add(dataModel);
        }
    }

    if (!conf.isNtoN()) {
        final File outputFile;
        // append the incoming data structure
        try {
            outputFile = buildOutput(outputDir, root);
        } catch (ActionException e) {
            if (LOGGER.isErrorEnabled())
                LOGGER.error(e.getLocalizedMessage(), e);
            listenerForwarder.failed(e);
            throw e;
        }
        // add the file to the return
        ret.add(new FileSystemEvent(outputFile.getAbsoluteFile(), FileSystemEventType.FILE_ADDED));
    }

    listenerForwarder.completed();
    return ret;
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

@Override
public void asyncOffloadPrefix(Position pos, OffloadCallback callback, Object ctx) {
    PositionImpl requestOffloadTo = (PositionImpl) pos;
    if (!isValidPosition(requestOffloadTo)) {
        callback.offloadFailed(new InvalidCursorPositionException("Invalid position for offload"), ctx);
        return;
    }

    PositionImpl firstUnoffloaded;

    Queue<LedgerInfo> ledgersToOffload = new ConcurrentLinkedQueue<>();
    synchronized (this) {
        log.info("[{}] Start ledgersOffload. ledgers={} totalSize={}", name, ledgers.keySet(),
                TOTAL_SIZE_UPDATER.get(this));

        if (STATE_UPDATER.get(this) == State.Closed) {
            log.info("[{}] Ignoring offload request since the managed ledger was already closed", name);
            callback.offloadFailed(new ManagedLedgerAlreadyClosedException(
                    "Can't offload closed managed ledger (" + name + ")"), ctx);
            return;
        }

        if (ledgers.isEmpty()) {
            log.info("[{}] Tried to offload a managed ledger with no ledgers, giving up", name);
            callback.offloadFailed(new ManagedLedgerAlreadyClosedException(
                    "Can't offload managed ledger (" + name + ") with no ledgers"), ctx);
            return;
        }

        long current = ledgers.lastKey();

        // the first ledger which will not be offloaded. Defaults to current,
        // in the case that the whole headmap is offloaded. Otherwise it will
        // be set as we iterate through the headmap values
        long firstLedgerRetained = current;
        for (LedgerInfo ls : ledgers.headMap(current).values()) {
            if (requestOffloadTo.getLedgerId() > ls.getLedgerId()) {
                // don't offload if ledger has already been offloaded, or is empty
                if (!ls.getOffloadContext().getComplete() && ls.getSize() > 0) {
                    ledgersToOffload.add(ls);
                }
            } else {
                firstLedgerRetained = ls.getLedgerId();
                break;
            }
        }
        firstUnoffloaded = PositionImpl.get(firstLedgerRetained, 0);
    }

    if (ledgersToOffload.isEmpty()) {
        log.info("[{}] No ledgers to offload", name);
        callback.offloadComplete(firstUnoffloaded, ctx);
        return;
    }

    if (offloadMutex.tryLock()) {
        log.info("[{}] Going to offload ledgers {}", name,
                ledgersToOffload.stream().map(l -> l.getLedgerId()).collect(Collectors.toList()));

        CompletableFuture<PositionImpl> promise = new CompletableFuture<>();
        promise.whenComplete((result, exception) -> {
            offloadMutex.unlock();
            if (exception != null) {
                callback.offloadFailed(new ManagedLedgerException(exception), ctx);
            } else {
                callback.offloadComplete(result, ctx);
            }
        });
        offloadLoop(promise, ledgersToOffload, firstUnoffloaded, Optional.empty());
    } else {
        callback.offloadFailed(
                new ManagedLedgerException.OffloadInProgressException("Offload operation already running"),
                ctx);
    }
}

From source file:org.dkpro.lab.engine.impl.MultiThreadBatchTaskEngine.java

@Override
protected void executeConfiguration(BatchTask aConfiguration, TaskContext aContext, Map<String, Object> aConfig,
        Set<String> aExecutedSubtasks) throws ExecutionException, LifeCycleException {
    if (log.isTraceEnabled()) {
        // Show all subtasks executed so far
        for (String est : aExecutedSubtasks) {
            log.trace("-- Already executed: " + est);
        }
    }

    // Set up initial scope used by sub-batch-tasks using the inherited scope. The scope is
    // extended as the subtasks of this batch are executed with the present configuration.
    // FIXME: That means that sub-batch-tasks in two different configurations cannot see
    // each other. Is that intended? Mind that the "executedSubtasks" set is intentionally
    // maintained *across* configurations, so maybe the scope should also be maintained
    // *across* configurations? - REC 2014-06-15
    Set<String> scope = new HashSet<>();
    if (aConfiguration.getScope() != null) {
        scope.addAll(aConfiguration.getScope());
    }

    // Configure subtasks
    for (Task task : aConfiguration.getTasks()) {
        // Now the setup is complete
        aContext.getLifeCycleManager().configure(aContext, task, aConfig);
    }

    Queue<Task> queue = new LinkedList<>(aConfiguration.getTasks());
    // keeps track of the execution threads; 
    // TODO MW: do we really need this or can we work with the futures list only?
    Map<Task, ExecutionThread> threads = new HashMap<>();
    // keeps track of submitted Futures and their associated tasks
    Map<Future<?>, Task> futures = new HashMap<Future<?>, Task>();
    // will be instantiated with all exceptions from current loop
    ConcurrentMap<Task, Throwable> exceptionsFromLastLoop = null;
    ConcurrentMap<Task, Throwable> exceptionsFromCurrentLoop = new ConcurrentHashMap<>();

    int outerLoopCounter = 0;

    // main loop
    do {
        outerLoopCounter++;

        threads.clear();
        futures.clear();
        ExecutorService executor = Executors.newFixedThreadPool(maxThreads);

        // set the exceptions from the last loop
        exceptionsFromLastLoop = new ConcurrentHashMap<>(exceptionsFromCurrentLoop);

        // Fix MW: Clear exceptionsFromCurrentLoop; otherwise the loop will run at most twice.
        exceptionsFromCurrentLoop.clear();

        // process all tasks from the queue
        while (!queue.isEmpty()) {
            Task task = queue.poll();

            TaskContextMetadata execution = getExistingExecution(aConfiguration, aContext, task, aConfig,
                    aExecutedSubtasks);

            // Check if a subtask execution compatible with the present configuration
            // already exists ...
            if (execution == null) {
                // ... otherwise execute it with the present configuration
                log.info("Executing task [" + task.getType() + "]");

                // set scope here so that the inherited scopes are considered
                if (task instanceof BatchTask) {
                    ((BatchTask) task).setScope(scope);
                }

                ExecutionThread thread = new ExecutionThread(aContext, task, aConfig, aExecutedSubtasks);
                threads.put(task, thread);

                futures.put(executor.submit(thread), task);
            } else {
                log.debug("Using existing execution [" + execution.getId() + "]");

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

        // try and get results from all futures to check for failed executions
        for (Map.Entry<Future<?>, Task> entry : futures.entrySet()) {
            try {
                entry.getKey().get();
            } catch (java.util.concurrent.ExecutionException ex) {
                Task task = entry.getValue();
                // TODO MW: add a retry-counter here to prevent endless loops?
                log.info("Task exec failed for [" + task.getType() + "]");
                // record the failed task, so that it can be re-added to the queue
                exceptionsFromCurrentLoop.put(task, ex);
            } catch (InterruptedException ex) {
                // thread interrupted, exit
                throw new RuntimeException(ex);
            }
        }

        log.debug("Calling shutdown");
        executor.shutdown();
        log.debug("All threads finished");

        // collect the results
        for (Map.Entry<Task, ExecutionThread> entry : threads.entrySet()) {
            Task task = entry.getKey();
            ExecutionThread thread = entry.getValue();
            TaskContextMetadata execution = thread.getTaskContextMetadata();

            // probably failed
            if (execution == null) {
                Throwable exception = exceptionsFromCurrentLoop.get(task);
                if (!(exception instanceof UnresolvedImportException)
                        && !(exception instanceof java.util.concurrent.ExecutionException)) {
                    throw new RuntimeException(exception);
                }
                exceptionsFromCurrentLoop.put(task, exception);

                // re-add to the queue
                queue.add(task);
            } else {

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

    }
    // finish if the same tasks failed again
    while (!exceptionsFromCurrentLoop.keySet().equals(exceptionsFromLastLoop.keySet()));
    // END OF DO; finish if the same tasks failed again

    if (!exceptionsFromCurrentLoop.isEmpty()) {
        // collect all details
        StringBuilder details = new StringBuilder();
        for (Throwable throwable : exceptionsFromCurrentLoop.values()) {
            details.append("\n -");
            details.append(throwable.getMessage());
        }

        // we re-throw the first exception
        Throwable next = exceptionsFromCurrentLoop.values().iterator().next();
        if (next instanceof RuntimeException) {
            throw (RuntimeException) next;
        }

        // otherwise wrap it
        throw new RuntimeException(details.toString(), next);
    }
    log.info("MultiThreadBatchTask completed successfully. Total number of outer loop runs: "
            + outerLoopCounter);
}