Example usage for java.util Queue size

List of usage examples for java.util Queue size

Introduction

On this page you can find example usage of java.util.Queue.size().

Prototype

int size();

Document

Returns the number of elements in this collection. If this collection contains more than Integer.MAX_VALUE elements, returns Integer.MAX_VALUE.
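
Before the full examples, here is a minimal, self-contained sketch (not drawn from the examples below) showing how size() reports the number of elements currently in a queue:

import java.util.LinkedList;
import java.util.Queue;

public class QueueSizeExample {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        System.out.println(queue.size()); // 0 - the queue starts empty

        queue.add("first");
        queue.add("second");
        System.out.println(queue.size()); // 2 - both elements are queued

        queue.poll(); // removes and returns the head element
        System.out.println(queue.size()); // 1 - one element left
    }
}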

Usage

From source file:cn.edu.bjtu.cit.recommender.Recommender.java

@SuppressWarnings("unchecked")
public int run(String[] args) throws Exception {
    if (args.length < 2) {
        System.err.println();
        System.err.println("Usage: " + this.getClass().getName()
                + " [generic options] input output [profiling] [estimation] [clustersize]");
        System.err.println();
        printUsage();
        GenericOptionsParser.printGenericCommandUsage(System.err);

        return 1;
    }
    OptionParser parser = new OptionParser(args);

    Pipeline pipeline = new MRPipeline(Recommender.class, getConf());

    if (parser.hasOption(CLUSTER_SIZE)) {
        pipeline.getConfiguration().setInt(ClusterOracle.CLUSTER_SIZE,
                Integer.parseInt(parser.getOption(CLUSTER_SIZE).getValue()));
    }

    if (parser.hasOption(PROFILING)) {
        pipeline.getConfiguration().setBoolean(Profiler.IS_PROFILE, true);
        this.profileFilePath = parser.getOption(PROFILING).getValue();

    }

    if (parser.hasOption(ESTIMATION)) {
        estFile = parser.getOption(ESTIMATION).getValue();
        est = new Estimator(estFile, clusterSize);
    }

    if (parser.hasOption(OPT_REDUCE)) {
        pipeline.getConfiguration().setBoolean(OPT_REDUCE, true);
    }

    if (parser.hasOption(OPT_MSCR)) {
        pipeline.getConfiguration().setBoolean(OPT_MSCR, true);
    }

    if (parser.hasOption(ACTIVE_THRESHOLD)) {
        threshold = Integer.parseInt(parser.getOption("at").getValue());
    }

    if (parser.hasOption(TOP)) {
        top = Integer.parseInt(parser.getOption("top").getValue());
    }

    profiler = new Profiler(pipeline);
    /*
     * input node
     */
    PCollection<String> lines = pipeline.readTextFile(args[0]);

    if (profiler.isProfiling() && lines.getSize() > 10 * 1024 * 1024) {
        lines = lines.sample(0.1);
    }

    /*
     * S0 + GBK
     */
    PGroupedTable<Long, Long> userWithPrefs = lines.parallelDo(new MapFn<String, Pair<Long, Long>>() {

        @Override
        public Pair<Long, Long> map(String input) {
            String[] split = input.split(Estimator.DELM);
            long userID = Long.parseLong(split[0]);
            long itemID = Long.parseLong(split[1]);
            return Pair.of(userID, itemID);
        }

        @Override
        public float scaleFactor() {
            return est.getScaleFactor("S0").sizeFactor;
        }

        @Override
        public float scaleFactorByRecord() {
            return est.getScaleFactor("S0").recsFactor;
        }
    }, Writables.tableOf(Writables.longs(), Writables.longs())).groupByKey(est.getClusterSize());

    /*
     * S1
     */
    PTable<Long, Vector> userVector = userWithPrefs
            .parallelDo(new MapFn<Pair<Long, Iterable<Long>>, Pair<Long, Vector>>() {
                @Override
                public Pair<Long, Vector> map(Pair<Long, Iterable<Long>> input) {
                    Vector userVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
                    for (long itemPref : input.second()) {
                        userVector.set((int) itemPref, 1.0f);
                    }
                    return Pair.of(input.first(), userVector);
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S1").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S1").recsFactor;
                }
            }, Writables.tableOf(Writables.longs(), Writables.vectors()));

    userVector = profiler.profile("S0-S1", pipeline, userVector, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S2
     */
    PTable<Long, Vector> filteredUserVector = userVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Long, Vector>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Long, Vector>> emitter) {
                    if (input.second().getNumNondefaultElements() > threshold) {
                        emitter.emit(input);
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S2").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S2").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), Writables.vectors()));

    filteredUserVector = profiler.profile("S2", pipeline, filteredUserVector, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S3 + GBK
     */
    PGroupedTable<Integer, Integer> coOccurencePairs = filteredUserVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Integer, Integer>>() {
                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Integer, Integer>> emitter) {
                    Iterator<Vector.Element> it = input.second().iterateNonZero();
                    while (it.hasNext()) {
                        int index1 = it.next().index();
                        Iterator<Vector.Element> it2 = input.second().iterateNonZero();
                        while (it2.hasNext()) {
                            int index2 = it2.next().index();
                            emitter.emit(Pair.of(index1, index2));
                        }
                    }
                }

                @Override
                public float scaleFactor() {
                    float size = est.getScaleFactor("S3").sizeFactor;
                    return size;
                }

                @Override
                public float scaleFactorByRecord() {
                    float recs = est.getScaleFactor("S3").recsFactor;
                    return recs;
                }
            }, Writables.tableOf(Writables.ints(), Writables.ints())).groupByKey(est.getClusterSize());

    /*
     * S4
     */
    PTable<Integer, Vector> coOccurenceVector = coOccurencePairs
            .parallelDo(new MapFn<Pair<Integer, Iterable<Integer>>, Pair<Integer, Vector>>() {
                @Override
                public Pair<Integer, Vector> map(Pair<Integer, Iterable<Integer>> input) {
                    Vector cooccurrenceRow = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
                    for (int itemIndex2 : input.second()) {
                        cooccurrenceRow.set(itemIndex2, cooccurrenceRow.get(itemIndex2) + 1.0);
                    }
                    return Pair.of(input.first(), cooccurrenceRow);
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S4").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S4").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), Writables.vectors()));

    coOccurenceVector = profiler.profile("S3-S4", pipeline, coOccurenceVector, ProfileConverter.int_vector(),
            Writables.tableOf(Writables.ints(), Writables.vectors()));

    /*
     * S5 Wrapping co-occurrence columns
     */
    PTable<Integer, VectorOrPref> wrappedCooccurrence = coOccurenceVector
            .parallelDo(new MapFn<Pair<Integer, Vector>, Pair<Integer, VectorOrPref>>() {

                @Override
                public Pair<Integer, VectorOrPref> map(Pair<Integer, Vector> input) {
                    return Pair.of(input.first(), new VectorOrPref(input.second()));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S5").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S5").recsFactor;
                }

            }, Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    wrappedCooccurrence = profiler.profile("S5", pipeline, wrappedCooccurrence, ProfileConverter.int_vopv(),
            Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    /*
     * S6 Splitting user vectors
     */
    PTable<Integer, VectorOrPref> userVectorSplit = filteredUserVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Integer, VectorOrPref>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Integer, VectorOrPref>> emitter) {
                    long userID = input.first();
                    Vector userVector = input.second();
                    Iterator<Vector.Element> it = userVector.iterateNonZero();
                    while (it.hasNext()) {
                        Vector.Element e = it.next();
                        int itemIndex = e.index();
                        float preferenceValue = (float) e.get();
                        emitter.emit(Pair.of(itemIndex, new VectorOrPref(userID, preferenceValue)));
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S6").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S6").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    userVectorSplit = profiler.profile("S6", pipeline, userVectorSplit, ProfileConverter.int_vopp(),
            Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    /*
     * S7 Combine VectorOrPrefs
     */
    PTable<Integer, VectorAndPrefs> combinedVectorOrPref = wrappedCooccurrence.union(userVectorSplit)
            .groupByKey(est.getClusterSize())
            .parallelDo(new DoFn<Pair<Integer, Iterable<VectorOrPref>>, Pair<Integer, VectorAndPrefs>>() {

                @Override
                public void process(Pair<Integer, Iterable<VectorOrPref>> input,
                        Emitter<Pair<Integer, VectorAndPrefs>> emitter) {
                    Vector vector = null;
                    List<Long> userIDs = Lists.newArrayList();
                    List<Float> values = Lists.newArrayList();
                    for (VectorOrPref vop : input.second()) {
                        if (vector == null) {
                            vector = vop.getVector();
                        }
                        long userID = vop.getUserID();
                        if (userID != Long.MIN_VALUE) {
                            userIDs.add(vop.getUserID());
                        }
                        float value = vop.getValue();
                        if (!Float.isNaN(value)) {
                            values.add(vop.getValue());
                        }
                    }
                    emitter.emit(Pair.of(input.first(), new VectorAndPrefs(vector, userIDs, values)));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S7").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S7").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), VectorAndPrefs.vectorAndPrefs()));

    combinedVectorOrPref = profiler.profile("S5+S6-S7", pipeline, combinedVectorOrPref,
            ProfileConverter.int_vap(), Writables.tableOf(Writables.ints(), VectorAndPrefs.vectorAndPrefs()));
    /*
     * S8 Computing partial recommendation vectors
     */
    PTable<Long, Vector> partialMultiply = combinedVectorOrPref
            .parallelDo(new DoFn<Pair<Integer, VectorAndPrefs>, Pair<Long, Vector>>() {
                @Override
                public void process(Pair<Integer, VectorAndPrefs> input, Emitter<Pair<Long, Vector>> emitter) {
                    Vector cooccurrenceColumn = input.second().getVector();
                    List<Long> userIDs = input.second().getUserIDs();
                    List<Float> prefValues = input.second().getValues();
                    for (int i = 0; i < userIDs.size(); i++) {
                        long userID = userIDs.get(i);
                        if (userID != Long.MIN_VALUE) {
                            float prefValue = prefValues.get(i);
                            Vector partialProduct = cooccurrenceColumn.times(prefValue);
                            emitter.emit(Pair.of(userID, partialProduct));
                        }
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S8").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S8").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), Writables.vectors())).groupByKey(est.getClusterSize())
            .combineValues(new CombineFn<Long, Vector>() {

                @Override
                public void process(Pair<Long, Iterable<Vector>> input, Emitter<Pair<Long, Vector>> emitter) {
                    Vector partial = null;
                    for (Vector vector : input.second()) {
                        partial = partial == null ? vector : partial.plus(vector);
                    }
                    emitter.emit(Pair.of(input.first(), partial));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("combine").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("combine").recsFactor;
                }
            });

    partialMultiply = profiler.profile("S8-combine", pipeline, partialMultiply, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S9 Producing recommendations from vectors
     */
    PTable<Long, RecommendedItems> recommendedItems = partialMultiply
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Long, RecommendedItems>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Long, RecommendedItems>> emitter) {
                    Queue<RecommendedItem> topItems = new PriorityQueue<RecommendedItem>(11,
                            Collections.reverseOrder(BY_PREFERENCE_VALUE));
                    Iterator<Vector.Element> recommendationVectorIterator = input.second().iterateNonZero();
                    while (recommendationVectorIterator.hasNext()) {
                        Vector.Element element = recommendationVectorIterator.next();
                        int index = element.index();
                        float value = (float) element.get();
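                        // size() bounds the queue to the 'top' best items; once it is full,
                        // a higher-valued item is added and the current head is evicted via poll().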
                        if (topItems.size() < top) {
                            topItems.add(new GenericRecommendedItem(index, value));
                        } else if (value > topItems.peek().getValue()) {
                            topItems.add(new GenericRecommendedItem(index, value));
                            topItems.poll();
                        }
                    }
                    List<RecommendedItem> recommendations = new ArrayList<RecommendedItem>(topItems.size());
                    recommendations.addAll(topItems);
                    Collections.sort(recommendations, BY_PREFERENCE_VALUE);
                    emitter.emit(Pair.of(input.first(), new RecommendedItems(recommendations)));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S9").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S9").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), RecommendedItems.recommendedItems()));

    recommendedItems = profiler.profile("S9", pipeline, recommendedItems, ProfileConverter.long_ri(),
            Writables.tableOf(Writables.longs(), RecommendedItems.recommendedItems()));

    /*
     * Profiling
     */
    if (profiler.isProfiling()) {
        profiler.writeResultToFile(profileFilePath);
        profiler.cleanup(pipeline.getConfiguration());
        return 0;
    }
    /*
     * asText
     */
    pipeline.writeTextFile(recommendedItems, args[1]);
    PipelineResult result = pipeline.done();
    return result.succeeded() ? 0 : 1;
}

From source file:org.apache.gobblin.runtime.TaskStateCollectorService.java

/**
 * Collect output {@link TaskState}s of the tasks of the launched job.
 *
 * <p>
 *   This method collects all available output {@link TaskState} files at the time it is called. It
 *   uses a {@link ParallelRunner} to deserialize the {@link TaskState}s. Each {@link TaskState}
 *   file gets deleted after the {@link TaskState} it stores is successfully collected.
 * </p>
 *
 * @throws IOException if it fails to collect the output {@link TaskState}s
 */
private void collectOutputTaskStates() throws IOException {
    List<String> taskStateNames = taskStateStore.getTableNames(outputTaskStateDir.getName(),
            new Predicate<String>() {
                @Override
                public boolean apply(String input) {
                    return input.endsWith(AbstractJobLauncher.TASK_STATE_STORE_TABLE_SUFFIX)
                            && !input.startsWith(FsStateStore.TMP_FILE_PREFIX);
                }
            });

    if (taskStateNames == null || taskStateNames.size() == 0) {
        LOGGER.debug("No output task state files found in " + this.outputTaskStateDir);
        return;
    }

    final Queue<TaskState> taskStateQueue = Queues.newConcurrentLinkedQueue();
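    // The ParallelRunner callables below add collected TaskStates to this concurrent queue from multiple threads.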
    try (ParallelRunner stateSerDeRunner = new ParallelRunner(this.stateSerDeRunnerThreads, null)) {
        for (final String taskStateName : taskStateNames) {
            LOGGER.debug("Found output task state file " + taskStateName);
            // Deserialize the TaskState and delete the file
            stateSerDeRunner.submitCallable(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    TaskState taskState = taskStateStore.getAll(outputTaskStateDir.getName(), taskStateName)
                            .get(0);
                    taskStateQueue.add(taskState);
                    taskStateStore.delete(outputTaskStateDir.getName(), taskStateName);
                    return null;
                }
            }, "Deserialize state for " + taskStateName);
        }
    } catch (IOException ioe) {
        LOGGER.warn("Could not read all task state files.");
    }

    LOGGER.info(String.format("Collected task state of %d completed tasks", taskStateQueue.size()));

    // Add the TaskStates of completed tasks to the JobState so that, when control
    // returns to the launcher, it sees the TaskStates of all completed tasks.
    for (TaskState taskState : taskStateQueue) {
        taskState.setJobState(this.jobState);
        this.jobState.addTaskState(taskState);
    }

    // Finish any additional steps defined in the handler at the driver level.
    // Currently, only a Hive registration handler is implemented.
    if (optionalTaskCollectorHandler.isPresent()) {
        LOGGER.info(
                "Execute Pipelined TaskStateCollectorService Handler for " + taskStateQueue.size() + " tasks");

        try {
            optionalTaskCollectorHandler.get().handle(taskStateQueue);
        } catch (Throwable t) {
            if (isJobProceedOnCollectorServiceFailure) {
                log.error("Failed to commit dataset while job proceeds", t);
                SafeDatasetCommit.setTaskFailureException(taskStateQueue, t);
            } else {
                throw new RuntimeException("Hive Registration as the TaskStateCollectorServiceHandler failed.",
                        t);
            }
        }
    }

    // Notify the listeners for the completion of the tasks
    this.eventBus.post(new NewTaskCompletionEvent(ImmutableList.copyOf(taskStateQueue)));
}

From source file:eu.itesla_project.modules.validation.OfflineValidationTool.java

@Override
public void run(CommandLine line) throws Exception {
    OfflineConfig config = OfflineConfig.load();
    String rulesDbName = line.hasOption("rules-db-name") ? line.getOptionValue("rules-db-name")
            : OfflineConfig.DEFAULT_RULES_DB_NAME;
    String workflowId = line.getOptionValue("workflow");
    Path outputDir = Paths.get(line.getOptionValue("output-dir"));
    double purityThreshold = line.hasOption("purity-threshold")
            ? Double.parseDouble(line.getOptionValue("purity-threshold"))
            : DEFAULT_PURITY_THRESHOLD;
    Set<Country> countries = Arrays.stream(line.getOptionValue("base-case-countries").split(","))
            .map(Country::valueOf).collect(Collectors.toSet());
    Interval histoInterval = Interval.parse(line.getOptionValue("history-interval"));
    boolean mergeOptimized = line.hasOption("merge-optimized");
    CaseType caseType = CaseType.valueOf(line.getOptionValue("case-type"));

    CaseRepositoryFactory caseRepositoryFactory = config.getCaseRepositoryFactoryClass().newInstance();
    RulesDbClientFactory rulesDbClientFactory = config.getRulesDbClientFactoryClass().newInstance();
    ContingenciesAndActionsDatabaseClient contingencyDb = config.getContingencyDbClientFactoryClass()
            .newInstance().create();
    SimulatorFactory simulatorFactory = config.getSimulatorFactoryClass().newInstance();
    LoadFlowFactory loadFlowFactory = config.getLoadFlowFactoryClass().newInstance();
    MergeOptimizerFactory mergeOptimizerFactory = config.getMergeOptimizerFactoryClass().newInstance();

    SimulationParameters simulationParameters = SimulationParameters.load();

    try (ComputationManager computationManager = new LocalComputationManager();
            RulesDbClient rulesDb = rulesDbClientFactory.create(rulesDbName);
            CsvMetricsDb metricsDb = new CsvMetricsDb(outputDir, true, "metrics")) {

        CaseRepository caseRepository = caseRepositoryFactory.create(computationManager);

        Queue<DateTime> dates = Queues.synchronizedDeque(
                new ArrayDeque<>(caseRepository.dataAvailable(caseType, countries, histoInterval)));

        Map<String, Map<RuleId, ValidationStatus>> statusPerRulePerCase = Collections
                .synchronizedMap(new TreeMap<>());
        Map<String, Map<RuleId, Map<HistoDbAttributeId, Object>>> valuesPerRulePerCase = Collections
                .synchronizedMap(new TreeMap<>());

        int cores = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(cores);
        try {
            List<Future<?>> tasks = new ArrayList<>(cores);
            for (int i = 0; i < cores; i++) {
                tasks.add(executorService.submit((Runnable) () -> {
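                    // Each worker thread drains the shared synchronized queue; size() > 0 checks whether any dates remain.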
                    while (dates.size() > 0) {
                        DateTime date = dates.poll();

                        try {
                            Network network = MergeUtil.merge(caseRepository, date, caseType, countries,
                                    loadFlowFactory, 0, mergeOptimizerFactory, computationManager,
                                    mergeOptimized);

                            System.out.println("case " + network.getId() + " loaded");

                            System.out.println("running simulation on " + network.getId() + "...");

                            network.getStateManager().allowStateMultiThreadAccess(true);
                            String baseStateId = network.getId();
                            network.getStateManager().cloneState(StateManager.INITIAL_STATE_ID, baseStateId);
                            network.getStateManager().setWorkingState(baseStateId);

                            Map<RuleId, ValidationStatus> statusPerRule = new HashMap<>();
                            Map<RuleId, Map<HistoDbAttributeId, Object>> valuesPerRule = new HashMap<>();

                            LoadFlow loadFlow = loadFlowFactory.create(network, computationManager, 0);
                            LoadFlowResult loadFlowResult = loadFlow.run();

                            System.err.println("load flow terminated (" + loadFlowResult.isOk() + ") on "
                                    + network.getId());

                            if (loadFlowResult.isOk()) {
                                Stabilization stabilization = simulatorFactory.createStabilization(network,
                                        computationManager, 0);
                                ImpactAnalysis impactAnalysis = simulatorFactory.createImpactAnalysis(network,
                                        computationManager, 0, contingencyDb);
                                Map<String, Object> context = new HashMap<>();
                                stabilization.init(simulationParameters, context);
                                impactAnalysis.init(simulationParameters, context);
                                StabilizationResult stabilizationResult = stabilization.run();

                                System.err.println("stabilization terminated ("
                                        + stabilizationResult.getStatus() + ") on " + network.getId());

                                metricsDb.store(workflowId, network.getId(), "STABILIZATION",
                                        stabilizationResult.getMetrics());

                                if (stabilizationResult.getStatus() == StabilizationStatus.COMPLETED) {
                                    ImpactAnalysisResult impactAnalysisResult = impactAnalysis
                                            .run(stabilizationResult.getState());

                                    System.err.println("impact analysis terminated on " + network.getId());

                                    metricsDb.store(workflowId, network.getId(), "IMPACT_ANALYSIS",
                                            impactAnalysisResult.getMetrics());

                                    System.out.println("checking rules on " + network.getId() + "...");

                                    for (SecurityIndex securityIndex : impactAnalysisResult
                                            .getSecurityIndexes()) {
                                        for (RuleAttributeSet attributeSet : RuleAttributeSet.values()) {
                                            statusPerRule.put(new RuleId(attributeSet, securityIndex.getId()),
                                                    new ValidationStatus(null, securityIndex.isOk()));
                                        }
                                    }
                                }
                            }

                            Map<HistoDbAttributeId, Object> values = IIDM2DB
                                    .extractCimValues(network, new IIDM2DB.Config(null, false))
                                    .getSingleValueMap();
                            for (RuleAttributeSet attributeSet : RuleAttributeSet.values()) {
                                for (Contingency contingency : contingencyDb.getContingencies(network)) {
                                    List<SecurityRule> securityRules = rulesDb.getRules(workflowId,
                                            attributeSet, contingency.getId(), null);
                                    for (SecurityRule securityRule : securityRules) {
                                        SecurityRuleExpression securityRuleExpression = securityRule
                                                .toExpression(purityThreshold);
                                        SecurityRuleCheckReport checkReport = securityRuleExpression
                                                .check(values);

                                        valuesPerRule.put(securityRule.getId(),
                                                ExpressionAttributeList
                                                        .list(securityRuleExpression.getCondition()).stream()
                                                        .collect(Collectors.toMap(attributeId -> attributeId,
                                                                new Function<HistoDbAttributeId, Object>() {
                                                                    @Override
                                                                    public Object apply(
                                                                            HistoDbAttributeId attributeId) {
                                                                        Object value = values.get(attributeId);
                                                                        return value != null ? value
                                                                                : Float.NaN;
                                                                    }
                                                                })));

                                        ValidationStatus status = statusPerRule.get(securityRule.getId());
                                        if (status == null) {
                                            status = new ValidationStatus(null, null);
                                            statusPerRule.put(securityRule.getId(), status);
                                        }
                                        if (checkReport.getMissingAttributes().isEmpty()) {
                                            status.setRuleOk(checkReport.isSafe());
                                        }
                                    }
                                }
                            }

                            statusPerRulePerCase.put(network.getId(), statusPerRule);
                            valuesPerRulePerCase.put(network.getId(), valuesPerRule);
                        } catch (Exception e) {
                            LOGGER.error(e.toString(), e);
                        }
                    }
                }));
            }
            for (Future<?> task : tasks) {
                task.get();
            }
        } finally {
            executorService.shutdown();
            executorService.awaitTermination(1, TimeUnit.MINUTES);
        }

        writeCsv(statusPerRulePerCase, valuesPerRulePerCase, outputDir);
    }
}

From source file:edu.uci.ics.hyracks.api.rewriter.runtime.SuperActivityOperatorNodePushable.java

public void init() throws HyracksDataException {
    Map<ActivityId, IOperatorNodePushable> startOperatorNodePushables = new HashMap<ActivityId, IOperatorNodePushable>();
    Queue<Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> childQueue = new LinkedList<Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>>();
    List<IConnectorDescriptor> outputConnectors = null;

    /**
     * Set up the source operators
     */
    for (Entry<ActivityId, IActivity> entry : startActivities.entrySet()) {
        IOperatorNodePushable opPushable = entry.getValue().createPushRuntime(ctx, recordDescProvider,
                partition, nPartitions);
        startOperatorNodePushables.put(entry.getKey(), opPushable);
        operatprNodePushablesBFSOrder.add(opPushable);
        operatorNodePushables.put(entry.getKey(), opPushable);
        inputArity += opPushable.getInputArity();
        outputConnectors = parent.getActivityOutputMap().get(entry.getKey());
        if (outputConnectors != null) {
            for (IConnectorDescriptor conn : outputConnectors) {
                childQueue.add(parent.getConnectorActivityMap().get(conn.getConnectorId()));
            }
        }
    }

    /**
     * Use BFS (breadth-first search) to construct the runtime execution
     * DAG.
     */
    while (childQueue.size() > 0) {
        /**
         * expand the executing activities further downstream
         */
        if (outputConnectors != null && outputConnectors.size() > 0) {
            for (IConnectorDescriptor conn : outputConnectors) {
                if (conn != null) {
                    childQueue.add(parent.getConnectorActivityMap().get(conn.getConnectorId()));
                }
            }
        }

        /**
         * construct the source to destination information
         */
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> channel = childQueue.poll();
        ActivityId sourceId = channel.getLeft().getLeft().getActivityId();
        int outputChannel = channel.getLeft().getRight();
        ActivityId destId = channel.getRight().getLeft().getActivityId();
        int inputChannel = channel.getRight().getRight();
        IOperatorNodePushable sourceOp = operatorNodePushables.get(sourceId);
        IOperatorNodePushable destOp = operatorNodePushables.get(destId);
        if (destOp == null) {
            destOp = channel.getRight().getLeft().createPushRuntime(ctx, recordDescProvider, partition,
                    nPartitions);
            operatprNodePushablesBFSOrder.add(destOp);
            operatorNodePushables.put(destId, destOp);
        }

        /**
         * construct the dataflow connection from a producer to a consumer
         */
        sourceOp.setOutputFrameWriter(outputChannel, destOp.getInputFrameWriter(inputChannel),
                recordDescProvider.getInputRecordDescriptor(destId, inputChannel));

        /**
         * traverse to the child of the current activity
         */
        outputConnectors = parent.getActivityOutputMap().get(destId);
    }
}

From source file:com.dell.asm.asmcore.asmmanager.util.template.adjuster.ClusterAdjuster.java

/**
 * If the cluster has NO server, create predefined VDS and port groups:
 * PXE VDS - [User selects from VDS available in the datacenter]
 * PXE Port Group - [ User selects from available port groups on the PXE VDS]
 * Workload VDS - [ User selects from VDS available in the datacenter]
 *
 * @param cluster
 * @param allNetworks
 * @param hasServer
 */
private void refineClusterByServerNetworks(ServiceTemplateComponent cluster,
        List<PartitionNetworks> allNetworks, boolean hasServer) {
    // check if it is vCenter cluster
    ServiceTemplateCategory vdsCategory = cluster
            .getTemplateResource(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID);
    if (vdsCategory == null)
        return;

    int v = 1;
    ServiceTemplateSetting vdsNameZero = cluster.getParameter(
            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_NAME_ID);

    // newly added VDS members
    List<ServiceTemplateSetting> vdsAdded = new ArrayList<>();

    ServiceTemplateSetting vdsNew = null;

    if (hasServer) {

        // Restore the Enable VMware vSAN option if a server is associated with the cluster
        ServiceTemplateSetting enableVmwareVsan = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_ID,
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_CLUSTER_VSAN_ID);
        if (enableVmwareVsan != null) {
            enableVmwareVsan.setHideFromTemplate(false);
        }

        // first need to count some networks
        List<Network> iscsiNets = new ArrayList<>();
        List<Network> vsanNets = new ArrayList<>();

        for (PartitionNetworks pn : allNetworks) {
            for (Network nConfig : pn.getNetworks()) {
                if (NetworkType.STORAGE_ISCSI_SAN.equals(nConfig.getType())) {
                    // replace "iscsi" in the network ID with a combination of sorted iSCSI net IDs
                    List<String> sortedNetIDs = pn.sortISCSINetworks();
                    nConfig.setId(StringUtils.join(sortedNetIDs, "-"));

                    // will need to count later
                    if (!iscsiNets.contains(nConfig)) {
                        iscsiNets.add(nConfig);
                    }
                }
            }
        }

        for (PartitionNetworks pn : allNetworks) {
            pn.sortById();

            ServiceTemplateSetting vdsName = cluster.getParameter(
                    ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                    ServiceTemplateClientUtil.createVDSID(pn.getId()));

            String uiGroupName = "VDS " + v;
            if (vdsName == null) {
                vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                        ServiceTemplateClientUtil.createVDSID(pn.getId()), "VDS Name", uiGroupName,
                        ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
            } else {
                // upgrade options only
                vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
            }

            // hard reset for UI group
            vdsName.setGroup(uiGroupName);

            vdsName.setHideFromTemplate(false);
            vdsAdded.add(vdsName);

            // $new$
            vdsNew = vdsCategory.getParameter(
                    ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
            if (vdsNew != null) {
                vdsNew.setGroup(uiGroupName);
                vdsAdded.add(vdsNew);
            }

            if (pn.hasManagementNetwork()) {
                vdsName.setRequired(true);
                if (vdsNew != null)
                    vdsNew.setRequired(true);
            }

            // for each network find or create PG
            Queue<NetworkObject> iscsiNetworkFIFO = new LinkedList<>();
            iscsiNetworkFIFO.addAll(pn.getIscsiNetworks());

            for (Network nConfig : pn.getNetworks()) {
                Queue<NetworkObject> currentQueue = null;
                String portGroupName = nConfig.getName() + " Port Group";
                int cnt = 1;
                if (NetworkType.STORAGE_ISCSI_SAN.equals(nConfig.getType())) {
                    currentQueue = iscsiNetworkFIFO;
                    if (iscsiNets.size() == 1) {
                        cnt = 2; // 2 port groups, but only if we have a single iSCSI network.
                    }
                }

                boolean incrementPortGroup = (cnt > 1 && currentQueue.size() == 1);
                // multiple PGs for certain networks
                for (int j = 1; j <= cnt; j++) {
                    String currGroupName = portGroupName;
                    String portGroupSuffix = "";
                    if (incrementPortGroup) {
                        portGroupSuffix = " " + j;
                    }

                    String pgNetworkID = nConfig.getId();
                    // There can be only 1 or 2 iSCSI networks, but we always need
                    // 2 port groups for such networks. Names and IDs have to be
                    // picked from the dedicated list.
                    if (pgNetworkID.contains("-") && currentQueue != null) {
                        NetworkObject networkObject = currentQueue.remove();
                        if (networkObject != null) {
                            pgNetworkID = networkObject.getId();
                            currGroupName = networkObject.getName() + " Port Group";
                        }
                    }

                    currGroupName += portGroupSuffix;

                    ServiceTemplateSetting vdsPG = ServiceTemplateClientUtil.getPortGroup(cluster, pn.getId(),
                            currGroupName, pgNetworkID, j, true);
                    if (vdsPG == null) {
                        // unexpected...
                        LOGGER.error("getPortGroup returned null for VDS ID=" + pn.getId() + ", PG="
                                + currGroupName);
                        throw new LocalizedWebApplicationException(Response.Status.INTERNAL_SERVER_ERROR,
                                AsmManagerMessages.internalError());
                    }
                    vdsPG.setDisplayName(currGroupName);
                    vdsPG.setHideFromTemplate(false);
                    vdsPG.setGroup(uiGroupName);

                    vdsAdded.add(vdsPG);
                    // $new$
                    vdsNew = vdsCategory.getParameter(
                            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsPG.getId());
                    if (vdsNew != null) {
                        vdsNew.setGroup(uiGroupName);
                        vdsAdded.add(vdsNew);
                    }

                    if (NetworkType.PXE.equals(nConfig.getType())
                            || NetworkType.HYPERVISOR_MANAGEMENT.equals(nConfig.getType())) {
                        vdsPG.setRequired(true);
                        if (vdsNew != null)
                            vdsNew.setRequired(true);
                    }

                }

            }

            v++;
        }

    } else {

        // Remove the Enable VMware vSAN option if no server is associated with the cluster
        ServiceTemplateSetting enableVmwareVsan = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_ID,
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_CLUSTER_VSAN_ID);
        if (enableVmwareVsan != null) {
            enableVmwareVsan.setHideFromTemplate(true);
        }

        ServiceTemplateSetting vdsName = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                ServiceTemplateClientUtil.createVDSID("pxe"));

        if (vdsName == null) {
            vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                    ServiceTemplateClientUtil.createVDSID("pxe"), "VDS Name", "PXE VDS",
                    ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));

            vdsName.setHideFromTemplate(false);
        } else {
            vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
        }

        vdsAdded.add(vdsName);
        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
        if (vdsNew != null) {
            vdsAdded.add(vdsNew);
        }

        // PXE Port Group
        ServiceTemplateSetting vdsPG = ServiceTemplateClientUtil.getPortGroup(cluster, "pxe", "PXE Port Group",
                "pxe", 1, true);
        if (vdsPG == null) {
            // unexpected...
            LOGGER.error("getPortGroup returned null for VDS ID=pxe" + ", PG=PXE Port Group");
            throw new LocalizedWebApplicationException(Response.Status.INTERNAL_SERVER_ERROR,
                    AsmManagerMessages.internalError());
        }
        vdsPG.setDisplayName("PXE Port Group");
        vdsPG.setHideFromTemplate(false);

        vdsAdded.add(vdsPG);
        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsPG.getId());
        if (vdsNew != null) {
            vdsAdded.add(vdsNew);
        }

        vdsName = cluster.getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                ServiceTemplateClientUtil.createVDSID("workload"));

        if (vdsName == null) {
            vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                    ServiceTemplateClientUtil.createVDSID("workload"), "VDS Name", "Workload VDS",
                    ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));

            vdsName.setHideFromTemplate(false);
            vdsName.setRequired(true);
        } else {
            vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
        }

        vdsAdded.add(vdsName);
        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
        if (vdsNew != null) {
            vdsNew.setRequired(true);
            vdsAdded.add(vdsNew);
        }

    }

    // remove old VDS names / PGs
    List<ServiceTemplateSetting> toRemove = new ArrayList<>();
    for (ServiceTemplateSetting vdsName : vdsCategory.getParameters()) {
        if (!vdsName.getId().contains(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_NAME_ID + "::")
                && !vdsName.getId()
                        .contains(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_PG_ID + "::"))
            continue;

        toRemove.add(vdsName);
    }
    vdsCategory.getParameters().removeAll(toRemove);

    // re-add new parameters
    vdsCategory.getParameters().addAll(vdsAdded);

}

From source file:org.jboss.errai.ioc.rebind.ioc.graph.impl.DependencyGraphBuilderImpl.java

private void processResolutionQueue(final Queue<AbstractInjectable> resolutionQueue,
        final Multimap<ResolutionPriority, ConcreteInjectable> resolvedByPriority) {
    do {
        final AbstractInjectable cur = resolutionQueue.poll();
        for (final BaseInjectable link : cur.linked) {
            if (link instanceof AbstractInjectable) {
                resolutionQueue.add((AbstractInjectable) link);
            } else if (link instanceof ConcreteInjectable) {
                resolvedByPriority.put(getMatchingPriority(link), (ConcreteInjectable) link);
            }
        }
    } while (resolutionQueue.size() > 0);
}

From source file:org.apache.hyracks.api.rewriter.runtime.SuperActivityOperatorNodePushable.java

private void init() throws HyracksDataException {
    Map<ActivityId, IOperatorNodePushable> startOperatorNodePushables = new HashMap<ActivityId, IOperatorNodePushable>();
    Queue<Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> childQueue = new LinkedList<Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>>();
    List<IConnectorDescriptor> outputConnectors = null;

    /**
     * Set up the source operators
     */
    for (Entry<ActivityId, IActivity> entry : startActivities.entrySet()) {
        IOperatorNodePushable opPushable = entry.getValue().createPushRuntime(ctx, recordDescProvider,
                partition, nPartitions);
        startOperatorNodePushables.put(entry.getKey(), opPushable);
        operatorNodePushablesBFSOrder.add(opPushable);
        operatorNodePushables.put(entry.getKey(), opPushable);
        inputArity += opPushable.getInputArity();
        outputConnectors = parent.getActivityOutputMap().get(entry.getKey());
        if (outputConnectors != null) {
            for (IConnectorDescriptor conn : outputConnectors) {
                childQueue.add(parent.getConnectorActivityMap().get(conn.getConnectorId()));
            }
        }
    }

    /**
     * Use BFS (breadth-first search) to construct the runtime execution
     * DAG.
     */
    while (childQueue.size() > 0) {
        /**
         * construct the source to destination information
         */
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> channel = childQueue.poll();
        ActivityId sourceId = channel.getLeft().getLeft().getActivityId();
        int outputChannel = channel.getLeft().getRight();
        ActivityId destId = channel.getRight().getLeft().getActivityId();
        int inputChannel = channel.getRight().getRight();
        IOperatorNodePushable sourceOp = operatorNodePushables.get(sourceId);
        IOperatorNodePushable destOp = operatorNodePushables.get(destId);
        if (destOp == null) {
            destOp = channel.getRight().getLeft().createPushRuntime(ctx, recordDescProvider, partition,
                    nPartitions);
            operatorNodePushablesBFSOrder.add(destOp);
            operatorNodePushables.put(destId, destOp);
        }

        /**
         * construct the dataflow connection from a producer to a consumer
         */
        sourceOp.setOutputFrameWriter(outputChannel, destOp.getInputFrameWriter(inputChannel),
                recordDescProvider.getInputRecordDescriptor(destId, inputChannel));

        /**
         * traverse to the child of the current activity
         */
        outputConnectors = parent.getActivityOutputMap().get(destId);

        /**
         * expand the executing activities further downstream
         */
        if (outputConnectors != null && outputConnectors.size() > 0) {
            for (IConnectorDescriptor conn : outputConnectors) {
                if (conn != null) {
                    childQueue.add(parent.getConnectorActivityMap().get(conn.getConnectorId()));
                }
            }
        }
    }

    // Set the startedInitialization flags to false.
    startedInitialization = new boolean[operatorNodePushablesBFSOrder.size()];
    Arrays.fill(startedInitialization, false);
}

From source file:candr.yoclip.ParserTest.java

@Test
public void getParsedOption() {
    final ParserOption<ParserTest> mockedOption = createMockOption("o");
    when(mockedOption.hasValue()).thenReturn(true);
    final List<ParserOption<ParserTest>> parserOptions = Arrays.asList(mockedOption);

    final ParserOptions<ParserTest> mockParserOptions = createMockParserParameters("+", "=");
    when(mockParserOptions.get()).thenReturn(parserOptions);
    when(mockParserOptions.get("o")).thenReturn(mockedOption);

    final Queue<String> parameters = new LinkedList<String>();
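    // The size() assertions below verify that the parser consumed the tokens it was handed from the queue.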
    final Parser<ParserTest> testCase = new Parser<ParserTest>(mockParserOptions,
            createMockParserHelpFactory());
    assertThat("empty queue", testCase.getParsedOption(parameters), nullValue());

    parameters.add("+o=foobar");
    ParsedOption<ParserTest> parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o error", parsedOption.isError(), is(false));
    assertThat("+o parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o value", parsedOption.getValue(), is("foobar"));
    assertThat("queue size after parsed parameter", parameters.size(), is(0));

    parameters.add("+o=");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o missing value error", parsedOption.isError(), is(false));
    assertThat("+o missing value parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o missing value not null", parsedOption.getValue(), nullValue());
    assertThat("queue size after missing value", parameters.size(), is(0));

    parameters.add("+o");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o missing separator error", parsedOption.isError(), is(false));
    assertThat("+o missing separator parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o missing separator not null", parsedOption.getValue(), nullValue());
    assertThat("queue size after missing separator", parameters.size(), is(0));

    when(mockParserOptions.getSeparator()).thenReturn(" ");
    parameters.add("+o");
    parameters.add("foobar");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o whitespace error", parsedOption.isError(), is(false));
    assertThat("+o whitespace parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o whitespace value", parsedOption.getValue(), is("foobar"));
    assertThat("+o whitespace queue size", parameters.size(), is(0));

    when(mockParserOptions.getSeparator()).thenReturn(" ");
    parameters.add("+o");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o whitespace missing value error", parsedOption.isError(), is(false));
    assertThat("+o whitespace missing value parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o whitespace missing value not null", parsedOption.getValue(), nullValue());
    assertThat("+o whitespace queue size after missing value", parameters.size(), is(0));

    when(mockParserOptions.getSeparator()).thenReturn("=");
    when(mockedOption.hasValue()).thenReturn(false);
    parameters.add("+o=value");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o with value error", parsedOption.isError(), is(true));
    assertThat("+o with value parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o with value error is null", parsedOption.getError(), notNullValue());
    assertThat("+o whitespace queue size after missing value", parameters.size(), is(0));

    parameters.add("+o");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o with boolean", parsedOption.isError(), is(false));
    assertThat("+o with boolean parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o with boolean value", parsedOption.getValue(), is(Boolean.TRUE.toString()));
    assertThat("+o whitespace queue size after missing value", parameters.size(), is(0));
}

From source file:info.raack.appliancelabeler.machinelearning.appliancedetection.algorithms.BasePowerDrawDetectionAlgorithm.java

public AlgorithmPredictions algorithmCalculateApplianceEnergyUsePredictions(EnergyMonitor energyMonitor,
        Queue<EnergyTimestep> originTimesteps, ItemReader<SecondData> dataReader) {

    AlgorithmPredictions algorithmPredictions = new AlgorithmPredictions();

    Map<UserAppliance, List<EnergyTimestep>> applianceTimesteps = new HashMap<UserAppliance, List<EnergyTimestep>>();

    // get all of the possible user appliances and their last known on/off state
    List<UserAppliance> apps = database.getUserAppliancesForAlgorithmForEnergyMonitor(energyMonitor, getId());

    Map<UserAppliance, Double> currentTimestepEnergyConsumption = new HashMap<UserAppliance, Double>();

    for (UserAppliance appliance : apps) {
        currentTimestepEnergyConsumption.put(appliance, 0d);
        applianceTimesteps.put(appliance, new ArrayList<EnergyTimestep>());
    }

    Map<Long, List<ApplianceStateTransition>> stateTransitions = new HashMap<Long, List<ApplianceStateTransition>>();

    if (originTimesteps.size() > 0) {
        // ASSUMPTION - measurements are in chronological order
        if (apps.size() > 0) {

            // run whatever the energy-delta state transition detector models predict for these new data points
            stateTransitions = detectStateTransitions(
                    database.getAlgorithmResultForMonitorAndAlgorithm(energyMonitor, this), apps.get(0),
                    dataReader);

            // reset the data reader
            dataReader.moveToBeginning();

            EnergyTimestep currentTimestep = originTimesteps.poll();

            Map<UserAppliance, ApplianceState> applianceStates = new HashMap<UserAppliance, ApplianceState>();

            // while we have timesteps remaining
            //logger.debug("Current timestep: " + currentTimestep.getStartTime() + " - " + currentTimestep.getEndTime());

            long currentTimestepEndTime = currentTimestep.getEndTime().getTime();

            // for each second in the measurement list
            try {
                for (SecondData measurement = dataReader.read(); measurement != null; measurement = dataReader
                        .read()) {
                    long currentMeasurementTime = measurement.getCalLong();

                    while (currentMeasurementTime > currentTimestepEndTime) {
                        //logger.debug("End of timestep " + currentTimestep.getEndTime() + "; getting next timestamp");

                        // get new timestep
                        currentTimestep = originTimesteps.poll();

                        // the current timestep may be null - we won't process up to the very last second, as some data runs past the last full 5-minute block
                        if (currentTimestep == null) {
                            // done!
                            break;
                        } else {
                            currentTimestepEndTime = currentTimestep.getEndTime().getTime();
                        }
                    }

                    // update the states of any of the appliances based on any state transitions at this second
                    if (stateTransitions.containsKey(currentMeasurementTime)) {
                        updateStateForAppliances(applianceStates, stateTransitions.get(currentMeasurementTime),
                                measurement);
                    } else {
                        updateStateForAppliances(applianceStates, new ArrayList<ApplianceStateTransition>(),
                                measurement);
                    }

                    for (UserAppliance userAppliance : currentTimestepEnergyConsumption.keySet()) {
                        // is appliance on?
                        if (applianceStates.get(userAppliance) != null
                                && applianceStates.get(userAppliance).isOn()) {

                            ApplianceState applianceState = applianceStates.get(userAppliance);

                            double previousConsumption = currentTimestepEnergyConsumption.get(userAppliance);

                            // BIG ASSUMPTION OF THIS ALGORITHM - appliances all take constant power during their operation = power delta (watts) * 1 second
                            double newConsumption = applianceState.getCurrentPower();
                            //logger.debug("Appliance " + userAppliance + " last transition was to on; adding " + newConsumption + " watt-seconds to energy consumption");

                            // add previous consumption plus new consumption
                            currentTimestepEnergyConsumption.put(userAppliance,
                                    previousConsumption + newConsumption);
                        }
                    }

                    if (currentMeasurementTime == currentTimestepEndTime) {
                        //logger.debug("Timestep start " + currentTimestep.getStartTime() + "; closing energy measurement");
                        // save current energy consumption in this timestep and reset counter
                        for (UserAppliance appliance : apps) {
                            if (currentTimestepEnergyConsumption.get(appliance) > 0) {
                                EnergyTimestep step = currentTimestep.copyWithoutEnergyOrAppliance();

                                step.setEnergyConsumed(currentTimestepEnergyConsumption.get(appliance));
                                step.setUserAppliance(appliance);
                                applianceTimesteps.get(appliance).add(step);
                            }

                            currentTimestepEnergyConsumption.put(appliance, 0d);
                        }

                        // get new timestep
                        currentTimestep = originTimesteps.poll();

                        // the current timestep may be null; we won't process up to the very last second, since some measurements run past the last full 5-minute block
                        if (currentTimestep == null) {
                            // done!
                            break;
                        } else {
                            currentTimestepEndTime = currentTimestep.getEndTime().getTime();
                        }
                    }
                }
            } catch (Exception e) {
                throw new RuntimeException("Cannot calculate energy consumption predictions", e);
            }
            logger.debug("Done with energy usage calculations");
        }
    }

    List<ApplianceStateTransition> onlyStateTransitions = new ArrayList<ApplianceStateTransition>();
    for (List<? extends ApplianceStateTransition> list : stateTransitions.values()) {
        onlyStateTransitions.addAll(list);
    }

    algorithmPredictions.setStateTransitions(onlyStateTransitions);
    algorithmPredictions.setEnergyTimesteps(applianceTimesteps);

    return algorithmPredictions;
}
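
The algorithm above guards its main work with originTimesteps.size() > 0 and then drains the queue via poll(), treating a null return as the signal that no timesteps remain. A minimal sketch of that guard-then-drain pattern, with plain Long end times standing in for EnergyTimestep and none of the appliance bookkeeping, could look like this:

import java.util.LinkedList;
import java.util.Queue;

public class TimestepQueueSketch {
    public static void main(String[] args) {
        // queue of timestep end times (stand-ins for the example's EnergyTimestep objects)
        Queue<Long> originTimesteps = new LinkedList<Long>();
        originTimesteps.add(300L);
        originTimesteps.add(600L);
        originTimesteps.add(900L);

        // guard with size() before doing any work, as the algorithm above does
        if (originTimesteps.size() > 0) {
            Long currentEndTime = originTimesteps.poll();
            for (long second = 1; second <= 1000 && currentEndTime != null; second++) {
                if (second == currentEndTime) {
                    // close out the current timestep and advance to the next one
                    System.out.println("closing timestep ending at second " + second);
                    currentEndTime = originTimesteps.poll(); // null once the queue is exhausted
                }
            }
        }
    }
}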

From source file:edu.abreksa.BeanTest.components.Test.java

public static Test testLoader(String filepath) throws IOException {
    Log.debug("Loading test \"" + filepath + "\"");
    File testFile = new File(filepath);
    if (!testFile.exists()) {
        throw new FileNotFoundException("The test definition file \"" + filepath + "\" does not exist.");
    }
    Test test = Main.gson.fromJson(Main.Utils.readFile(testFile.getAbsolutePath()), Test.class);
    StringBuilder stringBuilder = new StringBuilder();
    Queue<Object> queue = new LinkedList<Object>();
    if (!Main.config.disableHelperMethods) {
        queue.add("imports(){import *;}\r\nimports();\r\n");
        //Add log methods
        queue.add("debug(string){com.esotericsoftware.minlog.Log.debug(testHandle.getName(), string);}");
        queue.add("error(string){com.esotericsoftware.minlog.Log.error(testHandle.getName(), string);}");
        queue.add("warn(string){com.esotericsoftware.minlog.Log.warn(testHandle.getName(), string);}");
        queue.add("info(string){com.esotericsoftware.minlog.Log.info(testHandle.getName(), string);}");
        queue.add("trace(string){com.esotericsoftware.minlog.Log.trace(testHandle.getName(), string);}");
        //Add (webDriver/selenium)/webClient variables
        queue.add(
                "if(testHandle.getDriver().equals(\"WebDriver\")){webDriver = testHandle.getWebDriver();} else if(testHandle.getDriver().equals(\"HTMLUnit\")){webClient = testHandle.getWebClient();}");
    }
    //The before scripts
    for (String string : Main.config.before) {
        queue.add(new File(string));
    }
    //The test before
    for (String string : test.before) {
        queue.add(new File(string));
    }
    if (test.scriptCode != null) {
        queue.add(test.scriptCode);
    }
    //The test
    File scriptFile = new File(test.script);
    if (!scriptFile.exists()) {
        throw new FileNotFoundException("The script file \"" + test.script + "\" does not exist.");
    } else {
        queue.add(new File(scriptFile.getAbsolutePath()));
    }
    //The test after
    for (String string : test.after) {
        queue.add(new File(string));
    }
    //The after
    for (String string : Main.config.after) {
        queue.add(new File(string));
    }
    Log.debug("Queued " + Main.gson.toJson(queue));
    Log.debug("Loading " + queue.size() + " external scripts");
    for (Object object : queue) {
        if (object instanceof File) {
            if (((File) object).exists()) {
                Log.debug("Loading external script file \"" + object + "\"");
                stringBuilder.append("\r\n" + Main.Utils.readFile(((File) object).getAbsolutePath()));
            } else {
                Log.warn("External script file \"" + object + "\" does not exist.");
            }
        } else if (object instanceof String) {
            stringBuilder.append("\r\n" + object);
        }
    }
    test.scriptCode = stringBuilder.toString().replace("\r\n\r\n", "\r\n");
    Log.debug("Final script \"" + test.scriptCode + "\"");
    return test;
}
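
In testLoader above, queue.size() is used only to log how many script sources (inline snippets and script files) were collected before they are concatenated into a single script body. A self-contained sketch of that count-then-concatenate pattern, with hypothetical file names and the actual file reads replaced by placeholder comments, might look like this:

import java.io.File;
import java.util.LinkedList;
import java.util.Queue;

public class ScriptQueueSketch {
    public static void main(String[] args) {
        // mixed queue of inline snippets and script files, as in testLoader above
        Queue<Object> queue = new LinkedList<Object>();
        queue.add("debug(string){ /* inline helper */ }");
        queue.add(new File("before.bsh"));
        queue.add(new File("test.bsh"));

        // size() reports how many sources were queued before they are concatenated
        System.out.println("Loading " + queue.size() + " external scripts");

        StringBuilder script = new StringBuilder();
        for (Object source : queue) {
            if (source instanceof File) {
                // a real loader would read the file's contents here
                script.append("\r\n// contents of ").append(source).append(" would be appended here");
            } else if (source instanceof String) {
                script.append("\r\n").append(source);
            }
        }
        System.out.println(script.toString().trim());
    }
}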