Example usage for java.util Queue poll

List of usage examples for java.util Queue poll

Introduction

On this page you can find example usages of java.util Queue poll.

Prototype

E poll();

Document

Retrieves and removes the head of this queue, or returns null if this queue is empty.
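A minimal, hypothetical sketch of this contract (the QueuePollDemo class below is illustrative only and not taken from any of the projects that follow): it drains a queue with poll(), using the null return value as the stop condition, whereas remove() would throw NoSuchElementException once the queue is empty.

import java.util.LinkedList;
import java.util.Queue;

public class QueuePollDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.add("first");
        queue.add("second");

        // poll() retrieves and removes the head of the queue ...
        String head;
        while ((head = queue.poll()) != null) {
            System.out.println(head);
        }

        // ... and returns null once the queue is empty
        System.out.println(queue.poll()); // prints "null"

        // remove() would throw NoSuchElementException at this point instead
    }
}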

Usage

From source file:org.dkpro.lab.engine.impl.MultiThreadBatchTaskEngine.java

@Override
protected void executeConfiguration(BatchTask aConfiguration, TaskContext aContext, Map<String, Object> aConfig,
        Set<String> aExecutedSubtasks) throws ExecutionException, LifeCycleException {
    if (log.isTraceEnabled()) {
        // Show all subtasks executed so far
        for (String est : aExecutedSubtasks) {
            log.trace("-- Already executed: " + est);
        }
    }

    // Set up initial scope used by sub-batch-tasks using the inherited scope. The scope is
    // extended as the subtasks of this batch are executed with the present configuration.
    // FIXME: That means that sub-batch-tasks in two different configurations cannot see
    // each other. Is that intended? Mind that the "executedSubtasks" set is intentionally
    // maintained *across* configurations, so maybe the scope should also be maintained
    // *across* configurations? - REC 2014-06-15
    Set<String> scope = new HashSet<>();
    if (aConfiguration.getScope() != null) {
        scope.addAll(aConfiguration.getScope());
    }

    // Configure subtasks
    for (Task task : aConfiguration.getTasks()) {
        // Now the setup is complete
        aContext.getLifeCycleManager().configure(aContext, task, aConfig);
    }

    Queue<Task> queue = new LinkedList<>(aConfiguration.getTasks());
    // keeps track of the execution threads; 
    // TODO MW: do we really need this or can we work with the futures list only?
    Map<Task, ExecutionThread> threads = new HashMap<>();
    // keeps track of submitted Futures and their associated tasks
    Map<Future<?>, Task> futures = new HashMap<Future<?>, Task>();
    // will be instantiated with all exceptions from current loop
    ConcurrentMap<Task, Throwable> exceptionsFromLastLoop = null;
    ConcurrentMap<Task, Throwable> exceptionsFromCurrentLoop = new ConcurrentHashMap<>();

    int outerLoopCounter = 0;

    // main loop
    do {
        outerLoopCounter++;

        threads.clear();
        futures.clear();
        ExecutorService executor = Executors.newFixedThreadPool(maxThreads);

        // set the exceptions from the last loop
        exceptionsFromLastLoop = new ConcurrentHashMap<>(exceptionsFromCurrentLoop);

        // Fix MW: Clear exceptionsFromCurrentLoop; otherwise the loop will run at most twice.
        exceptionsFromCurrentLoop.clear();

        // process all tasks from the queue
        while (!queue.isEmpty()) {
            Task task = queue.poll();

            TaskContextMetadata execution = getExistingExecution(aConfiguration, aContext, task, aConfig,
                    aExecutedSubtasks);

            // Check whether a subtask execution compatible with the present configuration
            // already exists ...
            if (execution == null) {
                // ... otherwise execute it with the present configuration
                log.info("Executing task [" + task.getType() + "]");

                // set scope here so that the inherited scopes are considered
                if (task instanceof BatchTask) {
                    ((BatchTask) task).setScope(scope);
                }

                ExecutionThread thread = new ExecutionThread(aContext, task, aConfig, aExecutedSubtasks);
                threads.put(task, thread);

                futures.put(executor.submit(thread), task);
            } else {
                log.debug("Using existing execution [" + execution.getId() + "]");

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

        // try to get results from all futures to check for failed executions
        for (Map.Entry<Future<?>, Task> entry : futures.entrySet()) {
            try {
                entry.getKey().get();
            } catch (java.util.concurrent.ExecutionException ex) {
                Task task = entry.getValue();
                // TODO MW: add a retry-counter here to prevent endless loops?
                log.info("Task exec failed for [" + task.getType() + "]");
                // record the failed task, so that it can be re-added to the queue
                exceptionsFromCurrentLoop.put(task, ex);
            } catch (InterruptedException ex) {
                // thread interrupted, exit
                throw new RuntimeException(ex);
            }
        }

        log.debug("Calling shutdown");
        executor.shutdown();
        log.debug("All threads finished");

        // collect the results
        for (Map.Entry<Task, ExecutionThread> entry : threads.entrySet()) {
            Task task = entry.getKey();
            ExecutionThread thread = entry.getValue();
            TaskContextMetadata execution = thread.getTaskContextMetadata();

            // probably failed
            if (execution == null) {
                Throwable exception = exceptionsFromCurrentLoop.get(task);
                if (!(exception instanceof UnresolvedImportException)
                        && !(exception instanceof java.util.concurrent.ExecutionException)) {
                    throw new RuntimeException(exception);
                }
                exceptionsFromCurrentLoop.put(task, exception);

                // re-add to the queue
                queue.add(task);
            } else {

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

    }
    // end of do-while; finish if the same tasks failed again
    while (!exceptionsFromCurrentLoop.keySet().equals(exceptionsFromLastLoop.keySet()));

    if (!exceptionsFromCurrentLoop.isEmpty()) {
        // collect all details
        StringBuilder details = new StringBuilder();
        for (Throwable throwable : exceptionsFromCurrentLoop.values()) {
            details.append("\n -");
            details.append(throwable.getMessage());
        }

        // we re-throw the first exception
        Throwable next = exceptionsFromCurrentLoop.values().iterator().next();
        if (next instanceof RuntimeException) {
            throw (RuntimeException) next;
        }

        // otherwise wrap it
        throw new RuntimeException(details.toString(), next);
    }
    log.info("MultiThreadBatchTask completed successfully. Total number of outer loop runs: "
            + outerLoopCounter);
}

From source file:org.unitime.timetable.model.Solution.java

@Override
public Set<Assignment> getConflicts(Long classId) throws Exception {
    if (classId == null)
        return null;
    Class_ clazz = Class_DAO.getInstance().get(classId);
    if (clazz == null || clazz.isCancelled())
        return null;
    Assignment assignment = getAssignment(clazz);
    if (assignment == null)
        return null;
    Set<Assignment> conflicts = new HashSet<Assignment>();
    if (assignment.getRooms() != null)
        for (Location room : assignment.getRooms()) {
            if (!room.isIgnoreRoomCheck()) {
                for (Assignment a : room.getAssignments(this))
                    if (!assignment.equals(a) && !a.getClazz().isCancelled() && assignment.overlaps(a)
                            && !clazz.canShareRoom(a.getClazz()))
                        conflicts.add(a);
            }
        }

    if (clazz.getClassInstructors() != null)
        for (ClassInstructor instructor : clazz.getClassInstructors()) {
            if (!instructor.isLead())
                continue;
            for (DepartmentalInstructor di : DepartmentalInstructor
                    .getAllForInstructor(instructor.getInstructor())) {
                for (ClassInstructor ci : di.getClasses()) {
                    if (ci.equals(instructor))
                        continue;
                    Assignment a = getAssignment(ci.getClassInstructing());
                    if (a != null && !a.getClazz().isCancelled() && assignment.overlaps(a)
                            && !clazz.canShareInstructor(a.getClazz()))
                        conflicts.add(a);
                }
            }
        }

    Class_ parent = clazz.getParentClass();
    while (parent != null) {
        Assignment a = getAssignment(parent);
        if (a != null && !a.getClazz().isCancelled() && assignment.overlaps(a))
            conflicts.add(a);
        parent = parent.getParentClass();
    }

    Queue<Class_> children = new LinkedList<Class_>(clazz.getChildClasses());
    Class_ child = null;
    while ((child = children.poll()) != null) {
        Assignment a = getAssignment(child);
        if (a != null && !a.getClazz().isCancelled() && assignment.overlaps(a))
            conflicts.add(a);
        if (!child.getChildClasses().isEmpty())
            children.addAll(child.getChildClasses());
    }

    for (Iterator<SchedulingSubpart> i = clazz.getSchedulingSubpart().getInstrOfferingConfig()
            .getSchedulingSubparts().iterator(); i.hasNext();) {
        SchedulingSubpart ss = i.next();
        if (ss.getClasses().size() == 1) {
            child = ss.getClasses().iterator().next();
            if (clazz.equals(child))
                continue;
            Assignment a = getAssignment(child);
            if (a != null && !a.getClazz().isCancelled() && assignment.overlaps(a))
                conflicts.add(a);
        }
    }

    return conflicts;
}

From source file:org.ohmage.query.impl.CampaignQueries.java

public void createCampaign(final Campaign campaign, final Collection<String> classIds,
        final String creatorUsername) throws DataAccessException {

    // Create the transaction.
    DefaultTransactionDefinition def = new DefaultTransactionDefinition();
    def.setName("Creating a new campaign.");

    try {
        // Begin the transaction.
        PlatformTransactionManager transactionManager = new DataSourceTransactionManager(getDataSource());
        TransactionStatus status = transactionManager.getTransaction(def);

        String iconUrlString = null;
        URL iconUrl = campaign.getIconUrl();
        if (iconUrl != null) {
            iconUrlString = iconUrl.toString();
        }

        String xml;
        try {
            xml = campaign.getXml();
        } catch (DomainException e) {
            transactionManager.rollback(status);
            throw new DataAccessException("The XML could not be saved.");
        }

        // Create the campaign.
        try {
            getJdbcTemplate().update(SQL_INSERT_CAMPAIGN,
                    new Object[] { campaign.getId(), campaign.getName(), xml, campaign.getDescription(),
                            iconUrlString, campaign.getAuthoredBy(), campaign.getRunningState().toString(),
                            campaign.getPrivacyState().toString() });
        } catch (org.springframework.dao.DataAccessException e) {
            transactionManager.rollback(status);
            throw new DataAccessException("Error executing SQL '" + SQL_INSERT_CAMPAIGN + "' with parameters: "
                    + campaign.getId() + ", " + campaign.getName() + ", " + xml + ", "
                    + campaign.getDescription() + ", " + iconUrlString + ", " + campaign.getAuthoredBy() + ", "
                    + campaign.getRunningState().toString() + ", " + campaign.getPrivacyState().toString(), e);
        }

        // Create the set of survey and prompt IDs for this campaign.
        final Set<String> surveyIds = new HashSet<String>();
        final Set<String> promptIds = new HashSet<String>();

        // Loop through all of the surveys and add the survey and prompt
        // IDs.
        for (Survey survey : campaign.getSurveys().values()) {
            // Get this survey's ID.
            surveyIds.add(survey.getId());

            Queue<SurveyItem> surveyItems = new LinkedList<SurveyItem>();
            surveyItems.addAll(survey.getSurveyItems().values());
            while (surveyItems.size() > 0) {
                SurveyItem surveyItem = surveyItems.poll();

                if (surveyItem instanceof RepeatableSet) {
                    RepeatableSet repeatableSet = (RepeatableSet) surveyItem;

                    for (SurveyItem rsSurveyItem : repeatableSet.getSurveyItems().values()) {
                        surveyItems.add(rsSurveyItem);
                    }
                } else if (surveyItem instanceof Prompt) {
                    promptIds.add(((Prompt) surveyItem).getId());
                }
            }
        }

        // Get the campaign's ID.
        final String campaignId = campaign.getId();

        // Compile the list of parameters for the survey ID lookup table.
        List<Object[]> surveyParameters = new ArrayList<Object[]>(surveyIds.size());
        for (String surveyId : surveyIds) {
            Object[] params = new Object[2];
            params[0] = surveyId;
            params[1] = campaignId;
            surveyParameters.add(params);
        }

        // The SQL to write the data.
        final String surveyIdLookupBatchSql = "INSERT INTO " + "campaign_survey_lookup(survey_id, campaign_id) "
                + "VALUES (?, (SELECT id FROM campaign WHERE urn = ?))";

        // Add the survey IDs to the lookup table.
        try {
            getJdbcTemplate().batchUpdate(surveyIdLookupBatchSql, surveyParameters);
        } catch (org.springframework.dao.DataAccessException e) {
            transactionManager.rollback(status);
            throw new DataAccessException("Error executing SQL '" + surveyIdLookupBatchSql + "'.", e);
        }

        // Compile the list of parameters for the prompt ID lookup table.
        List<Object[]> promptParameters = new ArrayList<Object[]>(promptIds.size());
        for (String promptId : promptIds) {
            Object[] params = new Object[2];
            params[0] = promptId;
            params[1] = campaignId;
            promptParameters.add(params);
        }

        // The SQL to write the data.
        final String promptIdLookupBatchSql = "INSERT INTO " + "campaign_prompt_lookup(prompt_id, campaign_id) "
                + "VALUES (?, (SELECT id FROM campaign WHERE urn = ?))";

        // Add the prompt IDs to the lookup table.
        try {
            getJdbcTemplate().batchUpdate(promptIdLookupBatchSql, promptParameters);
        } catch (org.springframework.dao.DataAccessException e) {
            transactionManager.rollback(status);
            throw new DataAccessException("Error executing SQL '" + promptIdLookupBatchSql + "'.", e);
        }

        // Add each of the classes to the campaign.
        for (String classId : classIds) {
            associateCampaignAndClass(transactionManager, status, campaign.getId(), classId);
        }

        // Add the requesting user as the author. This may have already 
        // happened above.
        try {
            getJdbcTemplate().update(SQL_INSERT_USER_ROLE_CAMPAIGN, creatorUsername, campaign.getId(),
                    Campaign.Role.AUTHOR.toString());
        } catch (org.springframework.dao.DataIntegrityViolationException e) {
            // The user was already an author of this campaign implying 
            // that it's one of the default campaign roles based on a class
            // role that the 'creatorUsername' has.
            e.printStackTrace();
        } catch (org.springframework.dao.DataAccessException e) {
            transactionManager.rollback(status);
            throw new DataAccessException("Error executing SQL '" + SQL_INSERT_USER_ROLE_CAMPAIGN
                    + "' with parameters: " + creatorUsername + ", " + campaign.getId() + ", "
                    + Campaign.Role.AUTHOR.toString(), e);
        }

        // Commit the transaction.
        try {
            transactionManager.commit(status);
        } catch (TransactionException e) {
            transactionManager.rollback(status);
            throw new DataAccessException("Error while committing the transaction.", e);
        }
    } catch (TransactionException e) {
        throw new DataAccessException("Error while attempting to rollback the transaction.", e);
    }
}

From source file:org.apache.hyracks.api.rewriter.runtime.SuperActivityOperatorNodePushable.java

private void init() throws HyracksDataException {
    Map<ActivityId, IOperatorNodePushable> startOperatorNodePushables = new HashMap<ActivityId, IOperatorNodePushable>();
    Queue<Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> childQueue = new LinkedList<Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>>();
    List<IConnectorDescriptor> outputConnectors = null;

    /**
     * Set up the source operators
     */
    for (Entry<ActivityId, IActivity> entry : startActivities.entrySet()) {
        IOperatorNodePushable opPushable = entry.getValue().createPushRuntime(ctx, recordDescProvider,
                partition, nPartitions);
        startOperatorNodePushables.put(entry.getKey(), opPushable);
        operatorNodePushablesBFSOrder.add(opPushable);
        operatorNodePushables.put(entry.getKey(), opPushable);
        inputArity += opPushable.getInputArity();
        outputConnectors = parent.getActivityOutputMap().get(entry.getKey());
        if (outputConnectors != null) {
            for (IConnectorDescriptor conn : outputConnectors) {
                childQueue.add(parent.getConnectorActivityMap().get(conn.getConnectorId()));
            }
        }
    }

    /**
     * Use BFS (breadth-first search) to construct the runtime execution
     * DAG.
     */
    while (childQueue.size() > 0) {
        /**
         * construct the source to destination information
         */
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> channel = childQueue.poll();
        ActivityId sourceId = channel.getLeft().getLeft().getActivityId();
        int outputChannel = channel.getLeft().getRight();
        ActivityId destId = channel.getRight().getLeft().getActivityId();
        int inputChannel = channel.getRight().getRight();
        IOperatorNodePushable sourceOp = operatorNodePushables.get(sourceId);
        IOperatorNodePushable destOp = operatorNodePushables.get(destId);
        if (destOp == null) {
            destOp = channel.getRight().getLeft().createPushRuntime(ctx, recordDescProvider, partition,
                    nPartitions);
            operatorNodePushablesBFSOrder.add(destOp);
            operatorNodePushables.put(destId, destOp);
        }

        /**
         * construct the dataflow connection from a producer to a consumer
         */
        sourceOp.setOutputFrameWriter(outputChannel, destOp.getInputFrameWriter(inputChannel),
                recordDescProvider.getInputRecordDescriptor(destId, inputChannel));

        /**
         * traverse to the child of the current activity
         */
        outputConnectors = parent.getActivityOutputMap().get(destId);

        /**
         * expand the set of executing activities further downstream
         */
        if (outputConnectors != null && outputConnectors.size() > 0) {
            for (IConnectorDescriptor conn : outputConnectors) {
                if (conn != null) {
                    childQueue.add(parent.getConnectorActivityMap().get(conn.getConnectorId()));
                }
            }
        }
    }

    // Sets the startedInitialization flags to be false.
    startedInitialization = new boolean[operatorNodePushablesBFSOrder.size()];
    Arrays.fill(startedInitialization, false);
}

From source file:com.mirth.connect.plugins.datapruner.DataPruner.java

@Override
public void run() {
    try {
        logger.debug("Executing pruner, started at "
                + new SimpleDateFormat("MM/dd/yyyy hh:mm aa").format(Calendar.getInstance().getTime()));

        if (pruneEvents) {
            pruneEvents();
        }

        String date = new SimpleDateFormat(MessageWriterFactory.ARCHIVE_DATE_PATTERN)
                .format(Calendar.getInstance().getTime());
        String archiveFolder = (archiveEnabled) ? archiverOptions.getRootFolder() + IOUtils.DIR_SEPARATOR + date
                : null;
        Queue<PrunerTask> taskQueue;

        try {
            taskQueue = buildTaskQueue();
        } catch (Exception e) {
            // the error should already be logged
            return;
        }

        logger.debug("Pruner task queue built, " + taskQueue.size() + " channels will be processed");

        Map<String, String> attributes = new HashMap<String, String>();
        if (taskQueue.isEmpty()) {
            attributes.put("No messages to prune.", "");
            eventController.dispatchEvent(new ServerEvent(serverId, DataPrunerService.PLUGINPOINT,
                    Level.INFORMATION, Outcome.SUCCESS, attributes));
        }

        while (!taskQueue.isEmpty()) {
            ThreadUtils.checkInterruptedStatus();
            PrunerTask task = taskQueue.poll();

            try {
                status.setCurrentChannelId(task.getChannelId());
                status.setCurrentChannelName(task.getChannelName());
                status.setTaskStartTime(Calendar.getInstance());

                PruneResult result = pruneChannel(task.getChannelId(), task.getChannelName(),
                        task.getMessageDateThreshold(), task.getContentDateThreshold(), archiveFolder,
                        task.isArchiveEnabled());

                status.getProcessedChannelIds().add(task.getChannelId());

                attributes.put("Channel ID", task.getChannelId());
                attributes.put("Channel Name", task.getChannelName());

                if (archiveEnabled && task.isArchiveEnabled()) {
                    attributes.put("Messages Archived", Long.toString(result.numMessagesArchived));
                }

                attributes.put("Messages Pruned", Long.toString(result.numMessagesPruned));
                attributes.put("Content Rows Pruned", Long.toString(result.numContentPruned));
                attributes.put("Time Elapsed", getTimeElapsed());
                eventController.dispatchEvent(new ServerEvent(serverId, DataPrunerService.PLUGINPOINT,
                        Level.INFORMATION, Outcome.SUCCESS, attributes));
            } catch (InterruptedException e) {
                throw e;
            } catch (Exception e) {
                status.getFailedChannelIds().add(task.getChannelId());

                attributes.put("channel", task.getChannelName());
                attributes.put("error", e.getMessage());
                attributes.put("trace", ExceptionUtils.getStackTrace(e));
                eventController.dispatchEvent(new ServerEvent(serverId, DataPrunerService.PLUGINPOINT,
                        Level.ERROR, Outcome.FAILURE, attributes));
                Throwable t = e;
                if (e instanceof DataPrunerException) {
                    t = e.getCause();
                }
                logger.error("Failed to prune messages for channel " + task.getChannelName() + " ("
                        + task.getChannelId() + ").", t);
            } finally {
                status.getPendingChannelIds().remove(task.getChannelId());
                status.setCurrentChannelId(null);
                status.setCurrentChannelName(null);
            }
        }

        logger.debug("Pruner job finished executing");
    } catch (InterruptedException e) {
        // We need to clear this thread's interrupted status, or else the EventController will fail to dispatch the event
        Thread.interrupted();
        ServerEvent event = new ServerEvent(serverId, DataPrunerService.PLUGINPOINT + " Halted");
        event.setLevel(Level.INFORMATION);
        event.setOutcome(Outcome.SUCCESS);
        eventController.dispatchEvent(event);
        logger.debug("Data Pruner halted");
    } catch (Throwable t) {
        logger.error("An error occurred while executing the data pruner", t);
    } finally {
        status.setEndTime(Calendar.getInstance());
        lastStatus = SerializationUtils.clone(status);
        running.set(false);
    }
}

From source file:cn.edu.bjtu.cit.recommender.Recommender.java

@SuppressWarnings("unchecked")
public int run(String[] args) throws Exception {
    if (args.length < 2) {
        System.err.println();
        System.err.println("Usage: " + this.getClass().getName()
                + " [generic options] input output [profiling] [estimation] [clustersize]");
        System.err.println();
        printUsage();
        GenericOptionsParser.printGenericCommandUsage(System.err);

        return 1;
    }
    OptionParser parser = new OptionParser(args);

    Pipeline pipeline = new MRPipeline(Recommender.class, getConf());

    if (parser.hasOption(CLUSTER_SIZE)) {
        pipeline.getConfiguration().setInt(ClusterOracle.CLUSTER_SIZE,
                Integer.parseInt(parser.getOption(CLUSTER_SIZE).getValue()));
    }

    if (parser.hasOption(PROFILING)) {
        pipeline.getConfiguration().setBoolean(Profiler.IS_PROFILE, true);
        this.profileFilePath = parser.getOption(PROFILING).getValue();

    }

    if (parser.hasOption(ESTIMATION)) {
        estFile = parser.getOption(ESTIMATION).getValue();
        est = new Estimator(estFile, clusterSize);
    }

    if (parser.hasOption(OPT_REDUCE)) {
        pipeline.getConfiguration().setBoolean(OPT_REDUCE, true);
    }

    if (parser.hasOption(OPT_MSCR)) {
        pipeline.getConfiguration().setBoolean(OPT_MSCR, true);
    }

    if (parser.hasOption(ACTIVE_THRESHOLD)) {
        threshold = Integer.parseInt(parser.getOption("at").getValue());
    }

    if (parser.hasOption(TOP)) {
        top = Integer.parseInt(parser.getOption("top").getValue());
    }

    profiler = new Profiler(pipeline);
    /*
     * input node
     */
    PCollection<String> lines = pipeline.readTextFile(args[0]);

    if (profiler.isProfiling() && lines.getSize() > 10 * 1024 * 1024) {
        lines = lines.sample(0.1);
    }

    /*
     * S0 + GBK
     */
    PGroupedTable<Long, Long> userWithPrefs = lines.parallelDo(new MapFn<String, Pair<Long, Long>>() {

        @Override
        public Pair<Long, Long> map(String input) {
            String[] split = input.split(Estimator.DELM);
            long userID = Long.parseLong(split[0]);
            long itemID = Long.parseLong(split[1]);
            return Pair.of(userID, itemID);
        }

        @Override
        public float scaleFactor() {
            return est.getScaleFactor("S0").sizeFactor;
        }

        @Override
        public float scaleFactorByRecord() {
            return est.getScaleFactor("S0").recsFactor;
        }
    }, Writables.tableOf(Writables.longs(), Writables.longs())).groupByKey(est.getClusterSize());

    /*
     * S1
     */
    PTable<Long, Vector> userVector = userWithPrefs
            .parallelDo(new MapFn<Pair<Long, Iterable<Long>>, Pair<Long, Vector>>() {
                @Override
                public Pair<Long, Vector> map(Pair<Long, Iterable<Long>> input) {
                    Vector userVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
                    for (long itemPref : input.second()) {
                        userVector.set((int) itemPref, 1.0f);
                    }
                    return Pair.of(input.first(), userVector);
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S1").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S1").recsFactor;
                }
            }, Writables.tableOf(Writables.longs(), Writables.vectors()));

    userVector = profiler.profile("S0-S1", pipeline, userVector, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S2
     */
    PTable<Long, Vector> filteredUserVector = userVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Long, Vector>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Long, Vector>> emitter) {
                    if (input.second().getNumNondefaultElements() > threshold) {
                        emitter.emit(input);
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S2").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S2").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), Writables.vectors()));

    filteredUserVector = profiler.profile("S2", pipeline, filteredUserVector, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S3 + GBK
     */
    PGroupedTable<Integer, Integer> coOccurencePairs = filteredUserVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Integer, Integer>>() {
                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Integer, Integer>> emitter) {
                    Iterator<Vector.Element> it = input.second().iterateNonZero();
                    while (it.hasNext()) {
                        int index1 = it.next().index();
                        Iterator<Vector.Element> it2 = input.second().iterateNonZero();
                        while (it2.hasNext()) {
                            int index2 = it2.next().index();
                            emitter.emit(Pair.of(index1, index2));
                        }
                    }
                }

                @Override
                public float scaleFactor() {
                    float size = est.getScaleFactor("S3").sizeFactor;
                    return size;
                }

                @Override
                public float scaleFactorByRecord() {
                    float recs = est.getScaleFactor("S3").recsFactor;
                    return recs;
                }
            }, Writables.tableOf(Writables.ints(), Writables.ints())).groupByKey(est.getClusterSize());

    /*
     * S4
     */
    PTable<Integer, Vector> coOccurenceVector = coOccurencePairs
            .parallelDo(new MapFn<Pair<Integer, Iterable<Integer>>, Pair<Integer, Vector>>() {
                @Override
                public Pair<Integer, Vector> map(Pair<Integer, Iterable<Integer>> input) {
                    Vector cooccurrenceRow = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
                    for (int itemIndex2 : input.second()) {
                        cooccurrenceRow.set(itemIndex2, cooccurrenceRow.get(itemIndex2) + 1.0);
                    }
                    return Pair.of(input.first(), cooccurrenceRow);
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S4").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S4").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), Writables.vectors()));

    coOccurenceVector = profiler.profile("S3-S4", pipeline, coOccurenceVector, ProfileConverter.int_vector(),
            Writables.tableOf(Writables.ints(), Writables.vectors()));

    /*
     * S5 Wrapping co-occurrence columns
     */
    PTable<Integer, VectorOrPref> wrappedCooccurrence = coOccurenceVector
            .parallelDo(new MapFn<Pair<Integer, Vector>, Pair<Integer, VectorOrPref>>() {

                @Override
                public Pair<Integer, VectorOrPref> map(Pair<Integer, Vector> input) {
                    return Pair.of(input.first(), new VectorOrPref(input.second()));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S5").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S5").recsFactor;
                }

            }, Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    wrappedCooccurrence = profiler.profile("S5", pipeline, wrappedCooccurrence, ProfileConverter.int_vopv(),
            Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    /*
     * S6 Splitting user vectors
     */
    PTable<Integer, VectorOrPref> userVectorSplit = filteredUserVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Integer, VectorOrPref>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Integer, VectorOrPref>> emitter) {
                    long userID = input.first();
                    Vector userVector = input.second();
                    Iterator<Vector.Element> it = userVector.iterateNonZero();
                    while (it.hasNext()) {
                        Vector.Element e = it.next();
                        int itemIndex = e.index();
                        float preferenceValue = (float) e.get();
                        emitter.emit(Pair.of(itemIndex, new VectorOrPref(userID, preferenceValue)));
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S6").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S6").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    userVectorSplit = profiler.profile("S6", pipeline, userVectorSplit, ProfileConverter.int_vopp(),
            Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    /*
     * S7 Combine VectorOrPrefs
     */
    PTable<Integer, VectorAndPrefs> combinedVectorOrPref = wrappedCooccurrence.union(userVectorSplit)
            .groupByKey(est.getClusterSize())
            .parallelDo(new DoFn<Pair<Integer, Iterable<VectorOrPref>>, Pair<Integer, VectorAndPrefs>>() {

                @Override
                public void process(Pair<Integer, Iterable<VectorOrPref>> input,
                        Emitter<Pair<Integer, VectorAndPrefs>> emitter) {
                    Vector vector = null;
                    List<Long> userIDs = Lists.newArrayList();
                    List<Float> values = Lists.newArrayList();
                    for (VectorOrPref vop : input.second()) {
                        if (vector == null) {
                            vector = vop.getVector();
                        }
                        long userID = vop.getUserID();
                        if (userID != Long.MIN_VALUE) {
                            userIDs.add(vop.getUserID());
                        }
                        float value = vop.getValue();
                        if (!Float.isNaN(value)) {
                            values.add(vop.getValue());
                        }
                    }
                    emitter.emit(Pair.of(input.first(), new VectorAndPrefs(vector, userIDs, values)));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S7").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S7").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), VectorAndPrefs.vectorAndPrefs()));

    combinedVectorOrPref = profiler.profile("S5+S6-S7", pipeline, combinedVectorOrPref,
            ProfileConverter.int_vap(), Writables.tableOf(Writables.ints(), VectorAndPrefs.vectorAndPrefs()));
    /*
     * S8 Computing partial recommendation vectors
     */
    PTable<Long, Vector> partialMultiply = combinedVectorOrPref
            .parallelDo(new DoFn<Pair<Integer, VectorAndPrefs>, Pair<Long, Vector>>() {
                @Override
                public void process(Pair<Integer, VectorAndPrefs> input, Emitter<Pair<Long, Vector>> emitter) {
                    Vector cooccurrenceColumn = input.second().getVector();
                    List<Long> userIDs = input.second().getUserIDs();
                    List<Float> prefValues = input.second().getValues();
                    for (int i = 0; i < userIDs.size(); i++) {
                        long userID = userIDs.get(i);
                        if (userID != Long.MIN_VALUE) {
                            float prefValue = prefValues.get(i);
                            Vector partialProduct = cooccurrenceColumn.times(prefValue);
                            emitter.emit(Pair.of(userID, partialProduct));
                        }
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S8").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S8").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), Writables.vectors())).groupByKey(est.getClusterSize())
            .combineValues(new CombineFn<Long, Vector>() {

                @Override
                public void process(Pair<Long, Iterable<Vector>> input, Emitter<Pair<Long, Vector>> emitter) {
                    Vector partial = null;
                    for (Vector vector : input.second()) {
                        partial = partial == null ? vector : partial.plus(vector);
                    }
                    emitter.emit(Pair.of(input.first(), partial));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("combine").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("combine").recsFactor;
                }
            });

    partialMultiply = profiler.profile("S8-combine", pipeline, partialMultiply, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S9 Producing recommendations from vectors
     */
    PTable<Long, RecommendedItems> recommendedItems = partialMultiply
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Long, RecommendedItems>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Long, RecommendedItems>> emitter) {
                    Queue<RecommendedItem> topItems = new PriorityQueue<RecommendedItem>(11,
                            Collections.reverseOrder(BY_PREFERENCE_VALUE));
                    Iterator<Vector.Element> recommendationVectorIterator = input.second().iterateNonZero();
                    while (recommendationVectorIterator.hasNext()) {
                        Vector.Element element = recommendationVectorIterator.next();
                        int index = element.index();
                        float value = (float) element.get();
                        if (topItems.size() < top) {
                            topItems.add(new GenericRecommendedItem(index, value));
                        } else if (value > topItems.peek().getValue()) {
                            topItems.add(new GenericRecommendedItem(index, value));
                            topItems.poll();
                        }
                    }
                    List<RecommendedItem> recommendations = new ArrayList<RecommendedItem>(topItems.size());
                    recommendations.addAll(topItems);
                    Collections.sort(recommendations, BY_PREFERENCE_VALUE);
                    emitter.emit(Pair.of(input.first(), new RecommendedItems(recommendations)));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S9").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S9").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), RecommendedItems.recommendedItems()));

    recommendedItems = profiler.profile("S9", pipeline, recommendedItems, ProfileConverter.long_ri(),
            Writables.tableOf(Writables.longs(), RecommendedItems.recommendedItems()));

    /*
     * Profiling
     */
    if (profiler.isProfiling()) {
        profiler.writeResultToFile(profileFilePath);
        profiler.cleanup(pipeline.getConfiguration());
        return 0;
    }
    /*
     * asText
     */
    pipeline.writeTextFile(recommendedItems, args[1]);
    PipelineResult result = pipeline.done();
    return result.succeeded() ? 0 : 1;
}

From source file:org.shaman.terrain.polygonal.PolygonalMapGenerator.java

private void createBiomes() {
    if (graph == null) {
        return;
    }

    //assign temperatures
    for (Graph.Corner c : graph.corners) {
        c.temperature = c.elevation;
        c.temperature *= c.temperature;
        c.temperature = 1 - c.temperature;
    }
    assignCenterTemperature();

    //create random rivers
    Random rand = new Random(seed * 3);
    for (Graph.Corner c : graph.corners) {
        c.river = 0;
    }
    float riverProb = 0.2f;
    float riverStartHeight = 0.7f;
    int riverCounter = 0;
    corner: for (Graph.Corner c : graph.corners) {
        if (c.water || c.elevation < riverStartHeight) {
            continue;
        }
        if (rand.nextFloat() > riverProb) {
            continue;
        }
        if (c.river > 0)
            continue;
        for (Graph.Corner c2 : c.adjacent) {
            if (c2.river > 0) {
                continue corner;
            }
            for (Graph.Corner c3 : c2.adjacent) {
                if (c3.river > 0) {
                    continue corner;
                }
            }
        }
        //start new river from here
        Graph.Corner current = c;
        current.river = Math.max(current.river, 1);
        while (!current.ocean && !current.coast) {
            float minH = current.elevation;
            Graph.Corner minC = null;
            for (Graph.Corner c2 : current.adjacent) {
                if (c2.river > 0 && c2.elevation < current.elevation) {
                    minC = c2; //force closing of rivers
                    break;
                }
                if (c2.elevation < minH) {
                    minC = c2;
                    minH = c2.elevation;
                }
            }
            if (minC == null) {
                LOG.warning("river stuck in a local minima without reaching the ocean");
                break;
            }
            minC.river = Math.max(minC.river, current.river + 1);
            current = minC;
        }
        riverCounter++;
    }
    LOG.info("count of created rivers: " + riverCounter);
    showRivers = true;

    //assign moisture
    Queue<Graph.Corner> queue = new ArrayDeque<>();
    for (Graph.Corner q : graph.corners) {
        if ((q.water || q.river > 0) && !q.ocean) {
            q.moisture = q.river > 0 ? Math.min(3.0f, (0.4f * q.river)) : 1;
            queue.add(q);
        } else {
            q.moisture = 0;
        }
    }
    while (!queue.isEmpty()) {
        Graph.Corner q = queue.poll();
        for (Graph.Corner r : q.adjacent) {
            float newMoisture = q.moisture * 0.8f;
            if (newMoisture > r.moisture) {
                r.moisture = newMoisture;
                queue.add(r);
            }
        }
    }
    for (Graph.Corner q : graph.corners) {
        if (q.ocean || q.coast) {
            q.moisture = 1;
        }
    }

    //redistribute moisture
    ArrayList<Graph.Corner> corners = new ArrayList<>();
    for (Graph.Corner q : graph.corners) {
        if (!q.ocean && !q.coast) {
            corners.add(q);
        }
    }
    Collections.sort(corners, new Comparator<Graph.Corner>() {
        @Override
        public int compare(Graph.Corner o1, Graph.Corner o2) {
            return Float.compare(o1.moisture, o2.moisture);
        }
    });
    for (int i = 0; i < corners.size(); i++) {
        corners.get(i).moisture = i / (float) (corners.size() - 1);
    }
    assignCenterMoisture();

    assignBiomes();

    //update mesh
    updateTemperatureGeometry();
    updateMoistureGeometry();
    updateBiomesGeometry();
}

From source file:org.archive.crawler.frontier.WorkQueueFrontier.java

/**
 * Activate an inactive queue, if any are available. 
 */
protected boolean activateInactiveQueue() {
    for (Entry<Integer, Queue<String>> entry : getInactiveQueuesByPrecedence().entrySet()) {
        int expectedPrecedence = entry.getKey();
        Queue<String> queueOfWorkQueueKeys = entry.getValue();

        while (true) {
            String workQueueKey;
            synchronized (getInactiveQueuesByPrecedence()) {
                workQueueKey = queueOfWorkQueueKeys.poll();
                if (workQueueKey == null) {
                    break;
                }
                updateHighestWaiting(expectedPrecedence);
            }

            WorkQueue candidateQ = (WorkQueue) this.allQueues.get(workQueueKey);
            if (candidateQ.getPrecedence() > expectedPrecedence) {
                // queue demoted since placed; re-deactivate
                deactivateQueue(candidateQ);
                candidateQ.makeDirty();
                continue;
            }

            try {
                readyClassQueues.put(workQueueKey);
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }

            return true;
        }
    }

    return false;
}

From source file:com.koda.integ.hbase.storage.FileExtStorage.java

/**
 * Get existing file.
 *
 * @param id the id
 * @return file
 */

public RandomAccessFile getFile(int id) {

    if (existedIds.containsKey((long) id) == false) {
        return null;
    }

    Queue<RandomAccessFile> fileReaders = readers.get(id);
    if (fileReaders == null) {
        if (existedIds.containsKey((long) id) == false) {
            return null;
        }
        fileReaders = new ArrayBlockingQueue<RandomAccessFile>(maxOpenFD);
        readers.putIfAbsent(id, fileReaders);
    }
    fileReaders = readers.get(id);

    if (fileReaders == null) {
        return null;
    }

    RandomAccessFile raf = fileReaders.poll();

    if (raf == null) {
        raf = openFile(id, "r");
    }
    return raf;
}

From source file:org.rhq.enterprise.server.cloud.StorageNodeManagerBean.java

private Map<Integer, Integer> findResourcesWithAlertsToStorageNodeMap(StorageNode storageNode) {
    Stopwatch stopwatch = stopwatchStart();
    List<StorageNode> initialStorageNodes = getStorageNodes();
    try {
        if (storageNode == null) {
            initialStorageNodes = getStorageNodes();
        } else {
            initialStorageNodes = Arrays.asList(storageNode.getResource() == null
                    ? entityManager.find(StorageNode.class, storageNode.getId())
                    : storageNode);
        }

        Map<Integer, Integer> resourceIdsToStorageNodeMap = new HashMap<Integer, Integer>();
        Queue<Resource> unvisitedResources = new LinkedList<Resource>();

        // we are assuming here that the sets of resources are disjoint across different storage nodes
        for (StorageNode initialStorageNode : initialStorageNodes) {
            if (initialStorageNode.getResource() != null) {
                unvisitedResources.add(initialStorageNode.getResource());
                while (!unvisitedResources.isEmpty()) {
                    Resource resource = unvisitedResources.poll();
                    if (!resource.getAlertDefinitions().isEmpty()) {
                        resourceIdsToStorageNodeMap.put(resource.getId(), initialStorageNode.getId());
                    }

                    Set<Resource> childResources = resource.getChildResources();
                    if (childResources != null) {
                        for (Resource child : childResources) {
                            unvisitedResources.add(child);
                        }
                    }
                }
            }
        }

        return resourceIdsToStorageNodeMap;
    } finally {
        if (log.isDebugEnabled()) {
            stopwatchEnd(stopwatch, "Found storage node resources with alert defs in ");
        }
    }
}