Example usage for java.util.Set.toArray()

List of usage examples for java.util.Set.toArray()

Introduction

On this page you can find usage examples for java.util.Set.toArray().

Prototype

Object[] toArray();

Document

Returns an array containing all of the elements in this set.
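
A minimal, self-contained sketch of the call; the class name and set contents here are illustrative, not taken from any of the projects below:

import java.util.HashSet;
import java.util.Set;

public class SetToArrayDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>();
        colors.add("red");
        colors.add("green");
        colors.add("blue");

        // The no-arg form always returns Object[], whatever the set's
        // element type; a HashSet's iteration order is unspecified.
        Object[] elements = colors.toArray();
        System.out.println(elements.length); // 3
    }
}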

Usage

From source file:com.headstrong.fusion.core.recovery.ProcessRecoveryContext.java

/**
 * Returns the unprocessed messages for the process.
 *
 * @param processContext the process context identifying the process.
 * @return List of unprocessed messages.
 * @throws Exception 
 */
public List<Recoverable> getUnProcessedMessages(ProcessContext processContext) throws Exception {

    RecoveryDataStore recStore = this.getRecoveryDataStore();

    List<RecoveryMessage> recoveryMessages = null;

    // Need to handle a multicast process differently, as the messages to
    // recover are no longer just the difference between the reclog and processed tables.
    if (((CamelProcessContext) processContext).isMulticastProcess()) {

        List<RouterConfig> routerList = processContext.getProcessConfigurationModel().getRouters();

        recoveryMessages = new ArrayList<RecoveryMessage>();
        // Get the messages which have reached the reclog but not the
        // processed table at all.
        recoveryMessages
                .addAll(recStore.getUnProcessedMulticastRecoveryMessages(processContext.getProcessId(), null));

        for (RouterConfig routerConfig : routerList) {
            if (routerConfig instanceof MultiCastRouterConfig) {
                recoveryMessages.addAll(recStore.getUnProcessedMulticastRecoveryMessages(
                        processContext.getProcessId(), routerConfig.getId()));
            }
        }

    } else {
        // List of messages still persisted (not yet processed) in the recovery store.
        recoveryMessages = recStore.getUnProcessedRecoverMessages(processContext.getProcessId());
    }

    // For each recovery message. The assumption is that the table
    // contains a single record per message id.
    List<Recoverable> messagesUndelivered = new ArrayList<Recoverable>();
    if (recoveryMessages != null) {

        for (RecoveryMessage recoveryMessage : recoveryMessages) {

            if (((CamelProcessContext) processContext).isMulticastProcess()) {
                List<RouterConfig> routerList = processContext.getProcessConfigurationModel().getRouters();

                Set<String> processedEndpointSet = ((MulticastRecoveryMessage) recoveryMessage)
                        .getEndPointSet();
                Set<String> actualSet = null;

                // Finding the partially processed ones among the messages returned by query 2.
                for (RouterConfig routerConfig : routerList) {
                    if (routerConfig instanceof MultiCastRouterConfig) {
                        if (processedEndpointSet.size() > 0) {
                            actualSet = new HashSet<String>();
                            for (EndPointSequence target : ((MultiCastRouterConfig) routerConfig)
                                    .getTargets()) {
                                actualSet.add(target.isDeadEnd() ? target.getEndPointId()
                                        : getDestinationEndpoint(processContext, target.getEndPointId()));
                            }
                        }
                    }
                }
                if (processedEndpointSet.size() > 0 && processedEndpointSet.equals(actualSet)) {
                    continue;
                }

            }

            // The message was not fully processed; rebuild it for redelivery.
            Recoverable recoverable = new Recoverable();
            recoverable.setProcessId(recoveryMessage.getProcessId());
            recoverable.setMessageId(recoveryMessage.getMessageId());
            recoverable.setSavepoint(recoveryMessage.getSavePoint());
            recoverable.setSessionId(processContext.getProcessRunId());
            ByteArrayInputStream bis = new ByteArrayInputStream(recoveryMessage.getData());
            // FIXME :: this defeats the purpose of having
            // ProcessContext interface

            Exchange exchange = new DefaultExchange(((CamelProcessContext) processContext).getCamelContext());

            ExchangeConverter.read(exchange, bis);
            recoverable.setExchange(exchange);
            if (recoveryMessage instanceof MulticastRecoveryMessage) {
                Set<String> processedEndPoints = ((MulticastRecoveryMessage) recoveryMessage).getEndPointSet();
                if (processedEndPoints != null && !processedEndPoints.isEmpty()) {
                    String separatedProcessedEndpoints = StringUtils.join(processedEndPoints.toArray(), ",");
                    exchange.getIn().setHeader("processedEndpoints", separatedProcessedEndpoints);
                }
            }
            messagesUndelivered.add(recoverable);

            // #Redmine Bug 664: ensure that during recovery the undelivered
            // messages retain the right session id for a complete run. Prior to
            // this fix, the old session id was kept in the message_reclog table.
            recoveryMessage.setSessionId(processContext.getProcessRunId());
            recoveryMessage.setTimeStamp(new Date());
            recStore.saveRecoveryMessage(recoveryMessage);
        }
    }
    return messagesUndelivered;
}
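
The StringUtils.join(processedEndPoints.toArray(), ",") call above flattens the processed-endpoint set into a single comma-separated header value. A standalone sketch of the same idiom; the package shown is commons-lang3, though the project above may import the older org.apache.commons.lang:

import java.util.LinkedHashSet;
import java.util.Set;

import org.apache.commons.lang3.StringUtils;

public class JoinSetDemo {
    public static void main(String[] args) {
        Set<String> endpoints = new LinkedHashSet<>();
        endpoints.add("endpoint-a");
        endpoints.add("endpoint-b");

        // StringUtils.join(Object[], String) accepts the Object[] produced by
        // toArray(); LinkedHashSet keeps insertion order, so the output is stable.
        String csv = StringUtils.join(endpoints.toArray(), ",");
        System.out.println(csv); // endpoint-a,endpoint-b
    }
}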

From source file:de.tudarmstadt.ukp.dkpro.lexsemresource.graph.AdjMatrixRandomWalkJUNG.java

/**
 * Randomly picks a child of the given source node, and burns it, i.e. puts
 * it into the graph, along with the corresponding edge
 *
 * @param sourceVertex
 * @param graph
 * @throws LexicalSemanticResourceException
 * @throws UnsupportedOperationException
 * @return The child burned, if the source has children. The source itself,
 *         otherwise.
 */
private Entity burnChild(Entity sourceVertex, DirectedGraph<Entity, EntityGraphEdge> graph)
        throws LexicalSemanticResourceException {

    // Set<Entity> children =
    // lexSemResource.getChildren(sourceVertex.getVertexEntity());
    Set<Entity> children = adjMatrix.getAdjacencies(sourceVertex);

    int numChildren = children.size();
    Entity child = null;
    Random generator = new Random();

    if (numChildren != 0) {

        // randomly select one of the children:
        int pickedChild = generator.nextInt(numChildren);
        child = (Entity) children.toArray()[pickedChild];

        // check if the child is already in the graph. If not, create it:
        if (!graph.getVertices().contains(child)) {

            graph.addVertex(child);
            // logger.info("Burned " +
            // childVertex.getVertexEntity().toString());

            // graph.addEdge(new DirectedSparseEdge(sourceVertex,
            // childVertex));
            // edgeSet.add("(" + sourceVertex.getVertexEntity() + ", " +
            // child + ")");
            graphSize++;
        }

        // if the child is already in the graph, find it:

        // now check for edge existence. if edge not there, add it
        if (graph.findEdge(sourceVertex, child) == null) {
            graph.addEdge(new EntityGraphEdge(sourceVertex, child), sourceVertex, child);
        }

        return child;
    } else {
        return sourceVertex;
    }
}
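
burnChild indexes into children.toArray() to draw a random element, paying an O(n) copy per draw. A generic sketch of the same pattern as a reusable helper; pickRandom is a hypothetical name, not part of the project above:

import java.util.HashSet;
import java.util.Random;
import java.util.Set;

public class RandomPick {

    // Hypothetical helper: draw a uniformly random element from a set.
    // Each call pays an O(n) copy via toArray().
    static <T> T pickRandom(Set<T> set, Random random) {
        if (set.isEmpty()) {
            throw new IllegalArgumentException("set must not be empty");
        }
        Object[] elements = set.toArray();
        @SuppressWarnings("unchecked")
        T picked = (T) elements[random.nextInt(elements.length)];
        return picked;
    }

    public static void main(String[] args) {
        Set<String> s = new HashSet<>();
        s.add("a");
        s.add("b");
        System.out.println(pickRandom(s, new Random()));
    }
}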

From source file:com.amalto.workbench.actions.XSDAddComplexTypeElementAction.java

public void addAnnotion(XSDAnnotationsStructure struc, XSDAnnotation xsdannotationparent) {
    Map<String, List<String>> infor = cloneXSDAnnotation(xsdannotationparent);
    Set<String> keys = infor.keySet();
    // Snapshot the key set once; calling keys.toArray() inside the loop
    // would copy the set on every iteration.
    Object[] keyArray = keys.toArray();
    for (int i = 0; i < keyArray.length; i++) {
        String key = (String) keyArray[i];
        List<String> lists = infor.get(key);
        try {
            struc.setAccessRole(lists, false,
                    (IStructuredContentProvider) page.getTreeViewer().getContentProvider(),
                    key);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
    }
}

From source file:gobblin.util.PullFileLoader.java

public PullFileLoader(Path rootDirectory, FileSystem fs, Collection<String> javaPropsPullFileExtensions,
        Collection<String> hoconPullFileExtensions) {

    Set<String> commonExtensions = Sets.intersection(Sets.newHashSet(javaPropsPullFileExtensions),
            Sets.newHashSet(hoconPullFileExtensions));
    Preconditions.checkArgument(commonExtensions.isEmpty(),
            "Java props and HOCON pull file extensions intersect: "
                    + Arrays.toString(commonExtensions.toArray()));

    this.rootDirectory = rootDirectory;
    this.fs = fs;
    this.javaPropsPullFileFilter = new ExtensionFilter(javaPropsPullFileExtensions);
    this.hoconPullFileFilter = new ExtensionFilter(hoconPullFileExtensions);
}
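
Arrays.toString(commonExtensions.toArray()) above is a compact way to render a set inside an error message. A standalone sketch of the same combination; Guava's Sets.intersection returns a lazy, unmodifiable view, and toArray() is what materializes it (the extension names are illustrative):

import java.util.Arrays;
import java.util.Set;

import com.google.common.collect.Sets;

public class IntersectionDemo {
    public static void main(String[] args) {
        Set<String> javaPropsExtensions = Sets.newHashSet("properties", "pull");
        Set<String> hoconExtensions = Sets.newHashSet("conf", "pull");

        // Sets.intersection returns an unmodifiable live view of both sets;
        // toArray() copies it into a plain Object[] for formatting.
        Set<String> common = Sets.intersection(javaPropsExtensions, hoconExtensions);
        System.out.println(Arrays.toString(common.toArray())); // [pull]
    }
}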

From source file:edu.pitt.dbmi.ccd.queue.service.AlgorithmSlurmService.java

public Future<Void> submitJobtoSlurm(JobQueueInfo jobQueueInfo) {
    Long queueId = jobQueueInfo.getId();
    String fileName = jobQueueInfo.getFileName() + ".txt";
    String commands = jobQueueInfo.getCommands();
    String tmpDirectory = jobQueueInfo.getTmpDirectory();

    Properties p = new Properties();

    // Upload dataset(s) to the remote data storage
    Set<UserAccount> userAccounts = jobQueueInfo.getUserAccounts();
    UserAccount userAccount = (UserAccount) userAccounts.toArray()[0];
    String username = userAccount.getUsername();

    Path checkUserDirTemplate = Paths.get(workspace, jobTemplates, checkUserDir);
    String checkUserDirTemplateDir = checkUserDirTemplate.toAbsolutePath().toString();

    p.setProperty("causalUser", username);
    p.setProperty("tmp", tempFolder);
    p.setProperty("results", resultFolder);
    p.setProperty("algorithm", algorithmResultFolder);

    String partition = hpcPartition;
    int walltime = hpcWallTime;

    Set<HpcParameter> hpcParameters = jobQueueInfo.getHpcParameters();
    if (hpcParameters != null && !hpcParameters.isEmpty()) {
        for (HpcParameter param : hpcParameters) {
            String key = param.getParameterKey();
            String value = param.getParameterValue();
            // Recognized HPC parameters: partition name and wall time (hours).
            switch (key) {
            case "partition":
                partition = value;
                break;
            case "walltime":
                walltime = Integer.parseInt(value);
                break;
            }
        }
    }

    p.setProperty("partition", partition);
    p.setProperty("walltime", String.format("%02d:00:00", walltime));

    List<String> cmdList = new LinkedList<>();
    cmdList.addAll(Arrays.asList(commands.split(";")));

    String datasets = null;
    for (int i = 0; i < cmdList.size(); i++) {
        String cmd = cmdList.get(i);
        if (cmd.equalsIgnoreCase("--data")) {
            datasets = cmdList.get(i + 1);
            break;
        }
    }

    List<String> datasetList = new LinkedList<>();
    // Guard against a missing --data argument, which leaves datasets null.
    if (datasets != null) {
        datasetList.addAll(Arrays.asList(datasets.split(",")));
    }
    // The current dataset path is the one on the grid
    datasetList.forEach(dataset -> {
        // Extract fileName from the dataset
        Path dataPath = Paths.get(remotedataspace, username, dataFolder);
        String dataFile = dataset.replace(dataPath.toAbsolutePath().toString(), "");

        //The dataset's source path
        dataPath = Paths.get(workspace, username, dataFolder, dataFile);
        Path scriptPath = Paths.get(remoteworkspace, checkUserDirScript);
        String scriptDir = scriptPath.toAbsolutePath().toString() + username + ".sh";
        LOGGER.info("submitJobtoSlurm: checkUserDirScript: " + scriptDir);
        try {
            client.uploadFile(checkUserDirTemplateDir, p, scriptDir, dataPath.toAbsolutePath().toString(),
                    dataset);
        } catch (Exception e) {
            LOGGER.error("Failed to upload dataset " + dataset, e);
        }
    });

    String knowledges = null;
    for (int i = 0; i < cmdList.size(); i++) {
        String cmd = cmdList.get(i);
        if (cmd.equalsIgnoreCase("--knowledge")) {
            knowledges = cmdList.get(i + 1);
            break;
        }
    }

    if (knowledges != null) {
        List<String> knowledgeList = new LinkedList<>();
        knowledgeList.addAll(Arrays.asList(knowledges.split(",")));
        knowledgeList.forEach(knowledge -> {
            // Extract fileName from the knowledge path
            Path knowledgePath = Paths.get(remotedataspace, username, dataFolder);
            String knowledgeFile = knowledge.replace(knowledgePath.toAbsolutePath().toString(), "");

            //The knowledge's source path
            knowledgePath = Paths.get(workspace, username, dataFolder, knowledgeFile);
            Path scriptPath = Paths.get(remoteworkspace, checkUserDirScript);
            String scriptDir = scriptPath.toAbsolutePath().toString() + username + ".sh";
            LOGGER.info("submitJobtoSlurm: checkUserDirScript: " + scriptDir);
            try {
                client.uploadFile(checkUserDirTemplateDir, p, scriptDir,
                        knowledgePath.toAbsolutePath().toString(), knowledge);
            } catch (Exception e) {
                LOGGER.error("Failed to upload knowledge file " + knowledge, e);
            }
        });
    }

    cmdList.add("--out");
    cmdList.add(tmpDirectory);

    String errorFileName = String.format("error_%s", fileName);
    Path error = Paths.get(tmpDirectory, errorFileName);

    // Redirect Error to File
    cmdList.add("2>");
    cmdList.add(error.toAbsolutePath().toString());

    StringBuilder sb = new StringBuilder();
    cmdList.forEach(cmd -> {
        sb.append(cmd);
        sb.append(" ");
    });
    LOGGER.info("Algorithm command: " + sb.toString());

    try {

        // Submit a job & Get remote job Id
        p.setProperty("email", userAccount.getPerson().getEmail());
        p.setProperty("cmd", sb.toString());
        Path causalJobTemplate = Paths.get(workspace, jobTemplates, causalJob);
        String causalJobTemplateDir = causalJobTemplate.toAbsolutePath().toString();
        Path scriptPath = Paths.get(remoteworkspace, username, runSlurmJobScript);
        String scriptDir = scriptPath.toAbsolutePath().toString() + queueId + ".sh";
        LOGGER.info("submitJobtoSlurm: runSlurmJobScript: " + scriptDir);
        long pid = client.submitJob(causalJobTemplateDir, p, scriptDir);

        JobQueueInfo queuedJobInfo = jobQueueInfoService.findOne(queueId);
        LOGGER.info("Set Job's pid to be: " + pid);
        queuedJobInfo.setPid(pid);
        jobQueueInfoService.saveJobIntoQueue(queuedJobInfo);

    } catch (Exception exception) {
        LOGGER.error("Algorithm did not run successfully.", exception);
    }

    return new AsyncResult<>(null);
}
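
Note that userAccounts.toArray()[0] above copies the entire set just to read one element, and for a hash-based set "first" is whatever the iteration order happens to yield. A sketch of a cheaper equivalent, assuming any element will do; firstElement is a hypothetical helper, not part of the project above:

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

public class FirstElement {

    // Equivalent to set.toArray()[0] without copying the whole set; which
    // element comes back is still unspecified for hash-based sets.
    static <T> T firstElement(Set<T> set) {
        Iterator<T> it = set.iterator();
        if (!it.hasNext()) {
            throw new IllegalStateException("set is empty");
        }
        return it.next();
    }

    public static void main(String[] args) {
        Set<String> users = new HashSet<>();
        users.add("alice");
        System.out.println(firstElement(users)); // alice
    }
}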

From source file:it.unibo.alchemist.language.protelis.datatype.ArrayTupleImpl.java

@Override
public Tuple intersection(final Tuple t) {
    final Set<Object> l = new HashSet<>();
    final Set<Object> lIn = new HashSet<>(Arrays.asList(a));
    for (final Object o : t) {
        if (lIn.contains(o)) {
            l.add(o);
        }
    }
    return Tuples.create(l.toArray());
}
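
Here toArray() hands Tuples.create a plain Object[], but because l is a HashSet the element order is unspecified. A sketch of the same intersection built on a LinkedHashSet instead, which makes the array order deterministic (the class name and inputs are illustrative):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class OrderedIntersectionDemo {
    public static void main(String[] args) {
        Object[] a = {1, 2, 3, 2};
        Object[] b = {2, 3, 4};

        // LinkedHashSet preserves encounter order, so the array produced by
        // toArray() is deterministic; a HashSet gives no such guarantee.
        Set<Object> lookup = new LinkedHashSet<>(Arrays.asList(b));
        Set<Object> result = new LinkedHashSet<>();
        for (Object o : a) {
            if (lookup.contains(o)) {
                result.add(o);
            }
        }
        System.out.println(Arrays.toString(result.toArray())); // [2, 3]
    }
}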

From source file:com.redhat.rhn.domain.action.config.ConfigUploadActionFormatter.java

private void displayChannels(StringBuffer buffy, Set channelSet) {
    /* Most of the time there is only going to be one channel, because it
     * will usually be one server's sandbox channel; therefore, deal with
     * one channel as a special case. As of this writing, there is no way
     * through the web UI to schedule a config upload action for multiple
     * servers.
     */
    if (channelSet.size() == 1) {
        ConfigChannel channel = ((ConfigChannelAssociation) channelSet.toArray()[0]).getConfigChannel();

        //Display will be:
        //Destination Configuration Channel: blah
        //where blah is the channel display name with link

        buffy.append(LocalizationService.getInstance().getMessage("config.upload.onechannel",
                renderChannel(channel)));
        buffy.append("<br />");
    } else if (channelSet.size() > 1) {

        buffy.append(renderHeading("config.upload.channels"));

        Iterator channels = channelSet.iterator();
        // Since you can only upload files into local channels (only sandbox
        // right now), there shouldn't be multiple entries for the same channel.
        while (channels.hasNext()) {
            ConfigChannel channel = ((ConfigChannelAssociation) channels.next()).getConfigChannel();
            buffy.append(renderChannel(channel));
            buffy.append("<br />");
        }
    }
    //else don't display destination info (invalid config upload action!)
}