List of usage examples for java.util Deque addAll
boolean addAll(Collection<? extends E> c);
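For reference: Deque.addAll appends each element of the given collection at the tail, in the collection's iteration order, as if by repeated addLast, and returns true if the deque changed. A minimal, self-contained sketch (class and variable names are illustrative):

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;

public class DequeAddAllDemo {
    public static void main(String[] args) {
        Deque<String> deque = new ArrayDeque<>(Arrays.asList("a", "b"));
        // addAll inserts at the tail, preserving the collection's iteration order
        boolean changed = deque.addAll(Arrays.asList("c", "d"));
        System.out.println(changed); // true
        System.out.println(deque);   // [a, b, c, d]
    }
}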
From source file:cz.cuni.mff.ksi.jinfer.autoeditor.automatonvisualizer.layouts.graphviz.AutomatonToDot.java
public static <T> String convertToDot(final Automaton<T> automaton,
        final Transformer<Step<T>, String> edgeLabelTransformer) {
    final StringBuilder sb = new StringBuilder();
    sb.append("digraph finite_state_machine {\n");
    sb.append("\trankdir=LR;\n");
    sb.append("\tnodesep=\"50\";");
    sb.append("\tsplines=\"line\";");
    sb.append("\tranksep=\"100\";");
    sb.append("\tedge [label = \"\", dir = none, arrowhead=none, arrowtail=none];");
    sb.append("\tnode [shape = none, label = \"\", width = 0, height = 0];\n");
    final Deque<State<T>> queue = new ArrayDeque<State<T>>();
    queue.addAll(automaton.getDelta().keySet());
    while (!queue.isEmpty()) {
        final State<T> actual = queue.removeFirst();
        sb.append(actual.getName());
        sb.append(";\n");
        for (Step<T> step : automaton.getDelta().get(actual)) {
            sb.append("\t");
            sb.append(step.getSource().getName());
            sb.append(" -> ");
            sb.append(step.getDestination().getName());
            // edge attributes are set once by the global "edge [...]" statement above
            sb.append(";\n");
        }
    }
    sb.append("\n}");
    return sb.toString();
}
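The example uses addAll to seed a FIFO worklist in one call and then drains it with removeFirst. The same pattern in isolation, a minimal sketch with illustrative names:

import java.util.ArrayDeque;
import java.util.Collection;
import java.util.Deque;

final class Worklist {
    // Drains a FIFO worklist seeded from an initial collection of items.
    static <T> void process(Collection<T> initial) {
        Deque<T> queue = new ArrayDeque<>();
        queue.addAll(initial);            // seed the queue in one call
        while (!queue.isEmpty()) {
            T item = queue.removeFirst(); // FIFO: take from the head
            System.out.println(item);     // placeholder for per-item work
        }
    }
}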
From source file:edu.stanford.cfuller.imageanalysistools.filter.VariableSizeMeanFilter.java
@Override
public void apply(WritableImage im) {
    // calculate Laplacian of Image, calculate pseudo-residual (as in Boulanger, 2010)
    WritableImage residual = ImageFactory.createWritable(im);
    LaplacianFilterND LF = new LaplacianFilterND();
    LF.apply(residual);
    // for 3D, residual is Laplacian divided by sqrt(56)
    float norm = (float) Math.sqrt(56);
    // for 2D, residual is sqrt(30)
    // norm = Math.sqrt(30);
    for (ImageCoordinate ic : residual) {
        residual.setValue(ic, residual.getValue(ic) / norm);
    }
    // perform an octtree segmentation of the Image, using a criterion based on
    // relative variance of image and noise (as in Boulanger, 2010)
    OcttreeNode root = new OcttreeNode(ImageCoordinate.createCoordXYZCT(0, 0, 0, 0, 0),
            ImageCoordinate.cloneCoord(im.getDimensionSizes()));
    if (this.shouldSubDivide(root, im, residual)) {
        root.subDivide();
    }
    Deque<OcttreeNode> queue = new java.util.ArrayDeque<OcttreeNode>();
    queue.addAll(root.getChildren());
    List<OcttreeNode> leaves = new java.util.ArrayList<OcttreeNode>();
    while (!queue.isEmpty()) {
        OcttreeNode current = queue.pop();
        if (this.shouldSubDivide(current, im, residual) && current.subDivide()) {
            queue.addAll(current.getChildren());
        } else {
            leaves.add(current);
        }
    }
    for (OcttreeNode node : leaves) {
        double count = 0;
        float mean = 0;
        im.setBoxOfInterest(node.getBoxMin(), node.getBoxMax());
        for (ImageCoordinate ic : im) {
            mean += im.getValue(ic);
            count++;
        }
        mean /= count;
        for (ImageCoordinate ic : im) {
            im.setValue(ic, mean);
        }
        im.clearBoxOfInterest();
    }
}
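Here addAll feeds newly created children back into the work queue. Note that Deque.pop() removes from the head while addAll appends at the tail, so the subdivision proceeds breadth-first. A distilled sketch of the pattern, with a hypothetical Node type standing in for OcttreeNode:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

final class TreeFlatten {
    interface Node { List<? extends Node> children(); }

    // Collects all leaves breadth-first: pop() takes from the head,
    // addAll() appends children at the tail.
    static List<Node> leaves(Node root) {
        Deque<Node> queue = new ArrayDeque<>();
        queue.addAll(root.children());
        List<Node> leaves = new ArrayList<>();
        while (!queue.isEmpty()) {
            Node current = queue.pop();
            if (current.children().isEmpty()) {
                leaves.add(current);
            } else {
                queue.addAll(current.children());
            }
        }
        return leaves;
    }
}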
From source file:com.espertech.esper.epl.join.table.PropertySortedEventTable.java
private Collection<EventBean> normalizeCollection(SortedMap<Object, Set<EventBean>> submap) {
    if (submap.size() == 0) {
        return null;
    }
    if (submap.size() == 1) {
        return submap.get(submap.firstKey());
    }
    Deque<EventBean> result = new ArrayDeque<EventBean>();
    for (Map.Entry<Object, Set<EventBean>> entry : submap.entrySet()) {
        result.addAll(entry.getValue());
    }
    return result;
}
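The same flattening idiom, concatenating the value sets of a sorted map into one ordered collection via repeated addAll, in isolation (generic types are illustrative):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;

final class Flatten {
    // Concatenates the value sets of a sorted map, preserving key order;
    // the Deque serves simply as an order-preserving result collection.
    static <K, V> Deque<V> flatten(SortedMap<K, Set<V>> map) {
        Deque<V> result = new ArrayDeque<>();
        for (Map.Entry<K, Set<V>> entry : map.entrySet()) {
            result.addAll(entry.getValue());
        }
        return result;
    }
}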
From source file:com.cloudbees.jenkins.plugins.amazonecs.ECSService.java
/**
 * Checks whether the latest task definition matches the desired one. If yes, returns the ARN
 * of the existing one. If not, registers a new task definition with the desired parameters and
 * returns the new ARN.
 */
String registerTemplate(final ECSCloud cloud, final ECSTaskTemplate template, String clusterArn) {
    final AmazonECSClient client = getAmazonECSClient();
    String familyName = fullQualifiedTemplateName(cloud, template);
    final ContainerDefinition def = new ContainerDefinition().withName(familyName)
            .withImage(template.getImage()).withEnvironment(template.getEnvironmentKeyValuePairs())
            .withExtraHosts(template.getExtraHostEntries()).withMountPoints(template.getMountPointEntries())
            .withCpu(template.getCpu()).withPrivileged(template.getPrivileged()).withEssential(true);

    /*
     * At least one of memory or memoryReservation has to be set;
     * the form validation will highlight if the settings are inappropriate.
     */
    if (template.getMemoryReservation() > 0) /* this is the soft limit */
        def.withMemoryReservation(template.getMemoryReservation());
    if (template.getMemory() > 0) /* this is the hard limit */
        def.withMemory(template.getMemory());
    if (template.getEntrypoint() != null)
        def.withEntryPoint(StringUtils.split(template.getEntrypoint()));
    if (template.getJvmArgs() != null)
        def.withEnvironment(new KeyValuePair().withName("JAVA_OPTS").withValue(template.getJvmArgs()))
                .withEssential(true);
    if (template.getLogDriver() != null) {
        LogConfiguration logConfig = new LogConfiguration();
        logConfig.setLogDriver(template.getLogDriver());
        logConfig.setOptions(template.getLogDriverOptionsMap());
        def.withLogConfiguration(logConfig);
    }

    String lastToken = null;
    Deque<String> taskDefinitions = new LinkedList<String>();
    do {
        ListTaskDefinitionsResult listTaskDefinitions = client
                .listTaskDefinitions(new ListTaskDefinitionsRequest().withFamilyPrefix(familyName)
                        .withMaxResults(100).withNextToken(lastToken));
        taskDefinitions.addAll(listTaskDefinitions.getTaskDefinitionArns());
        lastToken = listTaskDefinitions.getNextToken();
    } while (lastToken != null);

    boolean templateMatchesExistingContainerDefinition = false;
    boolean templateMatchesExistingVolumes = false;
    boolean templateMatchesExistingTaskRole = false;
    DescribeTaskDefinitionResult describeTaskDefinition = null;
    if (taskDefinitions.size() > 0) {
        describeTaskDefinition = client.describeTaskDefinition(
                new DescribeTaskDefinitionRequest().withTaskDefinition(taskDefinitions.getLast()));
        templateMatchesExistingContainerDefinition = def
                .equals(describeTaskDefinition.getTaskDefinition().getContainerDefinitions().get(0));
        LOGGER.log(Level.INFO, "Match on container definition: {0}",
                new Object[] { templateMatchesExistingContainerDefinition });
        LOGGER.log(Level.FINE, "Match on container definition: {0}; template={1}; last={2}",
                new Object[] { templateMatchesExistingContainerDefinition, def,
                        describeTaskDefinition.getTaskDefinition().getContainerDefinitions().get(0) });
        templateMatchesExistingVolumes = ObjectUtils.equals(template.getVolumeEntries(),
                describeTaskDefinition.getTaskDefinition().getVolumes());
        LOGGER.log(Level.INFO, "Match on volumes: {0}", new Object[] { templateMatchesExistingVolumes });
        LOGGER.log(Level.FINE, "Match on volumes: {0}; template={1}; last={2}",
                new Object[] { templateMatchesExistingVolumes, template.getVolumeEntries(),
                        describeTaskDefinition.getTaskDefinition().getVolumes() });
        templateMatchesExistingTaskRole = template.getTaskrole() == null
                || template.getTaskrole().equals(describeTaskDefinition.getTaskDefinition().getTaskRoleArn());
        LOGGER.log(Level.INFO, "Match on task role: {0}", new Object[] { templateMatchesExistingTaskRole });
        LOGGER.log(Level.FINE, "Match on task role: {0}; template={1}; last={2}",
                new Object[] { templateMatchesExistingTaskRole, template.getTaskrole(),
                        describeTaskDefinition.getTaskDefinition().getTaskRoleArn() });
    }

    if (templateMatchesExistingContainerDefinition && templateMatchesExistingVolumes
            && templateMatchesExistingTaskRole) {
        LOGGER.log(Level.FINE, "Task Definition already exists: {0}",
                new Object[] { describeTaskDefinition.getTaskDefinition().getTaskDefinitionArn() });
        return describeTaskDefinition.getTaskDefinition().getTaskDefinitionArn();
    } else {
        final RegisterTaskDefinitionRequest request = new RegisterTaskDefinitionRequest()
                .withFamily(familyName).withVolumes(template.getVolumeEntries())
                .withContainerDefinitions(def);
        if (template.getTaskrole() != null) {
            request.withTaskRoleArn(template.getTaskrole());
        }
        final RegisterTaskDefinitionResult result = client.registerTaskDefinition(request);
        String taskDefinitionArn = result.getTaskDefinition().getTaskDefinitionArn();
        LOGGER.log(Level.FINE, "Created Task Definition {0}: {1}", new Object[] { taskDefinitionArn, request });
        LOGGER.log(Level.INFO, "Created Task Definition: {0}", new Object[] { taskDefinitionArn });
        return taskDefinitionArn;
    }
}
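The do/while loop above is the standard paginated-listing idiom: addAll accumulates each page into one deque until the service stops returning a next token, and getLast() then yields the most recently listed entry. A generic sketch of the same accumulation; the Page/Source types are hypothetical stand-ins, not part of any real SDK:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

final class Paginator {
    // A hypothetical page of results with an opaque continuation token.
    record Page(List<String> items, String nextToken) {}
    interface Source { Page fetch(String token); }

    // Accumulates every page into a single deque; each page lands at the tail,
    // so getLast() afterwards yields the most recently listed item.
    static Deque<String> listAll(Source source) {
        Deque<String> all = new ArrayDeque<>();
        String token = null;
        do {
            Page page = source.fetch(token);
            all.addAll(page.items());
            token = page.nextToken();
        } while (token != null);
        return all;
    }
}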
From source file:org.openscore.lang.compiler.modeller.ExecutableBuilder.java
private Workflow compileWorkFlow(List<Map<String, Map<String, Object>>> workFlowRawData,
        Map<String, String> imports, Workflow onFailureWorkFlow, boolean onFailureSection) {
    Deque<Task> tasks = new LinkedList<>();
    Validate.notEmpty(workFlowRawData, "Flow must have tasks in its workflow");
    PeekingIterator<Map<String, Map<String, Object>>> iterator = new PeekingIterator<>(
            workFlowRawData.iterator());
    boolean isOnFailureDefined = onFailureWorkFlow != null;
    String defaultFailure = isOnFailureDefined ? onFailureWorkFlow.getTasks().getFirst().getName()
            : FAILURE_RESULT;
    Set<String> taskNames = new HashSet<>();
    while (iterator.hasNext()) {
        Map<String, Map<String, Object>> taskRawData = iterator.next();
        Map<String, Map<String, Object>> nextTaskData = iterator.peek();
        String taskName = taskRawData.keySet().iterator().next();
        if (taskNames.contains(taskName)) {
            throw new RuntimeException("Task name: '" + taskName
                    + "' appears more than once in the workflow. Each task name in the workflow must be unique");
        }
        taskNames.add(taskName);
        Map<String, Object> taskRawDataValue;
        String message = "Task: " + taskName
                + " syntax is illegal.\nBelow task name, there should be a map of values in the format:\ndo:\n\top_name:";
        try {
            taskRawDataValue = taskRawData.values().iterator().next();
            if (MapUtils.isNotEmpty(taskRawDataValue) && taskRawDataValue.containsKey(LOOP_KEY)) {
                message = "Task: " + taskName
                        + " syntax is illegal.\nBelow the 'loop' keyword, there should be a map of values in the format:\nfor:\ndo:\n\top_name:";
                taskRawDataValue.putAll((Map<String, Object>) taskRawDataValue.remove(LOOP_KEY));
            }
        } catch (ClassCastException ex) {
            throw new RuntimeException(message);
        }
        String defaultSuccess;
        if (nextTaskData != null) {
            defaultSuccess = nextTaskData.keySet().iterator().next();
        } else {
            defaultSuccess = onFailureSection ? FAILURE_RESULT : SUCCESS_RESULT;
        }
        Task task = compileTask(taskName, taskRawDataValue, defaultSuccess, imports, defaultFailure);
        tasks.add(task);
    }
    if (isOnFailureDefined) {
        tasks.addAll(onFailureWorkFlow.getTasks());
    }
    return new Workflow(tasks);
}
From source file:io.cloudslang.lang.compiler.modeller.ExecutableBuilder.java
private WorkflowModellingResult compileWorkFlow(List<Map<String, Map<String, Object>>> workFlowRawData,
        Map<String, String> imports, Workflow onFailureWorkFlow, boolean onFailureSection, String namespace,
        SensitivityLevel sensitivityLevel) {
    List<RuntimeException> errors = new ArrayList<>();
    Deque<Step> steps = new LinkedList<>();
    Set<String> stepNames = new HashSet<>();
    Deque<Step> onFailureSteps = !(onFailureSection || onFailureWorkFlow == null)
            ? onFailureWorkFlow.getSteps() : new LinkedList<Step>();
    List<String> onFailureStepNames = getStepNames(onFailureSteps);
    boolean onFailureStepFound = onFailureStepNames.size() > 0;
    String defaultFailure = onFailureStepFound ? onFailureStepNames.get(0)
            : ScoreLangConstants.FAILURE_RESULT;
    PeekingIterator<Map<String, Map<String, Object>>> iterator = new PeekingIterator<>(
            workFlowRawData.iterator());
    while (iterator.hasNext()) {
        Map<String, Map<String, Object>> stepRawData = iterator.next();
        String stepName = getStepName(stepRawData);
        validateStepName(stepName, errors);
        if (stepNames.contains(stepName) || onFailureStepNames.contains(stepName)) {
            errors.add(new RuntimeException("Step name: '" + stepName
                    + "' appears more than once in the workflow. " + UNIQUE_STEP_NAME_MESSAGE_SUFFIX));
        }
        stepNames.add(stepName);
        Map<String, Object> stepRawDataValue;
        String message = "Step: " + stepName + " syntax is illegal.\nBelow step name, there should "
                + "be a map of values in the format:\ndo:\n\top_name:";
        try {
            stepRawDataValue = stepRawData.values().iterator().next();
            if (MapUtils.isNotEmpty(stepRawDataValue)) {
                boolean loopKeyFound = stepRawDataValue.containsKey(LOOP_KEY);
                boolean parallelLoopKeyFound = stepRawDataValue.containsKey(PARALLEL_LOOP_KEY);
                if (loopKeyFound) {
                    if (parallelLoopKeyFound) {
                        errors.add(new RuntimeException("Step: " + stepName
                                + " syntax is illegal.\nBelow step name, "
                                + "there can be either 'loop' or 'parallel_loop' key."));
                    }
                    message = "Step: " + stepName + " syntax is illegal.\nBelow the 'loop' keyword, there "
                            + "should be a map of values in the format:\nfor:\ndo:\n\top_name:";
                    @SuppressWarnings("unchecked")
                    Map<String, Object> loopRawData = (Map<String, Object>) stepRawDataValue.remove(LOOP_KEY);
                    stepRawDataValue.putAll(loopRawData);
                }
                if (parallelLoopKeyFound) {
                    message = "Step: " + stepName
                            + " syntax is illegal.\nBelow the 'parallel_loop' keyword, there "
                            + "should be a map of values in the format:\nfor:\ndo:\n\top_name:";
                    @SuppressWarnings("unchecked")
                    Map<String, Object> parallelLoopRawData = (Map<String, Object>) stepRawDataValue
                            .remove(PARALLEL_LOOP_KEY);
                    errors.addAll(preCompileValidator.checkKeyWords(stepName, SlangTextualKeys.PARALLEL_LOOP_KEY,
                            parallelLoopRawData, Collections.emptyList(), parallelLoopValidKeywords, null));
                    parallelLoopRawData.put(PARALLEL_LOOP_KEY, parallelLoopRawData.remove(FOR_KEY));
                    stepRawDataValue.putAll(parallelLoopRawData);
                }
            }
        } catch (ClassCastException ex) {
            stepRawDataValue = new HashMap<>();
            errors.add(new RuntimeException(message));
        }
        String defaultSuccess;
        Map<String, Map<String, Object>> nextStepData = iterator.peek();
        if (nextStepData != null) {
            defaultSuccess = nextStepData.keySet().iterator().next();
        } else {
            defaultSuccess = onFailureSection ? ScoreLangConstants.FAILURE_RESULT : SUCCESS_RESULT;
        }
        String onFailureStepName = onFailureStepFound ? onFailureStepNames.get(0) : null;
        StepModellingResult stepModellingResult = compileStep(stepName, stepRawDataValue, defaultSuccess,
                imports, defaultFailure, namespace, onFailureStepName, onFailureSection, sensitivityLevel);
        errors.addAll(stepModellingResult.getErrors());
        steps.add(stepModellingResult.getStep());
    }
    if (onFailureStepFound) {
        steps.addAll(onFailureSteps);
    }
    return new WorkflowModellingResult(new Workflow(steps), errors);
}
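Both ExecutableBuilder variants close the same way: the first on-failure step name supplies the default failure target, and the whole on-failure branch is appended after the main steps with addAll. A reduced sketch of that assembly; the String steps and the "FAILURE" fallback are illustrative placeholders for the real Step type and result constant:

import java.util.Deque;
import java.util.LinkedList;
import java.util.List;

final class OnFailureAssembly {
    // The first on-failure step is the default failure target; the whole
    // branch is appended after the main steps, preserving its order.
    static Deque<String> assemble(List<String> mainSteps, Deque<String> onFailureSteps) {
        String defaultFailure = onFailureSteps.isEmpty() ? "FAILURE" : onFailureSteps.getFirst();
        Deque<String> steps = new LinkedList<>(mainSteps);
        steps.addAll(onFailureSteps); // on-failure branch goes last
        System.out.println("default failure target: " + defaultFailure);
        return steps;
    }
}

A LinkedList-backed Deque suits this use because the code needs both getFirst() and order-preserving tail appends, not indexed access.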
From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java
/**
 * This takes the LQIs grouped by likely regions and attempts to bulk load them. Any failures
 * are re-queued for another pass with the groupOrSplitPhase.
 */
protected void bulkLoadPhase(final HTable table, final HConnection conn, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
    // atomically bulk load the groups.
    Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
        final byte[] first = e.getKey().array();
        final Collection<LoadQueueItem> lqis = e.getValue();
        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> toRetry = tryAtomicRegionLoad(conn, table.getTableName(), first, lqis);
                return toRetry;
            }
        };
        loadingFutures.add(pool.submit(call));
    }
    // get all the results.
    for (Future<List<LoadQueueItem>> future : loadingFutures) {
        try {
            List<LoadQueueItem> toRetry = future.get();
            // LQIs that are requeued to be regrouped.
            queue.addAll(toRetry);
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                // At this point something unrecoverable has happened.
                // TODO Implement bulk load recovery
                throw new IOException("BulkLoad encountered an unrecoverable problem", t);
            }
            LOG.error("Unexpected execution exception during bulk load", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during bulk load", e1);
            throw new IllegalStateException(e1);
        }
    }
}
From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java
/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();
        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw new IllegalStateException(e1);
        }
    }
    return regionGroups;
}
From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java
/**
 * This takes the LQIs grouped by likely regions and attempts to bulk load them. Any failures
 * are re-queued for another pass with the groupOrSplitPhase.
 */
protected void bulkLoadPhase(final HTable table, final HConnection conn, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
    // atomically bulk load the groups.
    Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
        final byte[] first = e.getKey().array();
        final Collection<LoadQueueItem> lqis = e.getValue();
        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> toRetry = tryAtomicRegionLoad(conn, table.getName(), first, lqis);
                return toRetry;
            }
        };
        loadingFutures.add(pool.submit(call));
    }
    // get all the results.
    for (Future<List<LoadQueueItem>> future : loadingFutures) {
        try {
            List<LoadQueueItem> toRetry = future.get();
            // LQIs that are requeued to be regrouped.
            queue.addAll(toRetry);
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                // At this point something unrecoverable has happened.
                // TODO Implement bulk load recovery
                throw new IOException("BulkLoad encountered an unrecoverable problem", t);
            }
            LOG.error("Unexpected execution exception during bulk load", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during bulk load", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
}
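The three HBase methods above share one retry cycle: drain the deque into worker tasks, collect each worker's result, and addAll any items handed back so the next pass can regroup or reload them. A stripped-down sketch of that cycle; the Item type and worker function are illustrative stand-ins for LoadQueueItem and tryAtomicRegionLoad/groupOrSplit:

import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.function.Function;

final class RetryQueue {
    // One pass: drain the queue into the pool, then requeue whatever
    // each worker reports as needing another attempt.
    static <T> void pass(Deque<T> queue, ExecutorService pool, Function<T, List<T>> worker)
            throws InterruptedException, ExecutionException {
        List<Future<List<T>>> futures = new ArrayList<>();
        while (!queue.isEmpty()) {
            final T item = queue.remove();
            Callable<List<T>> task = () -> worker.apply(item);
            futures.add(pool.submit(task));
        }
        for (Future<List<T>> f : futures) {
            List<T> retry = f.get();
            if (retry != null) {
                queue.addAll(retry); // failed or split items go back for the next pass
            }
        }
    }
}

Draining fully before requeuing matters here: items added by addAll during result collection are left for the next pass rather than being resubmitted mid-pass, which is exactly how the HBase code separates its grouping and loading phases.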