Example usage for java.util.Deque.size()

List of usage examples for java.util.Deque.size()

Introduction

This page collects example usages of java.util.Deque.size() from open-source projects.

Prototype

int size();

Source Link

Document

Returns the number of elements in this deque.
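
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the sources below) illustrating this contract: size() starts at zero, grows with each push, and shrinks with each pop.

import java.util.ArrayDeque;
import java.util.Deque;

public class DequeSizeDemo {
    public static void main(String[] args) {
        Deque<String> stack = new ArrayDeque<>();
        System.out.println(stack.size()); // 0: a new deque is empty

        stack.push("a");
        stack.push("b");
        System.out.println(stack.size()); // 2: each push adds one element at the head

        stack.pop();
        System.out.println(stack.size()); // 1: pop removes the head element

        // Several examples below loop on "stack.size() > 0";
        // isEmpty() is an equivalent emptiness check.
        while (stack.size() > 0) {
            stack.pop();
        }
        System.out.println(stack.isEmpty()); // true
    }
}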

Usage

From source file: org.apache.hadoop.hive.ql.QTestUtil2.java

/**
 * Given the current configurations (e.g., hadoop version and execution
 * mode), return the correct file name to compare with the current test run
 * output.
 * 
 * @param outDir
 *            The directory where the reference log files are stored.
 * @param testName
 *            The test file name (terminated by ".out").
 * @return The file name appended with the configuration values if it
 *         exists.
 */
public String outPath(String outDir, String testName) {
    String ret = (new File(outDir, testName)).getPath();
    // List of configurations. Currently the list consists of hadoop version
    // and execution mode only
    List<String> configs = new ArrayList<String>();
    configs.add(this.hadoopVer);

    Deque<String> stack = new LinkedList<String>();
    StringBuilder sb = new StringBuilder();
    sb.append(testName);
    stack.push(sb.toString());

    // example file names are input1.q.out_0.20.0_minimr or
    // input2.q.out_0.17
    for (String s : configs) {
        sb.append('_');
        sb.append(s);
        stack.push(sb.toString());
    }
    while (stack.size() > 0) {
        String fileName = stack.pop();
        File f = new File(outDir, fileName);
        if (f.exists()) {
            ret = f.getPath();
            break;
        }
    }
    return ret;
}

From source file: org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy.java

ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, int[] pending, int[] reserved, int[] apps,
        int[] gran, int[] queues) {
    float tot = leafAbsCapacities(abs, queues);
    Deque<ParentQueue> pqs = new LinkedList<ParentQueue>();
    ParentQueue root = mockParentQueue(null, queues[0], pqs);
    when(root.getQueueName()).thenReturn("/");
    when(root.getAbsoluteUsedCapacity()).thenReturn(used[0] / tot);
    when(root.getAbsoluteCapacity()).thenReturn(abs[0] / tot);
    when(root.getAbsoluteMaximumCapacity()).thenReturn(maxCap[0] / tot);
    when(root.getQueuePath()).thenReturn("root");
    boolean preemptionDisabled = mockPreemptionStatus("root");
    when(root.getPreemptionDisabled()).thenReturn(preemptionDisabled);

    for (int i = 1; i < queues.length; ++i) {
        final CSQueue q;
        final ParentQueue p = pqs.removeLast();
        final String queueName = "queue" + ((char) ('A' + i - 1));
        if (queues[i] > 0) {
            q = mockParentQueue(p, queues[i], pqs);
        } else {
            q = mockLeafQueue(p, tot, i, abs, used, pending, reserved, apps, gran);
        }
        when(q.getParent()).thenReturn(p);
        when(q.getQueueName()).thenReturn(queueName);
        when(q.getAbsoluteUsedCapacity()).thenReturn(used[i] / tot);
        when(q.getAbsoluteCapacity()).thenReturn(abs[i] / tot);
        when(q.getAbsoluteMaximumCapacity()).thenReturn(maxCap[i] / tot);
        String parentPathName = p.getQueuePath();
        parentPathName = (parentPathName == null) ? "root" : parentPathName;
        String queuePathName = (parentPathName + "." + queueName).replace("/", "root");
        when(q.getQueuePath()).thenReturn(queuePathName);
        preemptionDisabled = mockPreemptionStatus(queuePathName);
        when(q.getPreemptionDisabled()).thenReturn(preemptionDisabled);
    }
    assert 0 == pqs.size();
    return root;
}

From source file: org.apache.phoenix.hive.HiveTestUtil.java

/**
 * Given the current configurations (e.g., hadoop version and execution mode), return
 * the correct file name to compare with the current test run output.
 *
 * @param outDir   The directory where the reference log files are stored.
 * @param testName The test file name (terminated by ".out").
 * @return The file name appended with the configuration values if it exists.
 */
public String outPath(String outDir, String testName) {
    String ret = (new File(outDir, testName)).getPath();
    // List of configurations. Currently the list consists of hadoop version and execution
    // mode only
    List<String> configs = new ArrayList<String>();
    configs.add(this.hadoopVer);

    Deque<String> stack = new LinkedList<String>();
    StringBuilder sb = new StringBuilder();
    sb.append(testName);
    stack.push(sb.toString());

    // example file names are input1.q.out_0.20.0_minimr or input2.q.out_0.17
    for (String s : configs) {
        sb.append('_');
        sb.append(s);
        stack.push(sb.toString());
    }
    while (stack.size() > 0) {
        String fileName = stack.pop();
        File f = new File(outDir, fileName);
        if (f.exists()) {
            ret = f.getPath();
            break;
        }
    }
    return ret;
}

From source file: org.apache.tajo.engine.planner.global.ParallelExecutionQueue.java

@Override
public synchronized int size() {
    int size = 0;
    for (Deque<ExecutionBlock> queue : executable) {
        size += queue.size();
    }
    return size;
}

From source file: org.apache.tajo.engine.planner.global.ParallelExecutionQueue.java

@Override
public synchronized ExecutionBlock[] next(ExecutionBlockId doneNow) {
    executed.add(doneNow);

    int remaining = 0;
    for (Deque<ExecutionBlock> queue : executable) {
        if (!queue.isEmpty() && isExecutableNow(queue.peekLast())) {
            LOG.info("Next executable block " + queue.peekLast());
            return new ExecutionBlock[] { queue.removeLast() };
        }
        remaining += queue.size();
    }
    return remaining > 0 ? new ExecutionBlock[0] : null;
}

From source file: org.apache.tez.dag.api.DAG.java

@Private
public synchronized DAGPlan createDag(Configuration tezConf, Credentials extraCredentials,
        Map<String, LocalResource> tezJarResources, LocalResource binaryConfig, boolean tezLrsAsArchive,
        ServicePluginsDescriptor servicePluginsDescriptor, JavaOptsChecker javaOptsChecker) {
    Deque<String> topologicalVertexStack = verify(true);
    verifyLocalResources(tezConf);

    DAGPlan.Builder dagBuilder = DAGPlan.newBuilder();
    dagBuilder.setName(this.name);

    if (this.callerContext != null) {
        dagBuilder.setCallerContext(DagTypeConverters.convertCallerContextToProto(callerContext));
    }
    if (this.dagInfo != null && !this.dagInfo.isEmpty()) {
        dagBuilder.setDagInfo(this.dagInfo);
    }

    // Setup default execution context.
    VertexExecutionContext defaultContext = getDefaultExecutionContext();
    verifyExecutionContext(defaultContext, servicePluginsDescriptor, "DAGDefault");
    if (defaultContext != null) {
        DAGProtos.VertexExecutionContextProto contextProto = DagTypeConverters.convertToProto(defaultContext);
        dagBuilder.setDefaultExecutionContext(contextProto);
    }

    if (!vertexGroups.isEmpty()) {
        for (VertexGroup av : vertexGroups) {
            GroupInfo groupInfo = av.getGroupInfo();
            PlanVertexGroupInfo.Builder groupBuilder = PlanVertexGroupInfo.newBuilder();
            groupBuilder.setGroupName(groupInfo.getGroupName());
            for (Vertex v : groupInfo.getMembers()) {
                groupBuilder.addGroupMembers(v.getName());
            }
            groupBuilder.addAllOutputs(groupInfo.outputs);
            for (Map.Entry<String, InputDescriptor> entry : groupInfo.edgeMergedInputs.entrySet()) {
                groupBuilder.addEdgeMergedInputs(
                        PlanGroupInputEdgeInfo.newBuilder().setDestVertexName(entry.getKey())
                                .setMergedInput(DagTypeConverters.convertToDAGPlan(entry.getValue())));
            }
            dagBuilder.addVertexGroups(groupBuilder);
        }
    }

    Credentials dagCredentials = new Credentials();
    if (extraCredentials != null) {
        dagCredentials.mergeAll(extraCredentials);
    }
    dagCredentials.mergeAll(credentials);
    if (!commonTaskLocalFiles.isEmpty()) {
        dagBuilder.addAllLocalResource(DagTypeConverters.convertToDAGPlan(commonTaskLocalFiles));
    }

    Preconditions.checkArgument(topologicalVertexStack.size() == vertices.size(),
            "size of topologicalVertexStack is:" + topologicalVertexStack.size() + " while size of vertices is:"
                    + vertices.size() + ", make sure they are the same in order to sort the vertices");
    while (!topologicalVertexStack.isEmpty()) {
        Vertex vertex = vertices.get(topologicalVertexStack.pop());
        // infer credentials, resources and parallelism from data source
        Resource vertexTaskResource = vertex.getTaskResource();
        if (vertexTaskResource == null) {
            vertexTaskResource = Resource.newInstance(
                    tezConf.getInt(TezConfiguration.TEZ_TASK_RESOURCE_MEMORY_MB,
                            TezConfiguration.TEZ_TASK_RESOURCE_MEMORY_MB_DEFAULT),
                    tezConf.getInt(TezConfiguration.TEZ_TASK_RESOURCE_CPU_VCORES,
                            TezConfiguration.TEZ_TASK_RESOURCE_CPU_VCORES_DEFAULT));
        }
        Map<String, LocalResource> vertexLRs = Maps.newHashMap();
        vertexLRs.putAll(vertex.getTaskLocalFiles());
        List<DataSourceDescriptor> dataSources = vertex.getDataSources();
        for (DataSourceDescriptor dataSource : dataSources) {
            if (dataSource.getCredentials() != null) {
                dagCredentials.addAll(dataSource.getCredentials());
            }
            if (dataSource.getAdditionalLocalFiles() != null) {
                TezCommonUtils.addAdditionalLocalResources(dataSource.getAdditionalLocalFiles(), vertexLRs,
                        "Vertex " + vertex.getName());
            }
        }
        if (tezJarResources != null) {
            TezCommonUtils.addAdditionalLocalResources(tezJarResources, vertexLRs,
                    "Vertex " + vertex.getName());
        }
        if (binaryConfig != null) {
            vertexLRs.put(TezConstants.TEZ_PB_BINARY_CONF_NAME, binaryConfig);
        }
        int vertexParallelism = vertex.getParallelism();
        VertexLocationHint vertexLocationHint = vertex.getLocationHint();
        if (dataSources.size() == 1) {
            DataSourceDescriptor dataSource = dataSources.get(0);
            if (vertexParallelism == -1 && dataSource.getNumberOfShards() > -1) {
                vertexParallelism = dataSource.getNumberOfShards();
            }
            if (vertexLocationHint == null && dataSource.getLocationHint() != null) {
                vertexLocationHint = dataSource.getLocationHint();
            }
        }
        if (vertexParallelism == -1) {
            Preconditions.checkState(vertexLocationHint == null,
                    "Cannot specify vertex location hint without specifying vertex parallelism. Vertex: "
                            + vertex.getName());
        } else if (vertexLocationHint != null) {
            Preconditions.checkState(vertexParallelism == vertexLocationHint.getTaskLocationHints().size(),
                    "vertex task location hint must equal vertex parallelism. Vertex: " + vertex.getName());
        }
        for (DataSinkDescriptor dataSink : vertex.getDataSinks()) {
            if (dataSink.getCredentials() != null) {
                dagCredentials.addAll(dataSink.getCredentials());
            }
        }

        VertexPlan.Builder vertexBuilder = VertexPlan.newBuilder();
        vertexBuilder.setName(vertex.getName());
        vertexBuilder.setType(PlanVertexType.NORMAL); // vertex type is implicitly NORMAL until TEZ-46.
        vertexBuilder
                .setProcessorDescriptor(DagTypeConverters.convertToDAGPlan(vertex.getProcessorDescriptor()));

        // Vertex ExecutionContext setup
        VertexExecutionContext execContext = vertex.getVertexExecutionContext();
        verifyExecutionContext(execContext, servicePluginsDescriptor, vertex.getName());
        if (execContext != null) {
            DAGProtos.VertexExecutionContextProto contextProto = DagTypeConverters.convertToProto(execContext);
            vertexBuilder.setExecutionContext(contextProto);
        }
        // End of VertexExecutionContext setup.

        if (vertex.getInputs().size() > 0) {
            for (RootInputLeafOutput<InputDescriptor, InputInitializerDescriptor> input : vertex.getInputs()) {
                vertexBuilder.addInputs(DagTypeConverters.convertToDAGPlan(input));
            }
        }
        if (vertex.getOutputs().size() > 0) {
            for (RootInputLeafOutput<OutputDescriptor, OutputCommitterDescriptor> output : vertex
                    .getOutputs()) {
                vertexBuilder.addOutputs(DagTypeConverters.convertToDAGPlan(output));
            }
        }

        if (vertex.getConf() != null && vertex.getConf().size() > 0) {
            ConfigurationProto.Builder confBuilder = ConfigurationProto.newBuilder();
            TezUtils.populateConfProtoFromEntries(vertex.getConf().entrySet(), confBuilder);
            vertexBuilder.setVertexConf(confBuilder);
        }

        //task config
        PlanTaskConfiguration.Builder taskConfigBuilder = PlanTaskConfiguration.newBuilder();
        taskConfigBuilder.setNumTasks(vertexParallelism);
        taskConfigBuilder.setMemoryMb(vertexTaskResource.getMemory());
        taskConfigBuilder.setVirtualCores(vertexTaskResource.getVirtualCores());

        try {
            taskConfigBuilder.setJavaOpts(TezClientUtils
                    .addDefaultsToTaskLaunchCmdOpts(vertex.getTaskLaunchCmdOpts(), tezConf, javaOptsChecker));
        } catch (TezException e) {
            throw new TezUncheckedException(
                    "Invalid TaskLaunchCmdOpts defined for Vertex " + vertex.getName() + " : " + e.getMessage(),
                    e);
        }

        taskConfigBuilder.setTaskModule(vertex.getName());
        if (!vertexLRs.isEmpty()) {
            taskConfigBuilder.addAllLocalResource(DagTypeConverters.convertToDAGPlan(vertexLRs));
        }

        Map<String, String> taskEnv = Maps.newHashMap(vertex.getTaskEnvironment());
        TezYARNUtils.setupDefaultEnv(taskEnv, tezConf, TezConfiguration.TEZ_TASK_LAUNCH_ENV,
                TezConfiguration.TEZ_TASK_LAUNCH_ENV_DEFAULT,
                TezConfiguration.TEZ_TASK_LAUNCH_CLUSTER_DEFAULT_ENV,
                TezConfiguration.TEZ_TASK_LAUNCH_CLUSTER_DEFAULT_ENV_DEFAULT, tezLrsAsArchive);
        for (Map.Entry<String, String> entry : taskEnv.entrySet()) {
            PlanKeyValuePair.Builder envSettingBuilder = PlanKeyValuePair.newBuilder();
            envSettingBuilder.setKey(entry.getKey());
            envSettingBuilder.setValue(entry.getValue());
            taskConfigBuilder.addEnvironmentSetting(envSettingBuilder);
        }

        if (vertexLocationHint != null) {
            if (vertexLocationHint.getTaskLocationHints() != null) {
                for (TaskLocationHint hint : vertexLocationHint.getTaskLocationHints()) {
                    PlanTaskLocationHint.Builder taskLocationHintBuilder = PlanTaskLocationHint.newBuilder();
                    // we can allow this later on if needed
                    if (hint.getAffinitizedTask() != null) {
                        throw new TezUncheckedException(
                                "Task based affinity may not be specified via the DAG API");
                    }

                    if (hint.getHosts() != null) {
                        taskLocationHintBuilder.addAllHost(hint.getHosts());
                    }
                    if (hint.getRacks() != null) {
                        taskLocationHintBuilder.addAllRack(hint.getRacks());
                    }

                    vertexBuilder.addTaskLocationHint(taskLocationHintBuilder);
                }
            }
        }

        if (vertex.getVertexManagerPlugin() != null) {
            vertexBuilder.setVertexManagerPlugin(
                    DagTypeConverters.convertToDAGPlan(vertex.getVertexManagerPlugin()));
        }

        for (Edge inEdge : vertex.getInputEdges()) {
            vertexBuilder.addInEdgeId(inEdge.getId());
        }

        for (Edge outEdge : vertex.getOutputEdges()) {
            vertexBuilder.addOutEdgeId(outEdge.getId());
        }

        vertexBuilder.setTaskConfig(taskConfigBuilder);
        dagBuilder.addVertex(vertexBuilder);
    }

    for (Edge edge : edges) {
        EdgePlan.Builder edgeBuilder = EdgePlan.newBuilder();
        edgeBuilder.setId(edge.getId());
        edgeBuilder.setInputVertexName(edge.getInputVertex().getName());
        edgeBuilder.setOutputVertexName(edge.getOutputVertex().getName());
        edgeBuilder.setDataMovementType(
                DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getDataMovementType()));
        edgeBuilder.setDataSourceType(
                DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getDataSourceType()));
        edgeBuilder.setSchedulingType(
                DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getSchedulingType()));
        edgeBuilder.setEdgeSource(DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getEdgeSource()));
        edgeBuilder.setEdgeDestination(
                DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getEdgeDestination()));
        if (edge.getEdgeProperty().getDataMovementType() == DataMovementType.CUSTOM) {
            if (edge.getEdgeProperty().getEdgeManagerDescriptor() != null) {
                edgeBuilder.setEdgeManager(
                        DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getEdgeManagerDescriptor()));
            } // else the AM will deal with this.
        }
        dagBuilder.addEdge(edgeBuilder);
    }

    if (dagAccessControls != null) {
        dagBuilder.setAclInfo(DagTypeConverters.convertDAGAccessControlsToProto(dagAccessControls));
    }

    ConfigurationProto.Builder confProtoBuilder = ConfigurationProto.newBuilder();
    if (!this.dagConf.isEmpty()) {
        TezUtils.populateConfProtoFromEntries(this.dagConf.entrySet(), confProtoBuilder);
    }
    // Copy historyLogLevel from tezConf into dagConf if it's not overridden in dagConf.
    String logLevel = this.dagConf.get(TezConfiguration.TEZ_HISTORY_LOGGING_LOGLEVEL);
    if (logLevel != null) {
        // The config is from dagConf, we have already added it to the proto above, just check if
        // the value is valid.
        if (!HistoryLogLevel.validateLogLevel(logLevel)) {
            throw new IllegalArgumentException("Config: " + TezConfiguration.TEZ_HISTORY_LOGGING_LOGLEVEL
                    + " is set to invalid value: " + logLevel);
        }
    } else {
        // Validate and set value from tezConf.
        logLevel = tezConf.get(TezConfiguration.TEZ_HISTORY_LOGGING_LOGLEVEL);
        if (logLevel != null) {
            if (!HistoryLogLevel.validateLogLevel(logLevel)) {
                throw new IllegalArgumentException("Config: " + TezConfiguration.TEZ_HISTORY_LOGGING_LOGLEVEL
                        + " is set to invalid value: " + logLevel);
            }
            PlanKeyValuePair.Builder kvp = PlanKeyValuePair.newBuilder();
            kvp.setKey(TezConfiguration.TEZ_HISTORY_LOGGING_LOGLEVEL);
            kvp.setValue(logLevel);
            confProtoBuilder.addConfKeyValues(kvp);
        }
    }
    dagBuilder.setDagConf(confProtoBuilder);

    if (dagCredentials != null) {
        dagBuilder.setCredentialsBinary(DagTypeConverters.convertCredentialsToProto(dagCredentials));
        TezCommonUtils.logCredentials(LOG, dagCredentials, "dag");
    }

    return dagBuilder.build();
}

From source file: org.jahia.utils.maven.plugin.support.MavenAetherHelperUtils.java

static String getTrailPadding(Deque<String> dependencyTrail) {
    StringBuffer padding = new StringBuffer();
    for (int i = 0; i < dependencyTrail.size(); i++) {
        padding.append("  ");
    }
    padding.append(dependencyTrail.peek());
    padding.append(" ");
    return padding.toString();
}

From source file: org.jasig.resource.aggr.ResourcesAggregatorImpl.java

/**
 * Iterate over the list of {@link BasicInclude} sub-classes using the {@link AggregatorCallback#willAggregate(BasicInclude, BasicInclude)}
 * and {@link AggregatorCallback#aggregate(Deque)} to generate an aggregated list of {@link BasicInclude} sub-classes.
 */
protected <T extends BasicInclude> List<T> aggregateBasicIncludes(List<T> original,
        AggregatorCallback<T> callback) throws IOException {
    final List<T> result = new LinkedList<T>();
    final Deque<T> currentAggregateList = new LinkedList<T>();
    for (final T originalElement : original) {
        // handle first loop iteration
        if (currentAggregateList.isEmpty()) {
            currentAggregateList.add(originalElement);
        } else {
            // test if 'originalElement' will aggregate with head element in currentAggregate 
            final T baseElement = currentAggregateList.getFirst();
            if (callback.willAggregate(originalElement, baseElement)) {
                // matches current criteria, add to currentAggregate
                currentAggregateList.add(originalElement);
            } else {
                // doesn't match criteria
                // generate new single aggregate from currentAggregateList
                final T aggregate = callback.aggregate(currentAggregateList);
                if (null != aggregate) {
                    // push result
                    result.add(aggregate);
                } else {
                    this.logger
                            .warn("Generated 0 byte aggregate from: " + generatePathList(currentAggregateList));
                }

                // zero out currentAggregateList
                currentAggregateList.clear();

                // add originalElement to empty list
                currentAggregateList.add(originalElement);
            }
        }
    }

    // flush the currentAggregateList
    if (currentAggregateList.size() > 0) {
        final T aggregate = callback.aggregate(currentAggregateList);
        if (null != aggregate) {
            result.add(aggregate);
        } else {
            this.logger.warn("Generated 0 byte aggregate from: " + generatePathList(currentAggregateList));
        }
    }

    return result;
}

From source file: org.jasig.resource.aggr.ResourcesAggregatorImpl.java

/**
 * Aggregate the specified Deque of elements into a single element. The provided MessageDigest is used for
 * building the file name based on the hash of the file contents. The callback is used for type specific
 * operations.
 */
protected <T extends BasicInclude> T aggregateList(final MessageDigest digest, final Deque<T> elements,
        final List<File> skinDirectories, final File outputRoot, final File alternateOutput,
        final String extension, final AggregatorCallback<T> callback) throws IOException {

    if (null == elements || elements.size() == 0) {
        return null;
    }

    // reference to the head of the list
    final T headElement = elements.getFirst();
    if (elements.size() == 1 && this.resourcesDao.isAbsolute(headElement)) {
        return headElement;
    }

    final File tempFile = File.createTempFile("working.", extension);
    final File aggregateOutputFile;
    try {
        //Make sure we're working with a clean MessageDigest
        digest.reset();
        TrimmingWriter trimmingWriter = null;
        try {
            final BufferedOutputStream bufferedFileStream = new BufferedOutputStream(
                    new FileOutputStream(tempFile));
            final MessageDigestOutputStream digestStream = new MessageDigestOutputStream(bufferedFileStream,
                    digest);
            final OutputStreamWriter aggregateWriter = new OutputStreamWriter(digestStream, this.encoding);
            trimmingWriter = new TrimmingWriter(aggregateWriter);

            for (final T element : elements) {
                final File resourceFile = this.findFile(skinDirectories, element.getValue());

                FileInputStream fis = null;
                try {
                    fis = new FileInputStream(resourceFile);
                    final BOMInputStream bomIs = new BOMInputStream(new BufferedInputStream(fis));
                    if (bomIs.hasBOM()) {
                        logger.debug("Stripping UTF-8 BOM from: " + resourceFile);
                    }
                    final Reader resourceIn = new InputStreamReader(bomIs, this.encoding);
                    if (element.isCompressed()) {
                        IOUtils.copy(resourceIn, trimmingWriter);
                    } else {
                        callback.compress(resourceIn, trimmingWriter);
                    }
                } catch (IOException e) {
                    throw new IOException(
                            "Failed to read '" + resourceFile + "' for skin: " + skinDirectories.get(0), e);
                } finally {
                    IOUtils.closeQuietly(fis);
                }
                trimmingWriter.write(SystemUtils.LINE_SEPARATOR);
            }
        } finally {
            IOUtils.closeQuietly(trimmingWriter);
        }

        if (trimmingWriter.getCharCount() == 0) {
            return null;
        }

        // temp file is created, get checksum
        final String checksum = Base64.encodeBase64URLSafeString(digest.digest());
        digest.reset();

        // create a new file name
        final String newFileName = checksum + extension;

        // Build the new file name and path
        if (alternateOutput == null) {
            final String elementRelativePath = FilenameUtils.getFullPath(headElement.getValue());
            final File directoryInOutputRoot = new File(outputRoot, elementRelativePath);
            // create the same directory structure in the output root
            directoryInOutputRoot.mkdirs();

            aggregateOutputFile = new File(directoryInOutputRoot, newFileName).getCanonicalFile();
        } else {
            aggregateOutputFile = new File(alternateOutput, newFileName).getCanonicalFile();
        }

        //Move the aggregate file into the correct location
        FileUtils.deleteQuietly(aggregateOutputFile);
        FileUtils.moveFile(tempFile, aggregateOutputFile);
    } finally {
        //Make sure the temp file gets deleted
        FileUtils.deleteQuietly(tempFile);
    }

    final String newResultValue = RelativePath.getRelativePath(outputRoot, aggregateOutputFile);

    this.logAggregation(elements, newResultValue);

    return callback.getAggregateElement(newResultValue, elements);
}

From source file: org.marketcetera.strategy.LanguageTestBase.java

/**
 * Performs a single iteration of an <code>ExecutionReport</code> test. 
 *
 * @param inExecutionReportCount an <code>int</code> value containing the number of execution reports expected
 * @param inSendOrders a <code>boolean</code> value indicating if the orders should be submitted or not (simulate failure)
 * @throws Exception if an error occurs
 */
private void doExecutionReportTest(final int inExecutionReportCount, boolean inSendOrders) throws Exception {
    AbstractRunningStrategy.setProperty("executionReportCount", "0");
    AbstractRunningStrategy.setProperty("price", "1000");
    AbstractRunningStrategy.setProperty("quantity", "500");
    AbstractRunningStrategy.setProperty("side", Side.Sell.toString());
    AbstractRunningStrategy.setProperty("symbol", "METC");
    AbstractRunningStrategy.setProperty("orderType", OrderType.Market.name());
    AbstractRunningStrategy.setProperty("quantity", "10000");
    // generate expected order
    List<ExecutionReport> expectedExecutionReports = new ArrayList<ExecutionReport>();
    StrategyImpl runningStrategy = getRunningStrategy(theStrategy);
    OrderID orderID = null;
    if (inSendOrders) {
        // this will trigger the strategy to submit an order
        runningStrategy.dataReceived(askEvent);
        // generate expected result
        OrderSingle expectedOrder = Factory.getInstance().createOrderSingle();
        expectedOrder.setPrice(new BigDecimal("1000"));
        expectedOrder.setQuantity(new BigDecimal("500"));
        expectedOrder.setSide(Side.Sell);
        expectedOrder.setInstrument(new Equity("METC"));
        expectedOrder.setOrderType(OrderType.Market);
        expectedOrder.setQuantity(new BigDecimal(10000));
        String orderIDString = AbstractRunningStrategy.getProperty("orderID");
        if (orderIDString != null) {
            orderID = new OrderID(orderIDString);
            expectedOrder.setOrderID(new OrderID(orderIDString));
        }
        if (MockRecorderModule.shouldSendExecutionReports) {
            expectedExecutionReports.addAll(generateExecutionReports(expectedOrder));
        }
    }
    runningStrategy
            .dataReceived(EventTestBase.generateEquityBidEvent(System.nanoTime(), System.currentTimeMillis(),
                    new Equity("METC"), "Q", new BigDecimal("100.00"), new BigDecimal("10000")));
    assertEquals(inExecutionReportCount,
            Integer.parseInt(AbstractRunningStrategy.getProperty("executionReportCount")));
    Deque<ReportBase> actualExecutionReports = ((AbstractRunningStrategy) runningStrategy.getRunningStrategy())
            .getExecutionReports(orderID);
    assertEquals(expectedExecutionReports.size(), actualExecutionReports.size());
    int index = 0;
    Collections.reverse(expectedExecutionReports);
    for (ReportBase actualExecutionReport : actualExecutionReports) {
        TypesTestBase.assertExecReportEquals(expectedExecutionReports.get(index++),
                (ExecutionReport) actualExecutionReport);
    }
    AbstractRunningStrategy.getProperties().clear();
    MockRecorderModule.ordersReceived = 0;
}