Example usage for java.lang Byte toString

List of usage examples for java.lang Byte toString

Introduction

On this page you can find usage examples for java.lang.Byte.toString().

Prototype

public String toString() 

Document

Returns a String object representing this Byte's value.
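
For reference, a minimal self-contained sketch of this method, plus the related static Byte.toString(byte) overload, against the standard JDK (the values are illustrative):

public class ByteToStringExample {
    public static void main(String[] args) {
        Byte boxed = Byte.valueOf((byte) 127);

        // instance method: java.lang.Byte#toString()
        String s1 = boxed.toString();          // "127"

        // static overload: java.lang.Byte#toString(byte)
        String s2 = Byte.toString((byte) -8);  // "-8"

        // round trip back to a Byte
        Byte parsed = Byte.valueOf(s1);        // 127

        System.out.println(s1 + " " + s2 + " " + parsed);
    }
}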

Usage

From source file:org.vertx.java.http.eventbusbridge.integration.MessageSendTest.java

@Test
public void testSendingByteXml() throws IOException {
    final EventBusMessageType messageType = EventBusMessageType.Byte;
    final Byte sentByte = Byte.MAX_VALUE;
    Map<String, String> expectations = createExpectations("someaddress",
            Base64.encodeAsString(sentByte.toString()), messageType);
    Handler<Message> messageConsumerHandler = new MessageSendHandler(sentByte, expectations);
    vertx.eventBus().registerHandler(expectations.get("address"), messageConsumerHandler);
    String body = TemplateHelper.generateOutputUsingTemplate(SEND_REQUEST_TEMPLATE_XML, expectations);
    HttpRequestHelper.sendHttpPostRequest(url, body, (VertxInternal) vertx, Status.ACCEPTED.getStatusCode(),
            MediaType.APPLICATION_XML);
}

From source file:org.apache.axis2.databinding.utils.ConverterUtil.java

public static String convertToString(Byte o) {
    return o.toString();
}
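
Note that the Axis2 converter above simply delegates to Byte#toString() and will throw a NullPointerException if o is null. A null-tolerant variant (not part of ConverterUtil, shown only for comparison) could look like this:

public static String convertToStringNullSafe(Byte o) {
    // returns null instead of throwing a NullPointerException when o is null
    return (o == null) ? null : o.toString();
}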

From source file:org.vertx.java.http.eventbusbridge.integration.MessagePublishTest.java

@Test
public void testPublishingByteJson() throws IOException {
    final EventBusMessageType messageType = EventBusMessageType.Byte;
    final Byte sentByte = Byte.MIN_VALUE;
    Map<String, String> expectations = createExpectations(generateUniqueAddress(),
            Base64.encodeAsString(sentByte.toString()), messageType);
    final AtomicInteger completedCount = new AtomicInteger(0);
    Handler<Message> messagePublishHandler = new MessagePublishHandler(sentByte, expectations, completedCount);
    registerListenersAndCheckForResponses(messagePublishHandler, expectations, NUMBER_OF_PUBLISH_HANDLERS,
            completedCount);
    String body = TemplateHelper.generateOutputUsingTemplate(SEND_REQUEST_TEMPLATE_JSON, expectations);
    HttpRequestHelper.sendHttpPostRequest(url, body, (VertxInternal) vertx, Status.ACCEPTED.getStatusCode(),
            MediaType.APPLICATION_JSON);
}

From source file:org.vertx.java.http.eventbusbridge.integration.MessagePublishTest.java

@Test
public void testPublishingByteXml() throws IOException {
    final EventBusMessageType messageType = EventBusMessageType.Byte;
    final Byte sentByte = Byte.MIN_VALUE;
    Map<String, String> expectations = createExpectations(generateUniqueAddress(),
            Base64.encodeAsString(sentByte.toString()), messageType);
    final AtomicInteger completedCount = new AtomicInteger(0);
    Handler<Message> messagePublishHandler = new MessagePublishHandler(sentByte, expectations, completedCount);
    registerListenersAndCheckForResponses(messagePublishHandler, expectations, NUMBER_OF_PUBLISH_HANDLERS,
            completedCount);
    String body = TemplateHelper.generateOutputUsingTemplate(SEND_REQUEST_TEMPLATE_XML, expectations);
    HttpRequestHelper.sendHttpPostRequest(url, body, (VertxInternal) vertx, Status.ACCEPTED.getStatusCode(),
            MediaType.APPLICATION_XML);
}

From source file:com.ebay.erl.mobius.core.Persistable.java

/**
 * Save the dataset and store the <code>projections</code>
 * into the specified <code>output</code> path in the 
 * format of the given <code>outputFormat</code>.
 * <p>
 * 
 * Only the rows that meet the <code>criteria</code> will be 
 * stored.  The <code>criteria</code> can only evaluate the 
 * columns specified in the <code>projections</code>.
 * <p>
 * 
 * <code>output</code> will be deleted before the job gets started.
 */
public Dataset save(MobiusJob job, Path output, Class<? extends FileOutputFormat> outputFormat,
        TupleCriterion criteria, Projectable... projections) throws IOException {
    if (projections == null || projections.length == 0)
        throw new IllegalArgumentException("Please specify the output columns.");

    // - VALIDATION - make sure no ambiguous column names.
    //
    // make sure the projections don't have two or more different columns that
    // have the same name but in different dataset, as we are going the use 
    // the {@link Column#getOutputColumnName} as the output schema of the
    // returned dataset.
    Set<String> columnNames = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER);
    for (Projectable aColumn : projections) {
        String[] outputSchema = aColumn.getOutputSchema();
        for (String anOutput : outputSchema) {
            if (!columnNames.contains(anOutput)) {
                columnNames.add(anOutput);
            } else {
                throw new IllegalArgumentException(anOutput + " from " + aColumn.toString()
                        + " is ambiguous; it has the same name"
                        + " as another selected projection in a different dataset. Please use"
                        + " Column#setNewName(String) to change it.");
            }
        }
    }

    // - VALIDATION - if <code>criteria</code> is not null, need to make
    // sure the columns used in the criteria are in the output columns.
    if (criteria != null) {
        TupleCriterion.validate(columnNames, criteria);
        this.jobConf.set(ConfigureConstants.PERSISTANT_CRITERIA, SerializableUtil.serializeToBase64(criteria));
    }

    // set up the {@link Dataset} to {@link Column} mapping so we can set up projection columns
    // for each dataset, and also validate that all the projection columns
    // come from the selected <code>datasets</code> only.
    Map<Dataset, List<Column>> datasetToColumns = new HashMap<Dataset, List<Column>>();

    for (Projectable aFunc : projections) {
        Column[] requiredInputColumns = aFunc.getInputColumns();
        for (Column aColumn : requiredInputColumns) {
            Dataset aDataset = aColumn.getDataset();
            // make sure <code>aDataset</code> is within the participating datasets
            boolean withinSelectedDataset = false;
            for (Dataset aSelectedDataset : this.datasets) {
                if (aSelectedDataset.equals(aDataset)) {
                    withinSelectedDataset = true;
                    break;
                }
            }

            if (!withinSelectedDataset) {
                // the user selected a column from a dataset that isn't
                // among the selected datasets in this join/group-by job.
                throw new IllegalArgumentException(aColumn.toString()
                        + " is not within the selected datasets "
                        + "in this join/group task; please select columns only from the selected datasets.");
            }

            List<Column> projectablesInADataset = null;
            if ((projectablesInADataset = datasetToColumns.get(aDataset)) == null) {
                projectablesInADataset = new LinkedList<Column>();
                datasetToColumns.put(aDataset, projectablesInADataset);
            }

            if (!projectablesInADataset.contains(aColumn))
                projectablesInADataset.add(aColumn);
        }
    }

    if (datasetToColumns.keySet().size() != this.datasets.length) {
        throw new IllegalArgumentException(
                "Please select at least one column from each dataset in the join/group-by job.");
    }

    // SETUP JOB
    if (this.userDefinedConf != null) {
        this.jobConf = new JobConf(Util.merge(this.jobConf, this.userDefinedConf));
    }
    this.jobConf.setJarByClass(job.getClass());
    this.jobConf.setMapOutputKeyClass(DataJoinKey.class);
    this.jobConf.setMapOutputValueClass(DataJoinValue.class);
    this.jobConf.setPartitionerClass(DataJoinKeyPartitioner.class);
    this.jobConf.setOutputValueGroupingComparator(DataJoinKey.Comparator.class);
    this.jobConf.setOutputKeyComparatorClass(DataJoinKey.class);
    this.jobConf.setReducerClass(DefaultMobiusReducer.class);
    this.jobConf.set(ConfigureConstants.PROJECTION_COLUMNS, SerializableUtil.serializeToBase64(projections));

    JobSetup.setupOutputs(this.jobConf, output, outputFormat);

    // set up input paths and projection columns for each dataset.
    for (byte assignedDatasetID = 0; assignedDatasetID < this.datasets.length; assignedDatasetID++) {
        Dataset aDataset = this.datasets[assignedDatasetID];

        // setup input for each dataset
        JobSetup.setupInputs(jobConf, aDataset, assignedDatasetID);

        // setup projection for each dataset
        JobSetup.setupProjections(jobConf, aDataset, assignedDatasetID,
                datasetToColumns.get(aDataset).toArray(new Column[0]));
    }

    // setup all dataset IDs
    for (int i = 0; i < this.datasets.length; i++) {
        Byte id = this.datasets[i].getID();
        if (!this.jobConf.get(ConfigureConstants.ALL_DATASET_IDS, "").isEmpty()) {
            this.jobConf.set(ConfigureConstants.ALL_DATASET_IDS,
                    this.jobConf.get(ConfigureConstants.ALL_DATASET_IDS) + "," + id);
        } else {
            this.jobConf.set(ConfigureConstants.ALL_DATASET_IDS, id.toString());
        }
    }

    boolean isCombinable = true;
    for (Projectable aFunc : projections) {
        aFunc.setConf(jobConf);

        if (!aFunc.isCombinable()) {
            isCombinable = false;
            LOGGER.info(aFunc.toString() + " is not combinable, #isCombinable() return false.");
            break;
        }
        if (aFunc instanceof GroupFunction && aFunc.useGroupKeyOnly()) {
            LOGGER.info(aFunc.toString()
                    + " is a group function and uses only the group key as its input; disabling combiner.");
            isCombinable = false;
            break;
        }
    }

    LOGGER.info("Using Combiner? " + isCombinable);
    if (isCombinable) {
        jobConf.setCombinerClass(DefaultMobiusCombiner.class);
    }

    job.addToExecQueue(jobConf);

    AbstractDatasetBuilder builder = DatasetBuildersFactory.getInstance(job).getBuilder(outputFormat,
            "Dataset_" + output.getName());

    // form the output columns from the projections
    List<String> outputColumns = new ArrayList<String>();
    for (Projectable func : projections) {
        String[] aProjectOutputs = func.getOutputSchema();
        for (String anOutputName : aProjectOutputs) {
            outputColumns.add(anOutputName);
        }
    }

    return builder.buildFromPreviousJob(jobConf, outputFormat, outputColumns.toArray(new String[0]));
}
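
A hypothetical call to save(...) might look like the sketch below. Everything beyond the method signature itself is an assumption and not taken from the Mobius documentation: `job` is a MobiusJob, `joined` stands in for a Persistable obtained from a Mobius join/group-by, and `name`/`total` are Column projections over the participating datasets.

// hypothetical usage sketch -- the surrounding objects are assumptions
Path output = new Path("/tmp/mobius/persisted");
Dataset result = joined.save(job, output,
        org.apache.hadoop.mapred.TextOutputFormat.class,
        null,           // no TupleCriterion: keep every row
        name, total);   // assumed Column projections (Projectable varargs)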

From source file:org.vertx.java.http.eventbusbridge.integration.MessageSendWithReplyTest.java

@Test
public void testSendingByteJsonWithResponseJson() throws IOException {
    final EventBusMessageType messageType = EventBusMessageType.Byte;
    final Byte sentByte = Byte.MAX_VALUE;
    int port = findFreePort();
    String responseUrl = createHttpServerUrl(port);
    Map<String, String> expectations = createExpectations("someaddress",
            Base64.encodeAsString(sentByte.toString()), messageType, responseUrl, MediaType.APPLICATION_JSON);
    String responseBody = TemplateHelper.generateOutputUsingTemplate(SEND_RESPONSE_TEMPLATE_JSON, expectations);
    createHttpServer(port, MediaType.APPLICATION_JSON, responseBody);
    Handler<Message> messageConsumerHandler = new MessageSendWithReplyHandler(sentByte, expectations);
    vertx.eventBus().registerHandler(expectations.get("address"), messageConsumerHandler);
    String requestBody = TemplateHelper.generateOutputUsingTemplate(SEND_REQUEST_TEMPLATE_JSON, expectations);
    HttpRequestHelper.sendHttpPostRequest(url, requestBody, (VertxInternal) vertx,
            Status.ACCEPTED.getStatusCode(), MediaType.APPLICATION_JSON);
}

From source file:org.vertx.java.http.eventbusbridge.integration.MessageSendWithReplyTest.java

@Test
public void testSendingByteXmlWithResponseXml() throws IOException {
    final EventBusMessageType messageType = EventBusMessageType.Byte;
    final Byte sentByte = Byte.MAX_VALUE;
    int port = findFreePort();
    String responseUrl = createHttpServerUrl(port);
    Map<String, String> expectations = createExpectations("someaddress",
            Base64.encodeAsString(sentByte.toString()), messageType, responseUrl, MediaType.APPLICATION_XML);
    String responseBody = TemplateHelper.generateOutputUsingTemplate(SEND_RESPONSE_TEMPLATE_XML, expectations);
    createHttpServer(port, MediaType.APPLICATION_XML, responseBody);
    Handler<Message> messageConsumerHandler = new MessageSendWithReplyHandler(sentByte, expectations);
    vertx.eventBus().registerHandler(expectations.get("address"), messageConsumerHandler);
    String requestBody = TemplateHelper.generateOutputUsingTemplate(SEND_REQUEST_TEMPLATE_XML, expectations);
    HttpRequestHelper.sendHttpPostRequest(url, requestBody, (VertxInternal) vertx,
            Status.ACCEPTED.getStatusCode(), MediaType.APPLICATION_XML);
}

From source file:com.jaspersoft.jasperserver.api.engine.scheduling.quartz.ReportExecutionJob.java

protected Output getOutput(Byte format, String baseFilename) throws JobExecutionException {
    if (format == null) {
        throw new JSException("jsexception.report.unknown.output.format", new Object[] { "null" });
    }

    if (format == ReportJob.OUTPUT_FORMAT_DATA_SNAPSHOT) {
        hasDataSnapshotOutput = true;
        dataSnapshotOutputName = baseFilename;
        return null;
    }

    String fileExtension = (String) getOutputKeyMapping().get(format.toString());
    if (fileExtension == null) {
        throw new JSException("jsexception.report.unknown.output.format", new Object[] { new Byte(format) });
    }

    return (Output) getOutputFormatMap().get(fileExtension);
}
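
The lookup above relies on the output-key mapping being keyed by the string form of the format code. A small standalone illustration of that pattern follows; the codes and extensions are placeholders, not JasperServer's actual ReportJob constants:

Map<String, String> outputKeyMapping = new HashMap<String, String>();
outputKeyMapping.put("1", "pdf");   // placeholder format code -> file extension
outputKeyMapping.put("2", "xls");

Byte format = Byte.valueOf((byte) 1);
String fileExtension = outputKeyMapping.get(format.toString()); // "pdf"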

From source file:org.jboss.bqt.client.xml.XMLQueryVisitationStrategy.java

/**
 * Produce an XML message for an instance of Byte.
 * <br>
 * @param object the instance for which the message is to be produced.
 * @param parent the XML element that is to be the parent of the produced XML message.
 * @return the root element of the XML segment that was produced.
 * @exception JDOMException if there is an error producing the message.
 */
private Element produceMsg(Byte object, Element parent) throws JDOMException {

    // ----------------------
    // Create the Byte element ...
    // ----------------------
    Element byteElement = new Element(TagNames.Elements.BYTE);
    byteElement.setText(object.toString());
    if (parent != null) {
        byteElement = parent.addContent(byteElement);
    }

    return byteElement;
}
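
Assuming TagNames.Elements.BYTE resolves to the tag name "byte" (an assumption; the actual constant may differ), the fragment produced for Byte.valueOf((byte) 127) with no parent would serialize to <byte>127</byte>. A minimal standalone sketch of the same idea with the JDOM Element API (org.jdom in JDOM 1.x; org.jdom2 in 2.x):

Element byteElement = new Element("byte");                  // assumed tag name
byteElement.setText(Byte.valueOf((byte) 127).toString());
String xml = new org.jdom.output.XMLOutputter().outputString(byteElement);
// xml is "<byte>127</byte>"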

From source file:org.apache.hadoop.hive.ql.optimizer.physical.GenSparkSkewJoinProcessor.java

@SuppressWarnings("unchecked")
public static void processSkewJoin(JoinOperator joinOp, Task<? extends Serializable> currTask,
        ReduceWork reduceWork, ParseContext parseCtx) throws SemanticException {

    SparkWork currentWork = ((SparkTask) currTask).getWork();
    if (currentWork.getChildren(reduceWork).size() > 0) {
        LOG.warn("Skip runtime skew join as the ReduceWork has child work and hasn't been split.");
        return;
    }

    List<Task<? extends Serializable>> children = currTask.getChildTasks();

    Task<? extends Serializable> child = children != null && children.size() == 1 ? children.get(0) : null;

    Path baseTmpDir = parseCtx.getContext().getMRTmpPath();

    JoinDesc joinDescriptor = joinOp.getConf();
    Map<Byte, List<ExprNodeDesc>> joinValues = joinDescriptor.getExprs();
    int numAliases = joinValues.size();

    Map<Byte, Path> bigKeysDirMap = new HashMap<Byte, Path>();
    Map<Byte, Map<Byte, Path>> smallKeysDirMap = new HashMap<Byte, Map<Byte, Path>>();
    Map<Byte, Path> skewJoinJobResultsDir = new HashMap<Byte, Path>();
    Byte[] tags = joinDescriptor.getTagOrder();
    // for each joining table, set dir for big key and small keys properly
    for (int i = 0; i < numAliases; i++) {
        Byte alias = tags[i];
        bigKeysDirMap.put(alias, GenMRSkewJoinProcessor.getBigKeysDir(baseTmpDir, alias));
        Map<Byte, Path> smallKeysMap = new HashMap<Byte, Path>();
        smallKeysDirMap.put(alias, smallKeysMap);
        for (Byte src2 : tags) {
            if (!src2.equals(alias)) {
                smallKeysMap.put(src2, GenMRSkewJoinProcessor.getSmallKeysDir(baseTmpDir, alias, src2));
            }
        }
        skewJoinJobResultsDir.put(alias, GenMRSkewJoinProcessor.getBigKeysSkewJoinResultDir(baseTmpDir, alias));
    }

    joinDescriptor.setHandleSkewJoin(true);
    joinDescriptor.setBigKeysDirMap(bigKeysDirMap);
    joinDescriptor.setSmallKeysDirMap(smallKeysDirMap);
    joinDescriptor
            .setSkewKeyDefinition(HiveConf.getIntVar(parseCtx.getConf(), HiveConf.ConfVars.HIVESKEWJOINKEY));

    // create proper table/column desc for spilled tables
    TableDesc keyTblDesc = (TableDesc) reduceWork.getKeyDesc().clone();
    List<String> joinKeys = Utilities.getColumnNames(keyTblDesc.getProperties());
    List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc.getProperties());

    Map<Byte, TableDesc> tableDescList = new HashMap<Byte, TableDesc>();
    Map<Byte, RowSchema> rowSchemaList = new HashMap<Byte, RowSchema>();
    Map<Byte, List<ExprNodeDesc>> newJoinValues = new HashMap<Byte, List<ExprNodeDesc>>();
    Map<Byte, List<ExprNodeDesc>> newJoinKeys = new HashMap<Byte, List<ExprNodeDesc>>();
    // used to create mapJoinDesc; must be in order
    List<TableDesc> newJoinValueTblDesc = new ArrayList<TableDesc>();

    for (int i = 0; i < tags.length; i++) {
        newJoinValueTblDesc.add(null);
    }

    for (int i = 0; i < numAliases; i++) {
        Byte alias = tags[i];
        List<ExprNodeDesc> valueCols = joinValues.get(alias);
        String colNames = "";
        String colTypes = "";
        int columnSize = valueCols.size();
        List<ExprNodeDesc> newValueExpr = new ArrayList<ExprNodeDesc>();
        List<ExprNodeDesc> newKeyExpr = new ArrayList<ExprNodeDesc>();
        ArrayList<ColumnInfo> columnInfos = new ArrayList<ColumnInfo>();

        boolean first = true;
        for (int k = 0; k < columnSize; k++) {
            TypeInfo type = valueCols.get(k).getTypeInfo();
            String newColName = i + "_VALUE_" + k; // any name, it does not matter.
            ColumnInfo columnInfo = new ColumnInfo(newColName, type, alias.toString(), false);
            columnInfos.add(columnInfo);
            newValueExpr.add(new ExprNodeColumnDesc(columnInfo.getType(), columnInfo.getInternalName(),
                    columnInfo.getTabAlias(), false));
            if (!first) {
                colNames = colNames + ",";
                colTypes = colTypes + ",";
            }
            first = false;
            colNames = colNames + newColName;
            colTypes = colTypes + valueCols.get(k).getTypeString();
        }

        // we are putting the join keys in the last part of the spilled table
        for (int k = 0; k < joinKeys.size(); k++) {
            if (!first) {
                colNames = colNames + ",";
                colTypes = colTypes + ",";
            }
            first = false;
            colNames = colNames + joinKeys.get(k);
            colTypes = colTypes + joinKeyTypes.get(k);
            ColumnInfo columnInfo = new ColumnInfo(joinKeys.get(k),
                    TypeInfoFactory.getPrimitiveTypeInfo(joinKeyTypes.get(k)), alias.toString(), false);
            columnInfos.add(columnInfo);
            newKeyExpr.add(new ExprNodeColumnDesc(columnInfo.getType(), columnInfo.getInternalName(),
                    columnInfo.getTabAlias(), false));
        }

        newJoinValues.put(alias, newValueExpr);
        newJoinKeys.put(alias, newKeyExpr);
        tableDescList.put(alias, Utilities.getTableDesc(colNames, colTypes));
        rowSchemaList.put(alias, new RowSchema(columnInfos));

        // construct value table Desc
        String valueColNames = "";
        String valueColTypes = "";
        first = true;
        for (int k = 0; k < columnSize; k++) {
            String newColName = i + "_VALUE_" + k; // any name, it does not matter.
            if (!first) {
                valueColNames = valueColNames + ",";
                valueColTypes = valueColTypes + ",";
            }
            valueColNames = valueColNames + newColName;
            valueColTypes = valueColTypes + valueCols.get(k).getTypeString();
            first = false;
        }
        newJoinValueTblDesc.set((byte) i, Utilities.getTableDesc(valueColNames, valueColTypes));
    }

    joinDescriptor.setSkewKeysValuesTables(tableDescList);
    joinDescriptor.setKeyTableDesc(keyTblDesc);

    // create N-1 map join tasks
    HashMap<Path, Task<? extends Serializable>> bigKeysDirToTaskMap = new HashMap<Path, Task<? extends Serializable>>();
    List<Serializable> listWorks = new ArrayList<Serializable>();
    List<Task<? extends Serializable>> listTasks = new ArrayList<Task<? extends Serializable>>();
    for (int i = 0; i < numAliases - 1; i++) {
        Byte src = tags[i];
        HiveConf hiveConf = new HiveConf(parseCtx.getConf(), GenSparkSkewJoinProcessor.class);
        SparkWork sparkWork = new SparkWork(parseCtx.getConf().getVar(HiveConf.ConfVars.HIVEQUERYID));
        Task<? extends Serializable> skewJoinMapJoinTask = TaskFactory.get(sparkWork, hiveConf);
        skewJoinMapJoinTask.setFetchSource(currTask.isFetchSource());

        // create N TableScans
        Operator<? extends OperatorDesc>[] parentOps = new TableScanOperator[tags.length];
        for (int k = 0; k < tags.length; k++) {
            Operator<? extends OperatorDesc> ts = GenMapRedUtils
                    .createTemporaryTableScanOperator(rowSchemaList.get((byte) k));
            ((TableScanOperator) ts).setTableDesc(tableDescList.get((byte) k));
            parentOps[k] = ts;
        }

        // create the MapJoinOperator
        String dumpFilePrefix = "mapfile" + PlanUtils.getCountForMapJoinDumpFilePrefix();
        MapJoinDesc mapJoinDescriptor = new MapJoinDesc(newJoinKeys, keyTblDesc, newJoinValues,
                newJoinValueTblDesc, newJoinValueTblDesc, joinDescriptor.getOutputColumnNames(), i,
                joinDescriptor.getConds(), joinDescriptor.getFilters(), joinDescriptor.getNoOuterJoin(),
                dumpFilePrefix);
        mapJoinDescriptor.setTagOrder(tags);
        mapJoinDescriptor.setHandleSkewJoin(false);
        mapJoinDescriptor.setNullSafes(joinDescriptor.getNullSafes());
        // temporarily, mark it as child of all the TS
        MapJoinOperator mapJoinOp = (MapJoinOperator) OperatorFactory.getAndMakeChild(mapJoinDescriptor, null,
                parentOps);

        // clone the original join operator, and replace it with the MJ
        // this makes sure MJ has the same downstream operator plan as the original join
        List<Operator<?>> reducerList = new ArrayList<Operator<?>>();
        reducerList.add(reduceWork.getReducer());
        Operator<? extends OperatorDesc> reducer = Utilities.cloneOperatorTree(parseCtx.getConf(), reducerList)
                .get(0);
        Preconditions.checkArgument(reducer instanceof JoinOperator,
                "Reducer should be join operator, but actually is " + reducer.getName());
        JoinOperator cloneJoinOp = (JoinOperator) reducer;
        List<Operator<? extends OperatorDesc>> childOps = cloneJoinOp.getChildOperators();
        for (Operator<? extends OperatorDesc> childOp : childOps) {
            childOp.replaceParent(cloneJoinOp, mapJoinOp);
        }
        mapJoinOp.setChildOperators(childOps);

        // set memory usage for the MJ operator
        setMemUsage(mapJoinOp, skewJoinMapJoinTask, parseCtx);

        // create N MapWorks and add them to the SparkWork
        MapWork bigMapWork = null;
        Map<Byte, Path> smallTblDirs = smallKeysDirMap.get(src);
        for (int j = 0; j < tags.length; j++) {
            MapWork mapWork = PlanUtils.getMapRedWork().getMapWork();
            sparkWork.add(mapWork);
            // This code has only been added for testing
            boolean mapperCannotSpanPartns = parseCtx.getConf()
                    .getBoolVar(HiveConf.ConfVars.HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS);
            mapWork.setMapperCannotSpanPartns(mapperCannotSpanPartns);
            Operator<? extends OperatorDesc> tableScan = parentOps[j];
            String alias = tags[j].toString();
            ArrayList<String> aliases = new ArrayList<String>();
            aliases.add(alias);
            Path path;
            if (j == i) {
                path = bigKeysDirMap.get(tags[j]);
                bigKeysDirToTaskMap.put(path, skewJoinMapJoinTask);
                bigMapWork = mapWork;
            } else {
                path = smallTblDirs.get(tags[j]);
            }
            mapWork.getPathToAliases().put(path.toString(), aliases);
            mapWork.getAliasToWork().put(alias, tableScan);
            PartitionDesc partitionDesc = new PartitionDesc(tableDescList.get(tags[j]), null);
            mapWork.getPathToPartitionInfo().put(path.toString(), partitionDesc);
            mapWork.getAliasToPartnInfo().put(alias, partitionDesc);
            mapWork.setName("Map " + GenSparkUtils.getUtils().getNextSeqNumber());
        }
        // connect all small dir map work to the big dir map work
        Preconditions.checkArgument(bigMapWork != null, "Haven't identified big dir MapWork");
        // these 2 flags are intended only for the big-key map work
        bigMapWork
                .setNumMapTasks(HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.HIVESKEWJOINMAPJOINNUMMAPTASK));
        bigMapWork
                .setMinSplitSize(HiveConf.getLongVar(hiveConf, HiveConf.ConfVars.HIVESKEWJOINMAPJOINMINSPLIT));
        // use HiveInputFormat so that we can control the number of map tasks
        bigMapWork.setInputformat(HiveInputFormat.class.getName());
        for (BaseWork work : sparkWork.getRoots()) {
            Preconditions.checkArgument(work instanceof MapWork,
                    "All root work should be MapWork, but got " + work.getClass().getSimpleName());
            if (work != bigMapWork) {
                sparkWork.connect(work, bigMapWork, new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE));
            }
        }

        // insert SparkHashTableSink and Dummy operators
        for (int j = 0; j < tags.length; j++) {
            if (j != i) {
                insertSHTS(tags[j], (TableScanOperator) parentOps[j], bigMapWork);
            }
        }

        listWorks.add(skewJoinMapJoinTask.getWork());
        listTasks.add(skewJoinMapJoinTask);
    }
    if (children != null) {
        for (Task<? extends Serializable> tsk : listTasks) {
            for (Task<? extends Serializable> oldChild : children) {
                tsk.addDependentTask(oldChild);
            }
        }
    }
    if (child != null) {
        currTask.removeDependentTask(child);
        listTasks.add(child);
        listWorks.add(child.getWork());
    }
    ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx context = new ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx(
            bigKeysDirToTaskMap, child);

    ConditionalWork cndWork = new ConditionalWork(listWorks);
    ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, parseCtx.getConf());
    cndTsk.setListTasks(listTasks);
    cndTsk.setResolver(new ConditionalResolverSkewJoin());
    cndTsk.setResolverCtx(context);
    currTask.setChildTasks(new ArrayList<Task<? extends Serializable>>());
    currTask.addDependentTask(cndTsk);
}