Example usage for java.util BitSet BitSet()

List of usage examples for java.util BitSet BitSet()

Introduction

On this page you can find usage examples for the java.util.BitSet no-argument constructor, BitSet().

Prototype

public BitSet() 

Document

Creates a new bit set. All bits are initially false.
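As a quick orientation before the per-project examples in the Usage section below, here is a minimal, self-contained sketch of the no-argument constructor (the class name BitSetConstructorDemo is illustrative, not taken from any of the projects): BitSet() creates an empty, auto-growing set whose bits are all false until you set them.

import java.util.BitSet;

public class BitSetConstructorDemo {
    public static void main(String[] args) {
        // A freshly constructed BitSet is empty: every bit reads as false.
        BitSet bits = new BitSet();
        System.out.println(bits.isEmpty());     // true
        System.out.println(bits.cardinality()); // 0

        // The set grows on demand as higher indices are set.
        bits.set(3);
        bits.set(64);
        System.out.println(bits);        // {3, 64}
        System.out.println(bits.get(3)); // true
        System.out.println(bits.get(4)); // false
    }
}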

Usage

From source file:au.org.ala.delta.translation.intkey.IntkeyItemsFileWriter.java

private void writeTextAttributes(int filteredCharNumber, Character textChar) {
    int characterNumber = textChar.getCharacterId();

    List<String> values = new ArrayList<String>();
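    // Bit (itemNumber - 1) is set when the item's attribute is inapplicable for this character.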
    BitSet inapplicableBits = new BitSet();
    Iterator<FilteredItem> items = _dataSet.filteredItems();
    while (items.hasNext()) {
        FilteredItem item = items.next();
        Attribute attribute = _dataSet.getAttribute(item.getItem().getItemNumber(), characterNumber);

        if (isInapplicable(attribute)) {
            inapplicableBits.set(item.getItemNumber() - 1);
        }

        if (attribute == null || attribute.isUnknown()) {
            values.add("");
            continue;
        }

        values.add(_formatter.formatCharacterComment(attribute.getValueAsString()));

    }
    _itemsFile.writeAttributeStrings(filteredCharNumber, inapplicableBits, values);
}

From source file:jetbrains.buildServer.clouds.azure.asm.connector.AzureApiConnector.java

private int getPortNumber(final String serviceName,
        final HostedServiceGetDetailedResponse.Deployment deployment) {
    final BitSet busyPorts = new BitSet();
    busyPorts.set(MIN_PORT_NUMBER, MAX_PORT_NUMBER);
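    // Despite the name, a set bit here means the port is still free; bits for ports in use are cleared below.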

    for (RoleInstance instance : deployment.getRoleInstances()) {
        for (InstanceEndpoint endpoint : instance.getInstanceEndpoints()) {
            final int port = endpoint.getPort();
            if (port >= MIN_PORT_NUMBER && port <= MAX_PORT_NUMBER) {
                busyPorts.set(port, false);
            }
        }
    }

    for (Role role : deployment.getRoles()) {
        for (ConfigurationSet conf : role.getConfigurationSets()) {
            for (InputEndpoint endpoint : conf.getInputEndpoints()) {
                final int port = endpoint.getPort();
                if (port >= MIN_PORT_NUMBER && port <= MAX_PORT_NUMBER) {
                    busyPorts.set(port, false);
                }
            }
        }
    }

    final Map<String, Integer> map = DEPLOYMENT_OPS.get(serviceName);
    if (map != null) {
        final Iterator<String> iter = map.keySet().iterator();
        while (iter.hasNext()) {
            final String operationId = iter.next();
            if (isActionFinished(operationId)) {
                iter.remove();
            } else {
                busyPorts.set(map.get(operationId), false);
            }
        }
    }

    int portNumber = MIN_PORT_NUMBER;
    for (int i = MIN_PORT_NUMBER; i <= MAX_PORT_NUMBER; i++) {
        if (busyPorts.get(i)) {
            portNumber = i;
            break;
        }
    }

    return portNumber;
}

From source file:edu.uci.ics.hyracks.algebricks.rewriter.rules.ExtractCommonOperatorsRule.java

private boolean requiresMaterialization(List<Integer> groupClusterIds, int index) {
    Integer clusterId = groupClusterIds.get(index);
    BitSet blockingClusters = new BitSet();
    getAllBlockingClusterIds(clusterId, blockingClusters);
    if (!blockingClusters.isEmpty()) {
        for (int i = 0; i < groupClusterIds.size(); i++) {
            if (i == index) {
                continue;
            }
            if (blockingClusters.get(groupClusterIds.get(i))) {
                return true;
            }
        }
    }
    return false;
}

From source file:org.omnaest.utils.table.TableTest.java

@SuppressWarnings("cast")
    @Test
public void testRow() {
    Table<String> table = this.newTable(new String[][] { { "a", "b", "c" }, { "d", "e", "f" } }, String.class);

    String[] values = new String[] { "a", "b", "c" };
    table.addRowElements(values);

    {
        Row<String> row = table.row(0);
        assertEquals(Arrays.asList(values), ListUtils.valueOf((Iterable<String>) row));
    }
    {
        Row<String> row = table.row(1);
        assertEquals(Arrays.asList("d", "e", "f"), ListUtils.valueOf((Iterable<String>) row));
    }
    {
        Row<String> row = table.row(2);
        assertEquals(Arrays.asList("a", "b", "c"), ListUtils.valueOf((Iterable<String>) row));
    }
    {
        Row<String> row = table.row(0);
        row.setElement(1, "b2");
        assertEquals("b2", row.getElement(1));
    }
    {
        assertNull(table.row(-1));
    }
    {
        BitSet indexFilter = new BitSet();
        indexFilter.set(1);
        indexFilter.set(2);
        Iterable<Row<String>> rows = table.rows(indexFilter);
        assertEquals(2, IterableUtils.size(rows));
        assertEquals(table.row(1).id(), IterableUtils.elementAt(rows, 0).id());
        assertEquals(table.row(2).id(), IterableUtils.elementAt(rows, 1).id());
    }

}

From source file:MSUmpire.SpectrumParser.mzXMLParser.java

@Override
public ScanCollection GetScanCollectionMS1Window(XYData MS1Window, boolean IncludePeak, float startTime,
        float endTime) {
    if (dIA_Setting == null) {
        Logger.getRootLogger().error(filename + " is not DIA data");
        return null;
    }
    ScanCollection MS1WindowScanCollection = new ScanCollection(parameter.Resolution);

    List<MzXMLthreadUnit> ScanList = null;

    int StartScanNo = 0;
    int EndScanNo = 0;

    StartScanNo = GetStartScan(startTime);
    EndScanNo = GetEndScan(endTime);
    //        ArrayList<Integer> IncludedScans=new ArrayList<>();
    final BitSet IncludedScans = new BitSet();
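    // A set bit marks a scan number that falls inside the requested MS1 window and time range.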
    for (int scannum : dIA_Setting.MS1Windows.get(MS1Window)) {
        if (scannum >= StartScanNo && scannum <= EndScanNo) {
            IncludedScans.set(scannum, true);
        }
    }

    ScanList = ParseScans(IncludedScans);

    for (MzXMLthreadUnit result : ScanList) {
        MS1WindowScanCollection.AddScan(result.scan);
        MS1WindowScanCollection.ElutionTimeToScanNoMap.put(result.scan.RetentionTime, result.scan.ScanNum);
    }
    ScanList.clear();
    ScanList = null;

    return MS1WindowScanCollection;
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

@Override
    @RetrySemantics.ReadOnly
public GetOpenTxnsResponse getOpenTxns() throws MetaException {
    try {
        // We need to figure out the current transaction number and the list of
        // open transactions.  To avoid needing a transaction on the underlying
        // database we'll look at the current transaction number first.  If it
        // subsequently shows up in the open list that's ok.
        Connection dbConn = null;
        Statement stmt = null;
        ResultSet rs = null;
        try {
            /**
             * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
             */
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            String s = "select ntxn_next - 1 from NEXT_TXN_ID";
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            if (!rs.next()) {
                throw new MetaException(
                        "Transaction tables not properly " + "initialized, no record found in next_txn_id");
            }
            long hwm = rs.getLong(1);
            if (rs.wasNull()) {
                throw new MetaException(
                        "Transaction tables not properly " + "initialized, null record found in next_txn_id");
            }
            close(rs);
            List<Long> openList = new ArrayList<Long>();
            //need the WHERE clause below to ensure consistent results with READ_COMMITTED
            s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm + " order by txn_id";
            LOG.debug("Going to execute query<" + s + ">");
            rs = stmt.executeQuery(s);
            long minOpenTxn = Long.MAX_VALUE;
            BitSet abortedBits = new BitSet();
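            // Bit i is set when the transaction at openList.get(i) is in the aborted state.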
            while (rs.next()) {
                long txnId = rs.getLong(1);
                openList.add(txnId);
                char c = rs.getString(2).charAt(0);
                if (c == TXN_OPEN) {
                    minOpenTxn = Math.min(minOpenTxn, txnId);
                } else if (c == TXN_ABORTED) {
                    abortedBits.set(openList.size() - 1);
                }
            }
            LOG.debug("Going to rollback");
            dbConn.rollback();
            ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
            GetOpenTxnsResponse otr = new GetOpenTxnsResponse(hwm, openList, byteBuffer);
            if (minOpenTxn < Long.MAX_VALUE) {
                otr.setMin_open_txn(minOpenTxn);
            }
            return otr;
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "getOpenTxns");
            throw new MetaException(
                    "Unable to select from transaction database, " + StringUtils.stringifyException(e));
        } finally {
            close(rs, stmt, dbConn);
        }
    } catch (RetryException e) {
        return getOpenTxns();
    }
}

From source file:org.apache.hadoop.hive.ql.optimizer.SharedWorkOptimizer.java

private static boolean areMergeable(ParseContext pctx, SharedWorkOptimizerCache optimizerCache,
        TableScanOperator tsOp1, TableScanOperator tsOp2) throws SemanticException {
    // First we check if the two table scan operators can actually be merged
    // If schemas do not match, we currently do not merge
    List<String> prevTsOpNeededColumns = tsOp1.getNeededColumns();
    List<String> tsOpNeededColumns = tsOp2.getNeededColumns();
    if (prevTsOpNeededColumns.size() != tsOpNeededColumns.size()) {
        return false;
    }
    boolean notEqual = false;
    for (int i = 0; i < prevTsOpNeededColumns.size(); i++) {
        if (!prevTsOpNeededColumns.get(i).equals(tsOpNeededColumns.get(i))) {
            notEqual = true;
            break;
        }
    }
    if (notEqual) {
        return false;
    }
    // If row limit does not match, we currently do not merge
    if (tsOp1.getConf().getRowLimit() != tsOp2.getConf().getRowLimit()) {
        return false;
    }
    // If partitions do not match, we currently do not merge
    PrunedPartitionList prevTsOpPPList = pctx.getPrunedPartitions(tsOp1);
    PrunedPartitionList tsOpPPList = pctx.getPrunedPartitions(tsOp2);
    if (!prevTsOpPPList.getPartitions().equals(tsOpPPList.getPartitions())) {
        return false;
    }
    // If is a DPP, check if actually it refers to same target, column, etc.
    // Further, the DPP value needs to be generated from same subtree
    List<Operator<?>> dppsOp1 = new ArrayList<>(optimizerCache.tableScanToDPPSource.get(tsOp1));
    List<Operator<?>> dppsOp2 = new ArrayList<>(optimizerCache.tableScanToDPPSource.get(tsOp2));
    if (dppsOp1.isEmpty() && dppsOp2.isEmpty()) {
        return true;
    }
    for (int i = 0; i < dppsOp1.size(); i++) {
        Operator<?> op = dppsOp1.get(i);
        if (op instanceof ReduceSinkOperator) {
            Set<Operator<?>> ascendants = findAscendantWorkOperators(pctx, optimizerCache, op);
            if (ascendants.contains(tsOp2)) {
                dppsOp1.remove(i);
                i--;
            }
        }
    }
    for (int i = 0; i < dppsOp2.size(); i++) {
        Operator<?> op = dppsOp2.get(i);
        if (op instanceof ReduceSinkOperator) {
            Set<Operator<?>> ascendants = findAscendantWorkOperators(pctx, optimizerCache, op);
            if (ascendants.contains(tsOp1)) {
                dppsOp2.remove(i);
                i--;
            }
        }
    }
    if (dppsOp1.size() != dppsOp2.size()) {
        // Only first or second operator contains DPP pruning
        return false;
    }
    // Check if DPP branches are equal
    for (int i = 0; i < dppsOp1.size(); i++) {
        Operator<?> dppOp1 = dppsOp1.get(i);
        BitSet bs = new BitSet();
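        // bs.get(j) records whether dppsOp2.get(j) has already been matched in this comparison pass.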
        for (int j = 0; j < dppsOp2.size(); j++) {
            if (!bs.get(j)) {
                // If not visited yet
                Operator<?> dppOp2 = dppsOp2.get(j);
                if (compareAndGatherOps(pctx, dppOp1, dppOp2) != null) {
                    // The DPP operator/branch are equal
                    bs.set(j);
                    break;
                }
            }
        }
        if (bs.cardinality() == i) {
            return false;
        }
    }
    return true;
}

From source file:edu.uci.ics.hyracks.algebricks.rewriter.rules.ExtractCommonOperatorsRule.java

private void computeClusters(Mutable<ILogicalOperator> parentRef, Mutable<ILogicalOperator> opRef,
        MutableInt currentClusterId) {
    // only replicate operator has multiple outputs
    int outputIndex = 0;
    if (opRef.getValue().getOperatorTag() == LogicalOperatorTag.REPLICATE) {
        ReplicateOperator rop = (ReplicateOperator) opRef.getValue();
        List<Mutable<ILogicalOperator>> outputs = rop.getOutputs();
        for (outputIndex = 0; outputIndex < outputs.size(); outputIndex++) {
            if (outputs.get(outputIndex).equals(parentRef)) {
                break;
            }
        }
    }
    AbstractLogicalOperator aop = (AbstractLogicalOperator) opRef.getValue();
    Pair<int[], int[]> labels = aop.getPhysicalOperator().getInputOutputDependencyLabels(opRef.getValue());
    List<Mutable<ILogicalOperator>> inputs = opRef.getValue().getInputs();
    for (int i = 0; i < inputs.size(); i++) {
        Mutable<ILogicalOperator> inputRef = inputs.get(i);
        if (labels.second[outputIndex] == 1 && labels.first[i] == 0) { // 1 -> 0
            if (labels.second.length == 1) {
                clusterMap.put(opRef, currentClusterId);
                // start a new cluster
                MutableInt newClusterId = new MutableInt(++lastUsedClusterId);
                computeClusters(opRef, inputRef, newClusterId);
                BitSet waitForList = clusterWaitForMap.get(currentClusterId.getValue());
                if (waitForList == null) {
                    waitForList = new BitSet();
                    clusterWaitForMap.put(currentClusterId.getValue(), waitForList);
                }
                waitForList.set(newClusterId.getValue());
            }
        } else { // 0 -> 0 and 1 -> 1
            MutableInt prevClusterId = clusterMap.get(opRef);
            if (prevClusterId == null || prevClusterId.getValue().equals(currentClusterId.getValue())) {
                clusterMap.put(opRef, currentClusterId);
                computeClusters(opRef, inputRef, currentClusterId);
            } else {
                // merge prevClusterId and currentClusterId: update all the map entries that has currentClusterId to prevClusterId
                for (BitSet bs : clusterWaitForMap.values()) {
                    if (bs.get(currentClusterId.getValue())) {
                        bs.clear(currentClusterId.getValue());
                        bs.set(prevClusterId.getValue());
                    }
                }
                currentClusterId.setValue(prevClusterId.getValue());
            }
        }
    }
}

From source file:org.apache.hyracks.algebricks.rewriter.rules.ExtractCommonOperatorsRule.java

private void computeClusters(Mutable<ILogicalOperator> parentRef, Mutable<ILogicalOperator> opRef,
        MutableInt currentClusterId) {
    // only replicate operator has multiple outputs
    int outputIndex = 0;
    if (opRef.getValue().getOperatorTag() == LogicalOperatorTag.REPLICATE) {
        ReplicateOperator rop = (ReplicateOperator) opRef.getValue();
        List<Mutable<ILogicalOperator>> outputs = rop.getOutputs();
        for (outputIndex = 0; outputIndex < outputs.size(); outputIndex++) {
            if (outputs.get(outputIndex).equals(parentRef)) {
                break;
            }
        }
    }
    AbstractLogicalOperator aop = (AbstractLogicalOperator) opRef.getValue();
    Pair<int[], int[]> labels = aop.getPhysicalOperator().getInputOutputDependencyLabels(opRef.getValue());
    List<Mutable<ILogicalOperator>> inputs = opRef.getValue().getInputs();
    for (int i = 0; i < inputs.size(); i++) {
        Mutable<ILogicalOperator> inputRef = inputs.get(i);
        if (labels.second[outputIndex] == 1 && labels.first[i] == 0) { // 1 -> 0
            if (labels.second.length == 1) {
                clusterMap.put(opRef, currentClusterId);
                // start a new cluster
                MutableInt newClusterId = new MutableInt(++lastUsedClusterId);
                computeClusters(opRef, inputRef, newClusterId);
                BitSet waitForList = clusterWaitForMap.get(currentClusterId.getValue());
                if (waitForList == null) {
                    waitForList = new BitSet();
                    clusterWaitForMap.put(currentClusterId.getValue(), waitForList);
                }
                waitForList.set(newClusterId.getValue());
            }
        } else { // 0 -> 0 and 1 -> 1
            MutableInt prevClusterId = clusterMap.get(opRef);
            if (prevClusterId == null || prevClusterId.getValue().equals(currentClusterId.getValue())) {
                clusterMap.put(opRef, currentClusterId);
                computeClusters(opRef, inputRef, currentClusterId);
            } else {
                // merge prevClusterId and currentClusterId: update all the map entries that has currentClusterId to prevClusterId
                for (BitSet bs : clusterWaitForMap.values()) {
                    if (bs.get(currentClusterId.getValue())) {
                        bs.clear(currentClusterId.getValue());
                        bs.set(prevClusterId.getValue());
                    }
                }
                clusterWaitForMap.remove(currentClusterId.getValue());
                currentClusterId.setValue(prevClusterId.getValue());
            }
        }
    }
}

From source file:gov.noaa.pfel.erddap.dataset.EDDTableFromHttpGet.java

/** 
     * This is used to add insert or delete commands into a data file of this dataset. 
     * This is EDDTableFromHttpGet overwriting the default implementation.
     *
     * <p>The key should be author_secret. So keys are specific to specific people/actors.
     * The author will be kept and added to the 'author' column in the dataset.
     *
     * <p>INSERT works like SQL's INSERT and UPDATE.
     * If the info matches existing values of sortColumnSourceNames,
     * the previous data is updated/overwritten. Otherwise, it is inserted.
     *
     * <p>DELETE works like SQL's DELETE
     *
     * @param tDirStructureColumnNames the column names for the parts of the 
     *   dir and file names. All of these names must be in requiredColumnNames.
     * @param keys the valid values of author= (to authenticate the author)
     * @param columnNames the names of all of the dataset's source variables.
     *   This does not include timestamp, author, or command.
     *   The time variable must be named time.
     * @param columnUnits any of them may be null or "".
     *   All timestamp columns (in the general sense) should have UDUNITS 
     *   String time units (e.g., "yyyy-MM-dd'T'HH:mm:ss") 
     *   or numeric time units (e.g., "days since 1985-01-01").
     *   For INSERT and DELETE calls, the time values must be in that format
     *   (you can't revert to ISO 8601 format as with data requests in the rest of ERDDAP).
     * @param columnTypes the Java names for the types (e.g., double).
     *   The missing values are the default missing values for PrimitiveArrays.
     *   All timestamp columns MUST be doubles.
     *   'long' is not supported because .nc3 files don't support longs.
     * @param columnStringLengths -1 if not a string column.
     * @param requiredColumnNames the names which identify a unique row.
     *   RequiredColumnNames MUST all be in columnNames.
     *   Insert requests MUST have all of the requiredColumnNames and usually have all 
     *     columnNames + author. Missing columns will get (standard PrimitiveArray) 
     *     missing values.
     *   Delete requests MUST have all of the requiredColumnNames and, in addition,
     *     usually have just author. Other columns are irrelevant.
     *   This should be as minimal as possible, and always includes time:  
     *   For TimeSeries: stationID, time.
     *   For Trajectory: trajectoryID, time.
     *   For Profile: stationID, time, depth.
     *   For TimeSeriesProfile: stationID, time, depth.
     *   For TrajectoryProfile: trajectoryID, time, depth.
     * @param command INSERT_COMMAND or DELETE_COMMAND
     * @param userDapQuery the param string, still percent-encoded
     * @param dirTable  a copy of the dirTable  (changes may be made to it) or null.
     * @param fileTable a copy of the fileTable (changes may be made to it) or null.
     * @return the response string 
     * @throws Throwable if any kind of trouble
     */
    public static String insertOrDelete(String startDir, StringArray tDirStructureColumnNames,
            IntArray tDirStructureNs, IntArray tDirStructureCalendars, HashSet<String> keys, String columnNames[],
            String columnUnits[], String columnTypes[], int columnStringLengths[], String requiredColumnNames[],
            byte command, String userDapQuery, Table dirTable, Table fileTable) throws Throwable {

        double timestamp = System.currentTimeMillis() / 1000.0;
        if (dirTable == null || fileTable == null) { //ensure both or neither
            dirTable = null;
            fileTable = null;
        }

        //store values parallelling columnNames
        int nColumns = columnNames.length;
        PrimitiveArray columnValues[] = new PrimitiveArray[nColumns];
        Class columnClasses[] = new Class[nColumns];
        DataType columnDataTypes[] = new DataType[nColumns];
        boolean columnIsString[] = new boolean[nColumns];
        int timeColumn = -1;
        DateTimeFormatter timeFormatter = null; //used if time variable is string
        double timeBaseAndFactor[] = null; //used if time variable is numeric
        for (int col = 0; col < nColumns; col++) {
            if (!String2.isSomething(columnUnits[col]))
                columnUnits[col] = "";

            if (columnNames[col].equals(EDV.TIME_NAME)) {
                timeColumn = col;
                if (columnIsString[col]) {
                    if (columnUnits[col].toLowerCase().indexOf("yyyy") < 0) //was "yy"
                        throw new SimpleException(
                                EDStatic.queryError + "Invalid units for the string time variable. "
                                        + "Units MUST specify the format of the time values.");
                    timeFormatter = DateTimeFormat.forPattern(columnUnits[col]).withZone(ZoneId.of("UTC"));
                } else { //numeric time values
                    timeBaseAndFactor = Calendar2.getTimeBaseAndFactor(columnUnits[col]); //throws RuntimeException if trouble
                }
            }

            if (columnTypes[col].equals("String")) {
                columnClasses[col] = String.class;
                columnDataTypes[col] = DataType.STRING;
                columnIsString[col] = true;
                if (columnStringLengths[col] < 1 || columnStringLengths[col] > 64000)
                    throw new SimpleException(EDStatic.queryError + "Invalid string length="
                            + columnStringLengths[col] + " for column=" + columnNames[col] + ".");
            } else {
                columnClasses[col] = PrimitiveArray.elementStringToClass(columnTypes[col]);
                columnDataTypes[col] = NcHelper.getDataType(columnClasses[col]);
            }
        }

        //parse the userDapQuery's parts. Ensure it is valid. 
        String parts[] = String2.split(userDapQuery, '&');
        int nParts = parts.length;
        String author = null; //the part before '_'
        int arraySize = -1; //until an array is found
        BitSet requiredColumnsFound = new BitSet();
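        // Bit i is set once requiredColumnNames[i] has been seen in the query parameters.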
        for (int p = 0; p < nParts; p++) {
            parts[p] = SSR.percentDecode(parts[p]);
            int eqPo = parts[p].indexOf('=');
            if (eqPo <= 0 || //no '=' or no name
                    "<>~!".indexOf(parts[p].charAt(eqPo - 1)) >= 0) // <= >= != ~=
                throw new SimpleException(
                        EDStatic.queryError + "The \"" + parts[p] + "\" parameter isn't in the form name=value.");
            String tName = parts[p].substring(0, eqPo);
            String tValue = parts[p].substring(eqPo + 1);
            if (tValue.startsWith("~")) // =~
                throw new SimpleException(
                        EDStatic.queryError + "The \"" + parts[p] + "\" parameter isn't in the form name=value.");

            //catch and verify author=
            if (tName.equals(AUTHOR)) {
                if (author != null)
                    throw new SimpleException(EDStatic.queryError + "There are two parameters with name=author.");
                if (!keys.contains(tValue))
                    throw new SimpleException(EDStatic.queryError + "Invalid author_key.");
                if (p != nParts - 1)
                    throw new SimpleException(EDStatic.queryError + "name=author must be the last parameter.");
                int po = Math.max(0, tValue.indexOf('_'));
                author = tValue.substring(0, po); //may be ""

            } else {
                //is it a requiredColumn?
                int whichRC = String2.indexOf(requiredColumnNames, tName);
                if (whichRC >= 0)
                    requiredColumnsFound.set(whichRC);

                //whichColumn? 
                int whichCol = String2.indexOf(columnNames, tName);
                if (whichCol < 0)
                    throw new SimpleException(EDStatic.queryError + "Unknown columnName=" + tName);
                if (columnValues[whichCol] != null)
                    throw new SimpleException(
                            EDStatic.queryError + "There are two parameters with columnName=" + tName + ".");

                //get the values
                if (tValue.startsWith("[") && tValue.endsWith("]")) {
                    //deal with array of values: name=[valuesCSV]
                    columnValues[whichCol] = PrimitiveArray.csvFactory(columnClasses[whichCol], tValue);
                    if (arraySize < 0)
                        arraySize = columnValues[whichCol].size();
                    else if (arraySize != columnValues[whichCol].size())
                        throw new SimpleException(
                                EDStatic.queryError + "Different parameters with arrays have different sizes: "
                                        + arraySize + "!=" + columnValues[whichCol].size() + ".");

                } else {
                    //deal with single value: name=value
                    columnValues[whichCol] = PrimitiveArray.csvFactory(columnClasses[whichCol], tValue);

                    if (columnClasses[whichCol] == String.class && (tValue.length() < 2 || tValue.charAt(0) != '"'
                            || tValue.charAt(tValue.length() - 1) != '"'))
                        throw new SimpleException(EDStatic.queryError + "The String value for columnName=" + tName
                                + " must start and end with \"'s.");
                    if (columnValues[whichCol].size() != 1)
                        throw new SimpleException(
                                EDStatic.queryError + "One value (not " + columnValues[whichCol].size()
                                        + ") expected for columnName=" + tName + ". (missing [ ] ?)");
                }
            }
        }

        //ensure required parameters were specified 
        if (author == null)
            throw new SimpleException(EDStatic.queryError + "author= was not specified.");
        int notFound = requiredColumnsFound.nextClearBit(0);
        if (notFound < requiredColumnNames.length)
            throw new SimpleException(EDStatic.queryError + "requiredColumnName=" + requiredColumnNames[notFound]
                    + " wasn't specified.");

        //make all columnValues the same size
        //(timestamp, author, command are separate and have just 1 value)
        int maxSize = Math.max(1, arraySize);
        for (int col = 0; col < nColumns; col++) {
            PrimitiveArray pa = columnValues[col];
            if (pa == null) {
                //this var wasn't in the command, so use mv's
                columnValues[col] = PrimitiveArray.factory(columnClasses[col], maxSize, "");
            } else if (pa.size() == 1 && maxSize > 1) {
                columnValues[col] = PrimitiveArray.factory(columnClasses[col], maxSize, pa.getString(0));
            }
        }

        //figure out the fullFileName for each row
        StringArray fullFileNames = new StringArray(maxSize, false);
        for (int row = 0; row < maxSize; row++) {
            //figure out the epochSeconds time value
            double tTime = timeColumn < 0 ? Double.NaN : //no time column
                    timeBaseAndFactor == null
                            ? Calendar2.toEpochSeconds(columnValues[timeColumn].getString(row), timeFormatter)
                            : Calendar2.unitsSinceToEpochSeconds( //numeric time
                                    timeBaseAndFactor[0], timeBaseAndFactor[1],
                                    columnValues[timeColumn].getDouble(row));

            fullFileNames.add(whichFile(startDir, tDirStructureColumnNames, tDirStructureNs, tDirStructureCalendars,
                    columnNames, columnValues, row, tTime));
        }

        //EVERYTHING SHOULD BE VALIDATED BY NOW. NO ERRORS AFTER HERE!
        //append each input row to the appropriate file
        Array oneTimestampArray = Array.factory(new double[] { timestamp });
        //I reported to netcdf-java mailing list: this generated null pointer exception in 4.6.6:
        // String tsar[] = new String[]{author};
        // Array oneAuthorArray    = Array.factory(tsar); //new String[]{author});
        //This works:
        ArrayString.D1 oneAuthorArray = new ArrayString.D1(1);
        oneAuthorArray.set(0, author);

        Array oneCommandArray = Array.factory(new byte[] { command });
        int row = 0;
        while (row < maxSize) {
            //figure out which file
            String fullFileName = fullFileNames.get(row);

            //open the file
            NetcdfFileWriter file = null;
            boolean fileIsNew = false;
            int[] origin = new int[1];
            try {

                Group rootGroup = null;
                Dimension rowDim = null;
                Variable vars[] = new Variable[nColumns];
                Variable timestampVar = null;
                Variable authorVar = null;
                Variable commandVar = null;
                if (File2.isFile(fullFileName)) {
                    file = NetcdfFileWriter.openExisting(fullFileName);
                    rootGroup = file.addGroup(null, "");
                    rowDim = rootGroup.findDimension("row");

                    //find Variables for columnNames.   May be null, but shouldn't be.
                    StringArray columnsNotFound = new StringArray();
                    for (int col = 0; col < nColumns; col++) {
                        vars[col] = rootGroup.findVariable(columnNames[col]);
                        if (vars[col] == null)
                            columnsNotFound.add(columnNames[col]);
                    }
                    timestampVar = rootGroup.findVariable(TIMESTAMP);
                    authorVar = rootGroup.findVariable(AUTHOR);
                    commandVar = rootGroup.findVariable(COMMAND);
                    if (timestampVar == null)
                        columnsNotFound.add(TIMESTAMP);
                    if (authorVar == null)
                        columnsNotFound.add(AUTHOR);
                    if (commandVar == null)
                        columnsNotFound.add(COMMAND);
                    if (columnsNotFound.size() > 0)
                        throw new SimpleException(MustBe.InternalError + ": column(s)=" + columnsNotFound
                                + " not found in " + fullFileName);

                } else {
                    //if file doesn't exist, create it
                    fileIsNew = true; //first
                    file = NetcdfFileWriter.createNew(NetcdfFileWriter.Version.netcdf3, fullFileName);
                    rootGroup = file.addGroup(null, "");
                    rowDim = file.addUnlimitedDimension("row");
                    ArrayList rowDimAL = new ArrayList();
                    rowDimAL.add(rowDim);

                    //define Variables
                    for (int col = 0; col < nColumns; col++) {
                        String cName = columnNames[col];
                        String cType = columnTypes[col];
                        if (columnIsString[col]) {
                            vars[col] = file.addStringVariable(rootGroup, cName, rowDimAL,
                                    columnStringLengths[col]);
                        } else {
                            vars[col] = file.addVariable(rootGroup, cName, columnDataTypes[col], rowDimAL);
                        }
                    }
                    timestampVar = file.addVariable(rootGroup, TIMESTAMP, DataType.DOUBLE, rowDimAL);
                    authorVar = file.addStringVariable(rootGroup, AUTHOR, rowDimAL, AUTHOR_STRLEN);
                    commandVar = file.addVariable(rootGroup, COMMAND, DataType.BYTE, rowDimAL);

                    // create the file
                    file.create();
                }

                //append the series of commands that go to this fullFileName
                int startRow = row++;
                while (row < maxSize && fullFileNames.get(row).equals(fullFileName))
                    row++;
                int stopRow = row; //1 past end

                //which row in the file table?
                int fileTableRow = -1;
                if (fileTable != null) {
                    //already in fileTable?
                    //fileTableRow = ...

                    //add to fileTable
                }

                //write the data to the file
                origin[0] = rowDim.getLength();
                for (int col = 0; col < nColumns; col++) {
                    PrimitiveArray subsetPA = columnValues[col];
                    if (startRow > 0 || stopRow != maxSize)
                        subsetPA = subsetPA.subset(startRow, 1, stopRow - 1); //inclusive
                    file.write(vars[col], origin, Array.factory(subsetPA.toObjectArray()));

                    //adjust min/max in fileTable
                    if (fileTable != null && command == INSERT_COMMAND) {
                        if (columnIsString[col]) {
                            //fileTableRow...   
                        } else {
                            double stats[] = subsetPA.calculateStats();
                            if (stats[PrimitiveArray.STATS_N] > 0) { //has some non MVs
                                //fileTableRow... Math.min(  , stats[PrimitiveArray.STATS_MIN]));
                                //fileTableRow....Math.max(  , stats[PrimitiveArray.STATS_MAX]));
                            }
                            if (stats[PrimitiveArray.STATS_N] < stopRow - startRow) {
                                //fileTableRow... hasMV
                            }
                        }
                    }
                }
                Array timestampArray = oneTimestampArray;
                Array authorArray = oneAuthorArray;
                Array commandArray = oneCommandArray;
                if (stopRow - startRow > 1) {
                    //double timestampAr[] = new double[stopRow - startRow]; 
                    //String authorAr[]    = new String[stopRow - startRow];
                    //byte   commandAr[]   = new byte  [stopRow - startRow];
                    //Arrays.fill(timestampAr, timestamp);
                    //Arrays.fill(authorAr,    author);
                    //Arrays.fill(commandAr,   command);
                    //timestampArray = Array.factory(timestampAr);
                    //authorArray    = Array.factory(authorAr);
                    //commandArray   = Array.factory(commandAr);

                    int thisShape[] = new int[] { stopRow - startRow };
                    timestampArray = Array.factoryConstant(double.class, thisShape, new Double(timestamp));
                    authorArray = Array.factoryConstant(String.class, thisShape, author);
                    commandArray = Array.factoryConstant(byte.class, thisShape, new Byte(command));
                }
                file.write(timestampVar, origin, timestampArray);
                file.writeStringData(authorVar, origin, authorArray);
                file.write(commandVar, origin, commandArray);

                //adjust min/max in fileTable
                if (fileTable != null && command == INSERT_COMMAND) {
                    //fileTableRow... Math.min(   , timestamp));
                    //fileTableRow....Math.max(   , timestamp));

                    //fileTableRow... Math.min(   , author));
                    //fileTableRow....Math.max(   , author));

                    //fileTableRow... Math.min(   , command));
                    //fileTableRow....Math.max(   , command));
                }

                //make it so!
                file.flush(); //force file update

                //close the file
                file.close();
                file = null;

            } catch (Throwable t) {
                if (file != null) {
                    try {
                        file.close();
                    } catch (Throwable t2) {
                    }
                }
                if (fileIsNew)
                    File2.delete(fullFileName);
                String2.log(
                        String2.ERROR + " while " + (fileIsNew ? "creating" : "adding to") + " " + fullFileName);
                throw t;
            }
        }

        //Don't ever change any of this (except adding something new to the end). 
        //Clients rely on it.
        return "SUCCESS: Data received. No errors. timestamp=" + Calendar2.epochSecondsToIsoStringT3(timestamp)
                + "Z=" + timestamp + " seconds since 1970-01-01T00:00:00Z.\n";
    }