Example usage for java.util BitSet set

List of usage examples for java.util BitSet set

Introduction

On this page you can find example usage for java.util BitSet set.

Prototype

public void set(int bitIndex) 

Document

Sets the bit at the specified index to true.
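
Before the real-world examples, here is a minimal, self-contained sketch (class and variable names are illustrative only) of set(int) together with cardinality() and nextClearBit(int), the two companion methods the examples below lean on:

import java.util.BitSet;

public class BitSetSetDemo {
    public static void main(String[] args) {
        BitSet seen = new BitSet();
        seen.set(0);
        seen.set(1);
        seen.set(3); // leaves a gap at index 2

        System.out.println(seen.get(3));          // true
        System.out.println(seen.cardinality());   // 3 bits are set
        System.out.println(seen.nextClearBit(0)); // 2, the first unset index
    }
}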

Usage

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * Runs a job using the provided environment to read a sequence of records from a single Kafka topic.
 * The method allows the expected starting offset and total read value count of each partition to be specified individually.
 * The job will be considered successful only if all partition read results match the start offset and value count criteria.
 */
protected void readSequence(final StreamExecutionEnvironment env, final StartupMode startupMode,
        final Map<KafkaTopicPartition, Long> specificStartupOffsets, final Properties cc,
        final String topicName,
        final Map<Integer, Tuple2<Integer, Integer>> partitionsToValuesCountAndStartOffset) throws Exception {
    final int sourceParallelism = partitionsToValuesCountAndStartOffset.keySet().size();

    int finalCountTmp = 0;
    for (Map.Entry<Integer, Tuple2<Integer, Integer>> valuesCountAndStartOffset : partitionsToValuesCountAndStartOffset
            .entrySet()) {
        finalCountTmp += valuesCountAndStartOffset.getValue().f0;
    }
    final int finalCount = finalCountTmp;

    final TypeInformation<Tuple2<Integer, Integer>> intIntTupleType = TypeInfoParser
            .parse("Tuple2<Integer, Integer>");

    final TypeInformationSerializationSchema<Tuple2<Integer, Integer>> deser = new TypeInformationSerializationSchema<>(
            intIntTupleType, env.getConfig());

    // create the consumer
    cc.putAll(secureProps);
    FlinkKafkaConsumerBase<Tuple2<Integer, Integer>> consumer = kafkaServer.getConsumer(topicName, deser, cc);
    switch (startupMode) {
    case EARLIEST:
        consumer.setStartFromEarliest();
        break;
    case LATEST:
        consumer.setStartFromLatest();
        break;
    case SPECIFIC_OFFSETS:
        consumer.setStartFromSpecificOffsets(specificStartupOffsets);
        break;
    case GROUP_OFFSETS:
        consumer.setStartFromGroupOffsets();
        break;
    }

    DataStream<Tuple2<Integer, Integer>> source = env.addSource(consumer).setParallelism(sourceParallelism)
            .map(new ThrottledMapper<Tuple2<Integer, Integer>>(20)).setParallelism(sourceParallelism);

    // verify data
    source.flatMap(new RichFlatMapFunction<Tuple2<Integer, Integer>, Integer>() {

        private HashMap<Integer, BitSet> partitionsToValueCheck;
        private int count = 0;

        @Override
        public void open(Configuration parameters) throws Exception {
            partitionsToValueCheck = new HashMap<>();
            for (Integer partition : partitionsToValuesCountAndStartOffset.keySet()) {
                partitionsToValueCheck.put(partition, new BitSet());
            }
        }

        @Override
        public void flatMap(Tuple2<Integer, Integer> value, Collector<Integer> out) throws Exception {
            int partition = value.f0;
            int val = value.f1;

            BitSet bitSet = partitionsToValueCheck.get(partition);
            if (bitSet == null) {
                throw new RuntimeException("Got a record from an unknown partition");
            } else {
                bitSet.set(val - partitionsToValuesCountAndStartOffset.get(partition).f1);
            }

            count++;

            LOG.info("Received message {}, total {} messages", value, count);

            // verify if we've seen everything
            if (count == finalCount) {
                for (Map.Entry<Integer, BitSet> partitionsToValueCheck : this.partitionsToValueCheck
                        .entrySet()) {
                    BitSet check = partitionsToValueCheck.getValue();
                    int expectedValueCount = partitionsToValuesCountAndStartOffset
                            .get(partitionsToValueCheck.getKey()).f0;

                    if (check.cardinality() != expectedValueCount) {
                        throw new RuntimeException("Expected cardinality to be " + expectedValueCount
                                + ", but was " + check.cardinality());
                    } else if (check.nextClearBit(0) != expectedValueCount) {
                        throw new RuntimeException("Expected next clear bit to be " + expectedValueCount
                                + ", but was " + check.nextClearBit(0));
                    }
                }

                // test has passed
                throw new SuccessException();
            }
        }

    }).setParallelism(1);

    tryExecute(env, "Read data from Kafka");

    LOG.info("Successfully read sequence for verification");
}
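
The verification idea above is worth isolating: one BitSet per partition serves as a duplicate-free receipt log. Every record sets the bit at (value - startOffset), and the read is complete and gap-free exactly when cardinality() equals the expected count and nextClearBit(0) sits just past it. A standalone sketch of that check, with hypothetical names and an int[] standing in for the Kafka records:

import java.util.BitSet;

public class SequenceCheck {
    /** Returns true iff values covers exactly [startOffset, startOffset + expectedCount). */
    static boolean isCompleteSequence(int[] values, int startOffset, int expectedCount) {
        BitSet check = new BitSet();
        for (int v : values) {
            check.set(v - startOffset); // assumes v >= startOffset; duplicates re-set the same bit
        }
        return check.cardinality() == expectedCount        // right number of distinct values
                && check.nextClearBit(0) == expectedCount; // and no gaps from index 0
    }

    public static void main(String[] args) {
        System.out.println(isCompleteSequence(new int[] {10, 11, 12}, 10, 3)); // true
        System.out.println(isCompleteSequence(new int[] {10, 12}, 10, 3));     // false: 11 missing
    }
}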

From source file:gov.noaa.pfel.erddap.dataset.EDDTableFromHttpGet.java

/** 
     * This is used to add insert or delete commands into a data file of this dataset. 
     * This is EDDTableFromHttpGet overwriting the default implementation.
     *
     * <p>The key should be author_secret. So keys are specific to specific people/actors.
     * The author will be kept and added to the 'author' column in the dataset.
     *
     * <p>INSERT works like SQL's INSERT and UPDATE.
     * If the info matches existing values of sortColumnSourceNames,
     * the previous data is updated/overwritten. Otherwise, it is inserted.
     *
     * <p>DELETE works like SQL's DELETE
     *
     * @param tDirStructureColumnNames the column names for the parts of the 
     *   dir and file names. All of these names must be in requiredColumnNames.
     * @param keys the valid values of author= (to authenticate the author)
     * @param columnNames the names of all of the dataset's source variables.
     *   This does not include timestamp, author, or command.
     *   The time variable must be named time.
     * @param columnUnits any of them may be null or "".
     *   All timestamp columns (in the general sense) should have UDUNITS 
     *   String time units (e.g., "yyyy-MM-dd'T'HH:mm:ss") 
     *   or numeric time units (e.g., "days since 1985-01-01").
     *   For INSERT and DELETE calls, the time values must be in that format
     *   (you can't revert to ISO 8601 format as with data requests in the rest of ERDDAP).
     * @param columnTypes the Java names for the types (e.g., double).
     *   The missing values are the default missing values for PrimitiveArrays.
     *   All timestamp columns MUST be doubles.
     *   'long' is not supported because .nc3 files don't support longs.
     * @param columnStringLengths -1 if not a string column.
     * @param requiredColumnNames the names which identify a unique row.
     *   RequiredColumnNames MUST all be in columnNames.
     *   Insert requests MUST have all of the requiredColumnNames and usually have all 
     *     columnNames + author. Missing columns will get (standard PrimitiveArray) 
     *     missing values.
     *   Delete requests MUST have all of the requiredColumnNames and, in addition,
     *     usually have just author. Other columns are irrelevant.
     *   This should be as minimal as possible, and always includes time:  
     *   For TimeSeries: stationID, time.
     *   For Trajectory: trajectoryID, time.
     *   For Profile: stationID, time, depth.
     *   For TimeSeriesProfile: stationID, time, depth.
     *   For TrajectoryProfile: trajectoryID, time, depth.
     * @param command INSERT_COMMAND or DELETE_COMMAND
     * @param userDapQuery the param string, still percent-encoded
     * @param dirTable  a copy of the dirTable  (changes may be made to it) or null.
     * @param fileTable a copy of the fileTable (changes may be made to it) or null.
     * @return the response string 
     * @throws Throwable if any kind of trouble
     */
    public static String insertOrDelete(String startDir, StringArray tDirStructureColumnNames,
            IntArray tDirStructureNs, IntArray tDirStructureCalendars, HashSet<String> keys, String columnNames[],
            String columnUnits[], String columnTypes[], int columnStringLengths[], String requiredColumnNames[],
            byte command, String userDapQuery, Table dirTable, Table fileTable) throws Throwable {

        double timestamp = System.currentTimeMillis() / 1000.0;
        if (dirTable == null || fileTable == null) { //ensure both or neither
            dirTable = null;
            fileTable = null;
        }

        //store values parallelling columnNames
        int nColumns = columnNames.length;
        PrimitiveArray columnValues[] = new PrimitiveArray[nColumns];
        Class columnClasses[] = new Class[nColumns];
        DataType columnDataTypes[] = new DataType[nColumns];
        boolean columnIsString[] = new boolean[nColumns];
        int timeColumn = -1;
        DateTimeFormatter timeFormatter = null; //used if time variable is string
        double timeBaseAndFactor[] = null; //used if time variable is numeric
        for (int col = 0; col < nColumns; col++) {
            if (!String2.isSomething(columnUnits[col]))
                columnUnits[col] = "";

            if (columnNames[col].equals(EDV.TIME_NAME)) {
                timeColumn = col;
                if (columnIsString[col]) {
                    if (columnUnits[col].toLowerCase().indexOf("yyyy") < 0) //was "yy"
                        throw new SimpleException(
                                EDStatic.queryError + "Invalid units for the string time variable. "
                                        + "Units MUST specify the format of the time values.");
                    timeFormatter = DateTimeFormat.forPattern(columnUnits[col]).withZone(ZoneId.of("UTC"));
                } else { //numeric time values
                    timeBaseAndFactor = Calendar2.getTimeBaseAndFactor(columnUnits[col]); //throws RuntimeException if trouble
                }
            }

            if (columnTypes[col].equals("String")) {
                columnClasses[col] = String.class;
                columnDataTypes[col] = DataType.STRING;
                columnIsString[col] = true;
                if (columnStringLengths[col] < 1 || columnStringLengths[col] > 64000)
                    throw new SimpleException(EDStatic.queryError + "Invalid string length="
                            + columnStringLengths[col] + " for column=" + columnNames[col] + ".");
            } else {
                columnClasses[col] = PrimitiveArray.elementStringToClass(columnTypes[col]);
                columnDataTypes[col] = NcHelper.getDataType(columnClasses[col]);
            }
        }

        //parse the userDapQuery's parts. Ensure it is valid. 
        String parts[] = String2.split(userDapQuery, '&');
        int nParts = parts.length;
        String author = null; //the part before '_'
        int arraySize = -1; //until an array is found
        BitSet requiredColumnsFound = new BitSet();
        for (int p = 0; p < nParts; p++) {
            parts[p] = SSR.percentDecode(parts[p]);
            int eqPo = parts[p].indexOf('=');
            if (eqPo <= 0 || //no '=' or no name
                    "<>~!".indexOf(parts[p].charAt(eqPo - 1)) >= 0) // <= >= != ~=
                throw new SimpleException(
                        EDStatic.queryError + "The \"" + parts[p] + "\" parameter isn't in the form name=value.");
            String tName = parts[p].substring(0, eqPo);
            String tValue = parts[p].substring(eqPo + 1);
            if (tValue.startsWith("~")) // =~
                throw new SimpleException(
                        EDStatic.queryError + "The \"" + parts[p] + "\" parameter isn't in the form name=value.");

            //catch and verify author=
            if (tName.equals(AUTHOR)) {
                if (author != null)
                    throw new SimpleException(EDStatic.queryError + "There are two parameters with name=author.");
                if (!keys.contains(tValue))
                    throw new SimpleException(EDStatic.queryError + "Invalid author_key.");
                if (p != nParts - 1)
                    throw new SimpleException(EDStatic.queryError + "name=author must be the last parameter.");
                int po = Math.max(0, tValue.indexOf('_'));
                author = tValue.substring(0, po); //may be ""

            } else {
                //is it a requiredColumn?
                int whichRC = String2.indexOf(requiredColumnNames, tName);
                if (whichRC >= 0)
                    requiredColumnsFound.set(whichRC);

                //whichColumn? 
                int whichCol = String2.indexOf(columnNames, tName);
                if (whichCol < 0)
                    throw new SimpleException(EDStatic.queryError + "Unknown columnName=" + tName);
                if (columnValues[whichCol] != null)
                    throw new SimpleException(
                            EDStatic.queryError + "There are two parameters with columnName=" + tName + ".");

                //get the values
                if (tValue.startsWith("[") && tValue.endsWith("]")) {
                    //deal with array of values: name=[valuesCSV]
                    columnValues[whichCol] = PrimitiveArray.csvFactory(columnClasses[whichCol], tValue);
                    if (arraySize < 0)
                        arraySize = columnValues[whichCol].size();
                    else if (arraySize != columnValues[whichCol].size())
                        throw new SimpleException(
                                EDStatic.queryError + "Different parameters with arrays have different sizes: "
                                        + arraySize + "!=" + columnValues[whichCol].size() + ".");

                } else {
                    //deal with single value: name=value
                    columnValues[whichCol] = PrimitiveArray.csvFactory(columnClasses[whichCol], tValue);

                    if (columnClasses[whichCol] == String.class && (tValue.length() < 2 || tValue.charAt(0) != '"'
                            || tValue.charAt(tValue.length() - 1) != '"'))
                        throw new SimpleException(EDStatic.queryError + "The String value for columnName=" + tName
                                + " must start and end with \"'s.");
                    if (columnValues[whichCol].size() != 1)
                        throw new SimpleException(
                                EDStatic.queryError + "One value (not " + columnValues[whichCol].size()
                                        + ") expected for columnName=" + tName + ". (missing [ ] ?)");
                }
            }
        }

        //ensure required parameters were specified 
        if (author == null)
            throw new SimpleException(EDStatic.queryError + "author= was not specified.");
        int notFound = requiredColumnsFound.nextClearBit(0);
        if (notFound < requiredColumnNames.length)
            throw new SimpleException(EDStatic.queryError + "requiredColumnName=" + requiredColumnNames[notFound]
                    + " wasn't specified.");

        //make all columnValues the same size
        //(timestamp, author, command are separate and have just 1 value)
        int maxSize = Math.max(1, arraySize);
        for (int col = 0; col < nColumns; col++) {
            PrimitiveArray pa = columnValues[col];
            if (pa == null) {
                //this var wasn't in the command, so use mv's
                columnValues[col] = PrimitiveArray.factory(columnClasses[col], maxSize, "");
            } else if (pa.size() == 1 && maxSize > 1) {
                columnValues[col] = PrimitiveArray.factory(columnClasses[col], maxSize, pa.getString(0));
            }
        }

        //figure out the fullFileName for each row
        StringArray fullFileNames = new StringArray(maxSize, false);
        for (int row = 0; row < maxSize; row++) {
            //figure out the epochSeconds time value
            double tTime = timeColumn < 0 ? Double.NaN : //no time column
                    timeBaseAndFactor == null
                            ? Calendar2.toEpochSeconds(columnValues[timeColumn].getString(row), timeFormatter)
                            : Calendar2.unitsSinceToEpochSeconds( //numeric time
                                    timeBaseAndFactor[0], timeBaseAndFactor[1],
                                    columnValues[timeColumn].getDouble(row));

            fullFileNames.add(whichFile(startDir, tDirStructureColumnNames, tDirStructureNs, tDirStructureCalendars,
                    columnNames, columnValues, row, tTime));
        }

        //EVERYTHING SHOULD BE VALIDATED BY NOW. NO ERRORS AFTER HERE!
        //append each input row to the appropriate file
        Array oneTimestampArray = Array.factory(new double[] { timestamp });
        //I reported to netcdf-java mailing list: this generated null pointer exception in 4.6.6:
        // String tsar[] = new String[]{author};
        // Array oneAuthorArray    = Array.factory(tsar); //new String[]{author});
        //This works:
        ArrayString.D1 oneAuthorArray = new ArrayString.D1(1);
        oneAuthorArray.set(0, author);

        Array oneCommandArray = Array.factory(new byte[] { command });
        int row = 0;
        while (row < maxSize) {
            //figure out which file
            String fullFileName = fullFileNames.get(row);

            //open the file
            NetcdfFileWriter file = null;
            boolean fileIsNew = false;
            int[] origin = new int[1];
            try {

                Group rootGroup = null;
                Dimension rowDim = null;
                Variable vars[] = new Variable[nColumns];
                Variable timestampVar = null;
                Variable authorVar = null;
                Variable commandVar = null;
                if (File2.isFile(fullFileName)) {
                    file = NetcdfFileWriter.openExisting(fullFileName);
                    rootGroup = file.addGroup(null, "");
                    rowDim = rootGroup.findDimension("row");

                    //find Variables for columnNames.   May be null, but shouldn't be.
                    StringArray columnsNotFound = new StringArray();
                    for (int col = 0; col < nColumns; col++) {
                        vars[col] = rootGroup.findVariable(columnNames[col]);
                        if (vars[col] == null)
                            columnsNotFound.add(columnNames[col]);
                    }
                    timestampVar = rootGroup.findVariable(TIMESTAMP);
                    authorVar = rootGroup.findVariable(AUTHOR);
                    commandVar = rootGroup.findVariable(COMMAND);
                    if (timestampVar == null)
                        columnsNotFound.add(TIMESTAMP);
                    if (authorVar == null)
                        columnsNotFound.add(AUTHOR);
                    if (commandVar == null)
                        columnsNotFound.add(COMMAND);
                    if (columnsNotFound.size() > 0)
                        throw new SimpleException(MustBe.InternalError + ": column(s)=" + columnsNotFound
                                + " not found in " + fullFileName);

                } else {
                    //if file doesn't exist, create it
                    fileIsNew = true; //first
                    file = NetcdfFileWriter.createNew(NetcdfFileWriter.Version.netcdf3, fullFileName);
                    rootGroup = file.addGroup(null, "");
                    rowDim = file.addUnlimitedDimension("row");
                    ArrayList rowDimAL = new ArrayList();
                    rowDimAL.add(rowDim);

                    //define Variables
                    for (int col = 0; col < nColumns; col++) {
                        String cName = columnNames[col];
                        String cType = columnTypes[col];
                        if (columnIsString[col]) {
                            vars[col] = file.addStringVariable(rootGroup, cName, rowDimAL,
                                    columnStringLengths[col]);
                        } else {
                            vars[col] = file.addVariable(rootGroup, cName, columnDataTypes[col], rowDimAL);
                        }
                    }
                    timestampVar = file.addVariable(rootGroup, TIMESTAMP, DataType.DOUBLE, rowDimAL);
                    authorVar = file.addStringVariable(rootGroup, AUTHOR, rowDimAL, AUTHOR_STRLEN);
                    commandVar = file.addVariable(rootGroup, COMMAND, DataType.BYTE, rowDimAL);

                    // create the file
                    file.create();
                }

                //append the series of commands that go to this fullFileName
                int startRow = row++;
                while (row < maxSize && fullFileNames.get(row).equals(fullFileName))
                    row++;
                int stopRow = row; //1 past end

                //which row in the file table?
                int fileTableRow = -1;
                if (fileTable != null) {
                    //already in fileTable?
                    //fileTableRow = ...

                    //add to fileTable
                }

                //write the data to the file
                origin[0] = rowDim.getLength();
                for (int col = 0; col < nColumns; col++) {
                    PrimitiveArray subsetPA = columnValues[col];
                    if (startRow > 0 || stopRow != maxSize)
                        subsetPA = subsetPA.subset(startRow, 1, stopRow - 1); //inclusive
                    file.write(vars[col], origin, Array.factory(subsetPA.toObjectArray()));

                    //adjust min/max in fileTable
                    if (fileTable != null && command == INSERT_COMMAND) {
                        if (columnIsString[col]) {
                            //fileTableRow...   
                        } else {
                            double stats[] = subsetPA.calculateStats();
                            if (stats[PrimitiveArray.STATS_N] > 0) { //has some non MVs
                                //fileTableRow... Math.min(  , stats[PrimitiveArray.STATS_MIN]));
                                //fileTableRow....Math.max(  , stats[PrimitiveArray.STATS_MAX]));
                            }
                            if (stats[PrimitiveArray.STATS_N] < stopRow - startRow) {
                                //fileTableRow... hasMV
                            }
                        }
                    }
                }
                Array timestampArray = oneTimestampArray;
                Array authorArray = oneAuthorArray;
                Array commandArray = oneCommandArray;
                if (stopRow - startRow > 1) {
                    //double timestampAr[] = new double[stopRow - startRow]; 
                    //String authorAr[]    = new String[stopRow - startRow];
                    //byte   commandAr[]   = new byte  [stopRow - startRow];
                    //Arrays.fill(timestampAr, timestamp);
                    //Arrays.fill(authorAr,    author);
                    //Arrays.fill(commandAr,   command);
                    //timestampArray = Array.factory(timestampAr);
                    //authorArray    = Array.factory(authorAr);
                    //commandArray   = Array.factory(commandAr);

                    int thisShape[] = new int[] { stopRow - startRow };
                    timestampArray = Array.factoryConstant(double.class, thisShape, new Double(timestamp));
                    authorArray = Array.factoryConstant(String.class, thisShape, author);
                    commandArray = Array.factoryConstant(byte.class, thisShape, new Byte(command));
                }
                file.write(timestampVar, origin, timestampArray);
                file.writeStringData(authorVar, origin, authorArray);
                file.write(commandVar, origin, commandArray);

                //adjust min/max in fileTable
                if (fileTable != null && command == INSERT_COMMAND) {
                    //fileTableRow... Math.min(   , timestamp));
                    //fileTableRow....Math.max(   , timestamp));

                    //fileTableRow... Math.min(   , author));
                    //fileTableRow....Math.max(   , author));

                    //fileTableRow... Math.min(   , command));
                    //fileTableRow....Math.max(   , command));
                }

                //make it so!
                file.flush(); //force file update

                //close the file
                file.close();
                file = null;

            } catch (Throwable t) {
                if (file != null) {
                    try {
                        file.close();
                    } catch (Throwable t2) {
                    }
                }
                if (fileIsNew)
                    File2.delete(fullFileName);
                String2.log(
                        String2.ERROR + " while " + (fileIsNew ? "creating" : "adding to") + " " + fullFileName);
                throw t;
            }
        }

        //Don't ever change any of this (except adding something new to the end). 
        //Clients rely on it.
        return "SUCCESS: Data received. No errors. timestamp=" + Calendar2.epochSecondsToIsoStringT3(timestamp)
                + "Z=" + timestamp + " seconds since 1970-01-01T00:00:00Z.\n";
    }
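
The requiredColumnsFound BitSet in this example is a common validation idiom: index the required names, set the bit whenever one of them appears, then let nextClearBit(0) name the first one that never did. A self-contained sketch of just that idiom (method and variable names are illustrative):

import java.util.BitSet;

public class RequiredParams {
    /** Returns the first required name missing from given, or null if all are present. */
    static String firstMissing(String[] requiredNames, String[] given) {
        BitSet found = new BitSet(requiredNames.length);
        for (String g : given) {
            for (int i = 0; i < requiredNames.length; i++) {
                if (requiredNames[i].equals(g)) {
                    found.set(i); // mark this required name as seen
                }
            }
        }
        int notFound = found.nextClearBit(0); // first index that was never set
        return notFound < requiredNames.length ? requiredNames[notFound] : null;
    }

    public static void main(String[] args) {
        String[] required = {"stationID", "time"};
        System.out.println(firstMissing(required, new String[] {"time"}));              // stationID
        System.out.println(firstMissing(required, new String[] {"stationID", "time"})); // null
    }
}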

From source file:org.docx4j.fonts.fop.fonts.truetype.TTFFile.java

private boolean readUnicodeCmap(FontFileReader in, long cmapUniOffset, int encodingID) throws IOException {
    //Read CMAP table and correct mtxTab.index
    int mtxPtr = 0;

    // Read unicode cmap
    seekTab(in, "cmap", cmapUniOffset);
    int cmapFormat = in.readTTFUShort();
    /*int cmap_length =*/ in.readTTFUShort(); //skip cmap length

    if (log.isDebugEnabled()) {
        log.debug("CMAP format: " + cmapFormat);
    }

    if (cmapFormat == 4) {
        in.skip(2); // Skip version number
        int cmapSegCountX2 = in.readTTFUShort();
        int cmapSearchRange = in.readTTFUShort();
        int cmapEntrySelector = in.readTTFUShort();
        int cmapRangeShift = in.readTTFUShort();

        if (log.isDebugEnabled()) {
            log.debug("segCountX2   : " + cmapSegCountX2);
            log.debug("searchRange  : " + cmapSearchRange);
            log.debug("entrySelector: " + cmapEntrySelector);
            log.debug("rangeShift   : " + cmapRangeShift);
        }

        int[] cmapEndCounts = new int[cmapSegCountX2 / 2];
        int[] cmapStartCounts = new int[cmapSegCountX2 / 2];
        int[] cmapDeltas = new int[cmapSegCountX2 / 2];
        int[] cmapRangeOffsets = new int[cmapSegCountX2 / 2];

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapEndCounts[i] = in.readTTFUShort();
        }

        in.skip(2); // Skip reservedPad

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapStartCounts[i] = in.readTTFUShort();
        }

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapDeltas[i] = in.readTTFShort();
        }

        //int startRangeOffset = in.getCurrentPos();

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapRangeOffsets[i] = in.readTTFUShort();
        }

        int glyphIdArrayOffset = in.getCurrentPos();

        BitSet eightBitGlyphs = new BitSet(256);

        // Insert the unicode id for the glyphs in mtxTab
        // and fill in the cmaps ArrayList

        for (int i = 0; i < cmapStartCounts.length; i++) {

            if (log.isTraceEnabled()) {
                log.trace(i + ": " + cmapStartCounts[i] + " - " + cmapEndCounts[i]);
            }
            if (log.isDebugEnabled()) {
                if (isInPrivateUseArea(cmapStartCounts[i], cmapEndCounts[i])) {
                    log.debug("Font contains glyphs in the Unicode private use area:"
                            + Integer.toHexString(cmapStartCounts[i]) + " - "
                            + Integer.toHexString(cmapEndCounts[i]));
                }
            }

            for (int j = cmapStartCounts[i]; j <= cmapEndCounts[i]; j++) {

                // Update lastChar
                if (j < 256 && j > lastChar) {
                    lastChar = (short) j;
                }

                if (j < 256) {
                    eightBitGlyphs.set(j);
                }

                if (mtxPtr < mtxTab.length) {
                    int glyphIdx;
                    // the last character 65535 = .notdef
                    // may have a range offset
                    if (cmapRangeOffsets[i] != 0 && j != 65535) {
                        int glyphOffset = glyphIdArrayOffset + ((cmapRangeOffsets[i] / 2)
                                + (j - cmapStartCounts[i]) + (i) - cmapSegCountX2 / 2) * 2;
                        in.seekSet(glyphOffset);
                        glyphIdx = (in.readTTFUShort() + cmapDeltas[i]) & 0xffff;

                        unicodeMapping.add(new UnicodeMapping(glyphIdx, j));
                        mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));

                        if (encodingID == 0 && j >= 0xF020 && j <= 0xF0FF) {
                            //Experimental: Mapping 0xF020-0xF0FF to 0x0020-0x00FF
                            //Tested with Wingdings and Symbol TTF fonts which map their
                            //glyphs in the region 0xF020-0xF0FF.
                            int mapped = j - 0xF000;
                            if (!eightBitGlyphs.get(mapped)) {
                                //Only map if Unicode code point hasn't been mapped before
                                unicodeMapping.add(new UnicodeMapping(glyphIdx, mapped));
                                mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(mapped));
                            }
                        }

                        // Also add winAnsiWidth
                        List v = (List) ansiIndex.get(new Integer(j));
                        if (v != null) {
                            Iterator e = v.listIterator();
                            while (e.hasNext()) {
                                Integer aIdx = (Integer) e.next();
                                ansiWidth[aIdx.intValue()] = mtxTab[glyphIdx].getWx();

                                if (log.isTraceEnabled()) {
                                    log.trace("Added width " + mtxTab[glyphIdx].getWx() + " uni: " + j
                                            + " ansi: " + aIdx.intValue());
                                }
                            }
                        }

                        if (log.isTraceEnabled()) {
                            log.trace("Idx: " + glyphIdx + " Delta: " + cmapDeltas[i] + " Unicode: " + j
                                    + " name: " + mtxTab[glyphIdx].getName());
                        }
                    } else {
                        glyphIdx = (j + cmapDeltas[i]) & 0xffff;

                        if (glyphIdx < mtxTab.length) {
                            mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));
                        } else {
                            log.debug("Glyph " + glyphIdx + " out of range: " + mtxTab.length);
                        }

                        unicodeMapping.add(new UnicodeMapping(glyphIdx, j));
                        if (glyphIdx < mtxTab.length) {
                            mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));
                        } else {
                            log.debug("Glyph " + glyphIdx + " out of range: " + mtxTab.length);
                        }

                        // Also add winAnsiWidth
                        List v = (List) ansiIndex.get(new Integer(j));
                        if (v != null) {
                            Iterator e = v.listIterator();
                            while (e.hasNext()) {
                                Integer aIdx = (Integer) e.next();
                                ansiWidth[aIdx.intValue()] = mtxTab[glyphIdx].getWx();
                            }
                        }

                        //getLogger().debug("IIdx: " +
                        //    mtxPtr +
                        //    " Delta: " + cmap_deltas[i] +
                        //    " Unicode: " + j +
                        //    " name: " +
                        //    mtxTab[(j+cmap_deltas[i]) & 0xffff].name);

                    }
                    if (glyphIdx < mtxTab.length) {
                        if (mtxTab[glyphIdx].getUnicodeIndex().size() < 2) {
                            mtxPtr++;
                        }
                    }
                }
            }
        }
    } else {
        log.error("Cmap format not supported: " + cmapFormat);
        return false;
    }
    return true;
}
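
Here the BitSet plays a different role: eightBitGlyphs is a 256-slot membership set, so the experimental 0xF020-0xF0FF remapping only fires for 8-bit code points that were never mapped directly. A reduced sketch of that guard, with a plain list standing in for the font's mapping table:

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

public class RemapGuard {
    public static void main(String[] args) {
        BitSet eightBitGlyphs = new BitSet(256);
        List<Integer> mappings = new ArrayList<>();

        int[] codePoints = {0x0041, 0xF041, 0xF042}; // 'A', then two private-use points
        for (int j : codePoints) {
            if (j < 256) {
                eightBitGlyphs.set(j); // record the directly mapped 8-bit code point
                mappings.add(j);
            } else if (j >= 0xF020 && j <= 0xF0FF) {
                int mapped = j - 0xF000;
                if (!eightBitGlyphs.get(mapped)) { // only map if not already taken
                    mappings.add(mapped);
                }
            }
        }
        System.out.println(mappings); // [65, 66]: 0xF041 is skipped because 0x41 was already set
    }
}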

From source file:hivemall.smile.classification.GradientTreeBoostingClassifierUDTF.java

/**
 * Train L-k tree boost.
 */
private void traink(final double[][] x, final int[] y, final int k) throws HiveException {
    final int numVars = SmileExtUtils.computeNumInputVars(_numVars, x);
    if (logger.isInfoEnabled()) {
        logger.info("k: " + k + ", numTrees: " + _numTrees + ", shrinkage: " + _eta + ", subsample: "
                + _subsample + ", numVars: " + numVars + ", minSamplesSplit: " + _minSamplesSplit
                + ", maxDepth: " + _maxDepth + ", maxLeafs: " + _maxLeafNodes + ", seed: " + _seed);
    }

    final int numInstances = x.length;
    final int numSamples = (int) Math.round(numInstances * _subsample);

    final double[][] h = new double[k][numInstances]; // boost tree output.
    final double[][] p = new double[k][numInstances]; // posteriori probabilities.
    final double[][] response = new double[k][numInstances]; // pseudo response.

    final int[][] order = SmileExtUtils.sort(_attributes, x);
    final RegressionTree.NodeOutput[] output = new LKNodeOutput[k];
    for (int i = 0; i < k; i++) {
        output[i] = new LKNodeOutput(response[i], k);
    }

    final BitSet sampled = new BitSet(numInstances);
    final int[] bag = new int[numSamples];
    final int[] perm = new int[numSamples];
    for (int i = 0; i < numSamples; i++) {
        perm[i] = i;
    }

    long s = (this._seed == -1L) ? SmileExtUtils.generateSeed() : new smile.math.Random(_seed).nextLong();
    final smile.math.Random rnd1 = new smile.math.Random(s);
    final smile.math.Random rnd2 = new smile.math.Random(rnd1.nextLong());

    // out-of-bag prediction
    final int[] prediction = new int[numInstances];

    for (int m = 0; m < _numTrees; m++) {
        for (int i = 0; i < numInstances; i++) {
            double max = Double.NEGATIVE_INFINITY;
            for (int j = 0; j < k; j++) {
                final double h_ji = h[j][i];
                if (max < h_ji) {
                    max = h_ji;
                }
            }
            double Z = 0.0d;
            for (int j = 0; j < k; j++) {
                double p_ji = Math.exp(h[j][i] - max);
                p[j][i] = p_ji;
                Z += p_ji;
            }
            for (int j = 0; j < k; j++) {
                p[j][i] /= Z;
            }
        }

        final RegressionTree[] trees = new RegressionTree[k];

        Arrays.fill(prediction, -1);
        double max_h = Double.NEGATIVE_INFINITY;
        int oobTests = 0, oobErrors = 0;

        for (int j = 0; j < k; j++) {
            reportProgress(_progressReporter);

            final double[] response_j = response[j];
            final double[] p_j = p[j];
            final double[] h_j = h[j];

            for (int i = 0; i < numInstances; i++) {
                if (y[i] == j) {
                    response_j[i] = 1.0d;
                } else {
                    response_j[i] = 0.0d;
                }
                response_j[i] -= p_j[i];
            }

            SmileExtUtils.shuffle(perm, rnd1);
            for (int i = 0; i < numSamples; i++) {
                int index = perm[i];
                bag[i] = index;
                sampled.set(i);
            }

            RegressionTree tree = new RegressionTree(_attributes, x, response[j], numVars, _maxDepth,
                    _maxLeafNodes, _minSamplesSplit, _minSamplesLeaf, order, bag, output[j], rnd2);
            trees[j] = tree;

            for (int i = 0; i < numInstances; i++) {
                double h_ji = h_j[i] + _eta * tree.predict(x[i]);
                h_j[i] = h_ji;
                if (h_ji > max_h) {
                    max_h = h_ji;
                    prediction[i] = j;
                }
            }

        } // for each k

        // out-of-bag error estimate
        for (int i = sampled.nextClearBit(0); i < numInstances; i = sampled.nextClearBit(i + 1)) {
            oobTests++;
            if (prediction[i] != y[i]) {
                oobErrors++;
            }
        }
        sampled.clear();
        float oobErrorRate = 0.f;
        if (oobTests > 0) {
            oobErrorRate = ((float) oobErrors) / oobTests;
        }

        // forward a row
        forward(m + 1, 0.d, _eta, oobErrorRate, trees);

    } // for each m
}
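
The out-of-bag loop above is the standard way to walk the unset bits of a BitSet: start at nextClearBit(0) and keep asking for nextClearBit(i + 1), bounding the loop with the known universe size (a BitSet is conceptually unbounded, so nextClearBit always returns an index). A tiny standalone version with illustrative values:

import java.util.BitSet;

public class ClearBitIteration {
    public static void main(String[] args) {
        int numInstances = 8;
        BitSet sampled = new BitSet(numInstances);
        sampled.set(1);
        sampled.set(3);
        sampled.set(4);

        // visit every index in [0, numInstances) whose bit is clear (the "out-of-bag" rows)
        for (int i = sampled.nextClearBit(0); i < numInstances; i = sampled.nextClearBit(i + 1)) {
            System.out.println("out-of-bag: " + i); // prints 0, 2, 5, 6, 7
        }
    }
}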

From source file:com.turn.griffin.data.GriffinUploadTask.java

private void uploadFile(FileInfo fileInfo, BitSet availableBlockBitmap) {

    String filename = fileInfo.getFilename();
    long fileVersion = fileInfo.getVersion();
    long blockCount = fileInfo.getBlockCount();
    long blockSize = fileInfo.getBlockSize();
    byte[] buffer = new byte[(int) blockSize];

    GriffinLibCacheUtil libCacheManager = dataManager.getLibCacheManager().get();
    String dataTopicNameForProducer = GriffinKafkaTopicNameUtil.getDataTopicNameForProducer(filename,
            fileVersion);
    GriffinProducer producer = null;
    try {
        String libCacheUploadFilePath = libCacheManager.getUploadFilePath(fileInfo);
        RandomAccessFile libCacheUploadFile = new RandomAccessFile(libCacheUploadFilePath, "r");
        producer = new GriffinProducer(GriffinModule.BROKERS);

        logger.info(String.format("Starting to push %s",
                fileInfo.toString().replaceAll(System.getProperty("line.separator"), " ")));

        int uploadAttempts = 0;
        while (availableBlockBitmap.nextClearBit(0) != blockCount) {

            /* If a new version has arrived abort uploading older version */
            if (!libCacheManager.isLatestGlobalVersion(fileInfo)) {
                logger.info(
                        String.format("Aborting upload for %s version %s as a newer version is now available.",
                                filename, fileVersion));
                break;
            }

            if (uploadAttempts >= maxUploadAttempts) {
                logger.warn(String.format("Unable to upload %s version %s after %s attempts", filename,
                        fileVersion, uploadAttempts));
                String subject = String.format("WARNING: GriffinUploadTask failed for blob:%s", filename);
                String body = String.format(
                        "Action: GriffinUploadTask failed for blob:%s version:%s%n"
                                + "Reason: Unable to upload after %s attempts%n",
                        filename, fileVersion, uploadAttempts);
                GriffinModule.emailAlert(subject, body);
                break;
            }

            int blockToUpload = availableBlockBitmap.nextClearBit(0);
            libCacheUploadFile.seek(blockToUpload * blockSize);
            int bytesRead = libCacheUploadFile.read(buffer);
            DataMessage msg = DataMessage.newBuilder().setBlockSeqNo(blockToUpload).setByteCount(bytesRead)
                    .setData(ByteString.copyFrom(buffer)).build();
            try {
                producer.send(dataTopicNameForProducer, DigestUtils.md5Hex(buffer), msg);
                availableBlockBitmap.set(blockToUpload);
                uploadAttempts = 0;
            } catch (FailedToSendMessageException ftsme) {
                /* Retry the same block again */
                logger.warn(String.format("Unable to send block %s for file: %s version: %s "
                        + "due to FailedToSendMessageException", blockToUpload, filename, fileVersion));
                uploadAttempts++;
            } catch (Exception e) {
                logger.warn(String.format("Unable to send block %s for file: %s version: %s", blockToUpload,
                        filename, fileVersion), e);
                logger.warn("Exception", e);
                uploadAttempts++;
            }
        }
        logger.info(String.format("Ending file upload for file %s version %s to %s", filename, fileVersion,
                dataTopicNameForProducer));
        libCacheUploadFile.close();
    } catch (IOException | RuntimeException e) {
        logger.error(String.format("Unable to upload file %s to %s", filename, dataTopicNameForProducer), e);
        String subject = String.format("WARNING: GriffinUploadTask failed for blob:%s", filename);
        String body = String.format(
                "Action: GriffinUploadTask failed for blob:%s version:%s%n"
                        + "Reason: Exception in GriffinUploadTask%n %s",
                filename, fileVersion, Throwables.getStackTraceAsString(e));
        GriffinModule.emailAlert(subject, body);
    } finally {
        if (producer != null) {
            producer.shutdown();
        }
    }

}
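
availableBlockBitmap turns the upload into an idempotent work list: nextClearBit(0) always names the lowest block not yet acknowledged, the bit is set only after a successful send, and the loop ends once the first clear bit equals the block count. A stripped-down sketch of that loop with a simulated, occasionally failing transport (all names are hypothetical):

import java.util.BitSet;
import java.util.Random;

public class BlockUploadLoop {
    public static void main(String[] args) {
        int blockCount = 5;
        BitSet available = new BitSet(blockCount);
        Random flaky = new Random(42);

        while (available.nextClearBit(0) != blockCount) {
            int blockToUpload = available.nextClearBit(0); // lowest unacknowledged block
            boolean sent = flaky.nextInt(3) > 0;           // simulated flaky send
            if (sent) {
                available.set(blockToUpload); // acknowledged: never retried again
                System.out.println("sent block " + blockToUpload);
            } else {
                System.out.println("retrying block " + blockToUpload);
            }
        }
    }
}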

From source file:org.apache.cassandra.concurrent.LongSharedExecutorPoolTest.java

private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    for (int i = 0; i < executors.length; i++) {
        executors[i] = SharedExecutorPool.SHARED.newExecutor(threadCount, maxQueued, "test" + i, "test" + i);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f,
                    events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE)
            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        //            System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}
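
executorsWithWork is effectively a tiny slot allocator: nextClearBit(0) finds the first idle executor, set claims it, and clear releases it when its batch completes. The same three calls make a generic fixed-size slot pool, sketched here with hypothetical names:

import java.util.BitSet;

public class SlotPool {
    private final BitSet inUse;
    private final int capacity;

    SlotPool(int capacity) {
        this.capacity = capacity;
        this.inUse = new BitSet(capacity);
    }

    /** Claims and returns the first free slot, or -1 if all slots are busy. */
    int acquire() {
        int slot = inUse.nextClearBit(0);
        if (slot >= capacity) {
            return -1;
        }
        inUse.set(slot);
        return slot;
    }

    /** Marks a previously acquired slot as free again. */
    void release(int slot) {
        inUse.clear(slot);
    }

    public static void main(String[] args) {
        SlotPool pool = new SlotPool(2);
        System.out.println(pool.acquire()); // 0
        System.out.println(pool.acquire()); // 1
        System.out.println(pool.acquire()); // -1: pool exhausted
        pool.release(0);
        System.out.println(pool.acquire()); // 0 again
    }
}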

From source file:org.apache.fop.fonts.truetype.OpenFont.java

private boolean readUnicodeCmap(long cmapUniOffset, int encodingID) throws IOException {
    //Read CMAP table and correct mtxTab.index
    int mtxPtr = 0;

    // Read unicode cmap
    seekTab(fontFile, OFTableName.CMAP, cmapUniOffset);
    int cmapFormat = fontFile.readTTFUShort();
    /*int cmap_length =*/ fontFile.readTTFUShort(); //skip cmap length

    if (log.isDebugEnabled()) {
        log.debug("CMAP format: " + cmapFormat);
    }

    if (cmapFormat == 4) {
        fontFile.skip(2); // Skip version number
        int cmapSegCountX2 = fontFile.readTTFUShort();
        int cmapSearchRange = fontFile.readTTFUShort();
        int cmapEntrySelector = fontFile.readTTFUShort();
        int cmapRangeShift = fontFile.readTTFUShort();

        if (log.isDebugEnabled()) {
            log.debug("segCountX2   : " + cmapSegCountX2);
            log.debug("searchRange  : " + cmapSearchRange);
            log.debug("entrySelector: " + cmapEntrySelector);
            log.debug("rangeShift   : " + cmapRangeShift);
        }

        int[] cmapEndCounts = new int[cmapSegCountX2 / 2];
        int[] cmapStartCounts = new int[cmapSegCountX2 / 2];
        int[] cmapDeltas = new int[cmapSegCountX2 / 2];
        int[] cmapRangeOffsets = new int[cmapSegCountX2 / 2];

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapEndCounts[i] = fontFile.readTTFUShort();
        }

        fontFile.skip(2); // Skip reservedPad

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapStartCounts[i] = fontFile.readTTFUShort();
        }

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapDeltas[i] = fontFile.readTTFShort();
        }

        //int startRangeOffset = in.getCurrentPos();

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapRangeOffsets[i] = fontFile.readTTFUShort();
        }

        int glyphIdArrayOffset = fontFile.getCurrentPos();

        BitSet eightBitGlyphs = new BitSet(256);

        // Insert the unicode id for the glyphs in mtxTab
        // and fill in the cmaps ArrayList
        for (int i = 0; i < cmapStartCounts.length; i++) {

            if (log.isTraceEnabled()) {
                log.trace(i + ": " + cmapStartCounts[i] + " - " + cmapEndCounts[i]);
            }
            if (log.isDebugEnabled()) {
                if (isInPrivateUseArea(cmapStartCounts[i], cmapEndCounts[i])) {
                    log.debug("Font contains glyphs in the Unicode private use area: "
                            + Integer.toHexString(cmapStartCounts[i]) + " - "
                            + Integer.toHexString(cmapEndCounts[i]));
                }
            }

            for (int j = cmapStartCounts[i]; j <= cmapEndCounts[i]; j++) {

                // Update lastChar
                if (j < 256 && j > lastChar) {
                    lastChar = (short) j;
                }

                if (j < 256) {
                    eightBitGlyphs.set(j);
                }

                if (mtxPtr < mtxTab.length) {
                    int glyphIdx;
                    // the last character 65535 = .notdef
                    // may have a range offset
                    if (cmapRangeOffsets[i] != 0 && j != 65535) {
                        int glyphOffset = glyphIdArrayOffset + ((cmapRangeOffsets[i] / 2)
                                + (j - cmapStartCounts[i]) + (i) - cmapSegCountX2 / 2) * 2;
                        fontFile.seekSet(glyphOffset);
                        glyphIdx = (fontFile.readTTFUShort() + cmapDeltas[i]) & 0xffff;
                        //mtxTab[glyphIdx].setName(mtxTab[glyphIdx].getName() + " - "+(char)j);
                        unicodeMappings.add(new UnicodeMapping(this, glyphIdx, j));
                        mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));

                        if (encodingID == 0 && j >= 0xF020 && j <= 0xF0FF) {
                            //Experimental: Mapping 0xF020-0xF0FF to 0x0020-0x00FF
                            //Tested with Wingdings and Symbol TTF fonts which map their
                            //glyphs in the region 0xF020-0xF0FF.
                            int mapped = j - 0xF000;
                            if (!eightBitGlyphs.get(mapped)) {
                                //Only map if Unicode code point hasn't been mapped before
                                unicodeMappings.add(new UnicodeMapping(this, glyphIdx, mapped));
                                mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(mapped));
                            }
                        }

                        // Also add winAnsiWidth
                        List<Integer> v = ansiIndex.get(new Integer(j));
                        if (v != null) {
                            for (Integer aIdx : v) {
                                ansiWidth[aIdx.intValue()] = mtxTab[glyphIdx].getWx();

                                if (log.isTraceEnabled()) {
                                    log.trace("Added width " + mtxTab[glyphIdx].getWx() + " uni: " + j
                                            + " ansi: " + aIdx.intValue());
                                }
                            }
                        }

                        if (log.isTraceEnabled()) {
                            log.trace("Idx: " + glyphIdx + " Delta: " + cmapDeltas[i] + " Unicode: " + j
                                    + " name: " + mtxTab[glyphIdx].getName());
                        }
                    } else {
                        glyphIdx = (j + cmapDeltas[i]) & 0xffff;

                        if (glyphIdx < mtxTab.length) {
                            mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));
                        } else {
                            log.debug("Glyph " + glyphIdx + " out of range: " + mtxTab.length);
                        }

                        unicodeMappings.add(new UnicodeMapping(this, glyphIdx, j));
                        if (glyphIdx < mtxTab.length) {
                            mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));
                        } else {
                            log.debug("Glyph " + glyphIdx + " out of range: " + mtxTab.length);
                        }

                        // Also add winAnsiWidth
                        List<Integer> v = ansiIndex.get(new Integer(j));
                        if (v != null) {
                            for (Integer aIdx : v) {
                                ansiWidth[aIdx.intValue()] = mtxTab[glyphIdx].getWx();
                            }
                        }

                        //getLogger().debug("IIdx: " +
                        //    mtxPtr +
                        //    " Delta: " + cmap_deltas[i] +
                        //    " Unicode: " + j +
                        //    " name: " +
                        //    mtxTab[(j+cmap_deltas[i]) & 0xffff].name);

                    }
                    if (glyphIdx < mtxTab.length) {
                        if (mtxTab[glyphIdx].getUnicodeIndex().size() < 2) {
                            mtxPtr++;
                        }
                    }
                }
            }
        }
    } else {
        log.error("Cmap format not supported: " + cmapFormat);
        return false;
    }
    return true;
}

From source file:bes.injector.InjectorBurnTest.java

private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException, TimeoutException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    final Injector injector = new Injector("");
    for (int i = 0; i < executors.length; i++) {
        executors[i] = injector.newExecutor(threadCount, maxQueued);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<Batch>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f,
                    events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
                // not all futures finished within the window; fall through to the deadline check below
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                // ignore and continue the load loop
            }
        }

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<Result>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        //            System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}
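
The executorsWithWork BitSet above acts as a busy mask: set(executorIndex) marks an executor as having a batch in flight, clear(...) frees it when the batch completes, and nextClearBit(0) picks the first idle executor. Below is a minimal, self-contained sketch of that idiom; the class name BusyMaskDemo and the printed values are illustrative only, not part of the original test:

import java.util.BitSet;

public class BusyMaskDemo {
    public static void main(String[] args) {
        int executorCount = 4;
        BitSet busy = new BitSet(executorCount);

        // Mark executors 0 and 1 as having work in flight.
        busy.set(0);
        busy.set(1);

        // nextClearBit finds the lowest-numbered idle executor.
        System.out.println(busy.nextClearBit(0)); // 2

        // Clearing a bit frees that slot again.
        busy.clear(0);
        System.out.println(busy.nextClearBit(0)); // 0

        // When all executorCount bits are set, nextClearBit returns
        // executorCount, which the burn test treats as "no free executor".
        busy.set(0);
        busy.set(2);
        busy.set(3);
        System.out.println(busy.nextClearBit(0) >= executorCount); // true
    }
}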

From source file:org.apache.fop.fonts.truetype.TTFFile.java

private boolean readUnicodeCmap(long cmapUniOffset, int encodingID) // CSOK: MethodLength
        throws IOException {
    //Read CMAP table and correct mtxTab.index
    int mtxPtr = 0;

    // Read unicode cmap
    seekTab(fontFile, TTFTableName.CMAP, cmapUniOffset);
    int cmapFormat = fontFile.readTTFUShort();
    /*int cmap_length =*/ fontFile.readTTFUShort(); //skip cmap length

    if (log.isDebugEnabled()) {
        log.debug("CMAP format: " + cmapFormat);
    }

    if (cmapFormat == 4) {
        fontFile.skip(2); // Skip version number
        int cmapSegCountX2 = fontFile.readTTFUShort();
        int cmapSearchRange = fontFile.readTTFUShort();
        int cmapEntrySelector = fontFile.readTTFUShort();
        int cmapRangeShift = fontFile.readTTFUShort();

        if (log.isDebugEnabled()) {
            log.debug("segCountX2   : " + cmapSegCountX2);
            log.debug("searchRange  : " + cmapSearchRange);
            log.debug("entrySelector: " + cmapEntrySelector);
            log.debug("rangeShift   : " + cmapRangeShift);
        }

        int[] cmapEndCounts = new int[cmapSegCountX2 / 2];
        int[] cmapStartCounts = new int[cmapSegCountX2 / 2];
        int[] cmapDeltas = new int[cmapSegCountX2 / 2];
        int[] cmapRangeOffsets = new int[cmapSegCountX2 / 2];

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapEndCounts[i] = fontFile.readTTFUShort();
        }

        fontFile.skip(2); // Skip reservedPad

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapStartCounts[i] = fontFile.readTTFUShort();
        }

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapDeltas[i] = fontFile.readTTFShort();
        }

        //int startRangeOffset = in.getCurrentPos();

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapRangeOffsets[i] = fontFile.readTTFUShort();
        }

        int glyphIdArrayOffset = fontFile.getCurrentPos();

        BitSet eightBitGlyphs = new BitSet(256);

        // Insert the unicode id for the glyphs in mtxTab
        // and fill in the cmaps ArrayList

        for (int i = 0; i < cmapStartCounts.length; i++) {

            if (log.isTraceEnabled()) {
                log.trace(i + ": " + cmapStartCounts[i] + " - " + cmapEndCounts[i]);
            }
            if (log.isDebugEnabled()) {
                if (isInPrivateUseArea(cmapStartCounts[i], cmapEndCounts[i])) {
                    log.debug("Font contains glyphs in the Unicode private use area: "
                            + Integer.toHexString(cmapStartCounts[i]) + " - "
                            + Integer.toHexString(cmapEndCounts[i]));
                }
            }

            for (int j = cmapStartCounts[i]; j <= cmapEndCounts[i]; j++) {

                // Update lastChar
                if (j < 256 && j > lastChar) {
                    lastChar = (short) j;
                }

                if (j < 256) {
                    eightBitGlyphs.set(j);
                }

                if (mtxPtr < mtxTab.length) {
                    int glyphIdx;
                    // the last character (65535 = .notdef)
                    // may have a range offset
                    if (cmapRangeOffsets[i] != 0 && j != 65535) {
                        int glyphOffset = glyphIdArrayOffset + ((cmapRangeOffsets[i] / 2)
                                + (j - cmapStartCounts[i]) + (i) - cmapSegCountX2 / 2) * 2;
                        fontFile.seekSet(glyphOffset);
                        glyphIdx = (fontFile.readTTFUShort() + cmapDeltas[i]) & 0xffff;

                        unicodeMappings.add(new UnicodeMapping(glyphIdx, j));
                        mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));

                        // Also add winAnsiWidth
                        List<Integer> v = ansiIndex.get(new Integer(j));
                        if (v != null) {
                            for (Integer aIdx : v) {
                                ansiWidth[aIdx.intValue()] = mtxTab[glyphIdx].getWx();

                                if (log.isTraceEnabled()) {
                                    log.trace("Added width " + mtxTab[glyphIdx].getWx() + " uni: " + j
                                            + " ansi: " + aIdx.intValue());
                                }
                            }
                        }

                        if (log.isTraceEnabled()) {
                            log.trace("Idx: " + glyphIdx + " Delta: " + cmapDeltas[i] + " Unicode: " + j
                                    + " name: " + mtxTab[glyphIdx].getName());
                        }
                    } else {
                        glyphIdx = (j + cmapDeltas[i]) & 0xffff;

                        unicodeMappings.add(new UnicodeMapping(glyphIdx, j));
                        if (glyphIdx < mtxTab.length) {
                            mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));
                        } else {
                            log.debug("Glyph " + glyphIdx + " out of range: " + mtxTab.length);
                        }

                        // Also add winAnsiWidth
                        List<Integer> v = ansiIndex.get(new Integer(j));
                        if (v != null) {
                            for (Integer aIdx : v) {
                                ansiWidth[aIdx.intValue()] = mtxTab[glyphIdx].getWx();
                            }
                        }

                        //getLogger().debug("IIdx: " +
                        //    mtxPtr +
                        //    " Delta: " + cmap_deltas[i] +
                        //    " Unicode: " + j +
                        //    " name: " +
                        //    mtxTab[(j+cmap_deltas[i]) & 0xffff].name);

                    }
                    if (glyphIdx < mtxTab.length) {
                        if (mtxTab[glyphIdx].getUnicodeIndex().size() < 2) {
                            mtxPtr++;
                        }
                    }
                }
            }
        }
    } else {
        log.error("Cmap format not supported: " + cmapFormat);
        return false;
    }
    return true;
}
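
In the example above, eightBitGlyphs is a BitSet(256) recording which single-byte code points the font's cmap covers: every j below 256 inside a start/end segment gets eightBitGlyphs.set(j). The following is a minimal sketch of that coverage bookkeeping, using made-up segment data rather than a real cmap table:

import java.util.BitSet;

public class GlyphCoverageDemo {
    public static void main(String[] args) {
        // Hypothetical cmap segments (start/end pairs of covered code points).
        int[] starts = {0x20, 0xA0};
        int[] ends = {0x7E, 0xFF};

        BitSet eightBitGlyphs = new BitSet(256);
        for (int i = 0; i < starts.length; i++) {
            for (int j = starts[i]; j <= ends[i]; j++) {
                if (j < 256) {
                    eightBitGlyphs.set(j); // record single-byte coverage
                }
            }
        }

        System.out.println(eightBitGlyphs.get('A'));      // true (0x41 is covered)
        System.out.println(eightBitGlyphs.get(0x7F));     // false (gap between segments)
        System.out.println(eightBitGlyphs.cardinality()); // 191 covered code points
    }
}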

From source file:org.apache.asterix.external.library.ClassAdParser.java

private void parseRecord(ARecordType recType, ClassAd pAd, DataOutput out)
        throws IOException, AsterixException {
    ArrayBackedValueStorage fieldValueBuffer = getTempBuffer();
    ArrayBackedValueStorage fieldNameBuffer = getTempBuffer();
    IARecordBuilder recBuilder = getRecordBuilder();
    BitSet nulls = null;
    if (recType != null) {
        nulls = getBitSet();
        recBuilder.reset(recType);
    } else {
        recBuilder.reset(null);
    }
    recBuilder.init();
    boolean openRecordField = false;
    int fieldId = 0;
    IAType fieldType = null;

    // iterate over the ClassAd attribute list and serialize each field
    Map<CaseInsensitiveString, ExprTree> attrs = pAd.getAttrList();
    for (Entry<CaseInsensitiveString, ExprTree> entry : attrs.entrySet()) {
        // reset buffers
        fieldNameBuffer.reset();
        fieldValueBuffer.reset();
        // take care of field name
        String fldName = entry.getKey().get();
        if (recType != null) {
            fieldId = recBuilder.getFieldId(fldName);
            if (fieldId < 0 && !recType.isOpen()) {
                throw new HyracksDataException("This record is closed, you cannot add extra fields!");
            } else if (fieldId < 0 && recType.isOpen()) {
                aStringFieldName.setValue(fldName);
                stringSerde.serialize(aStringFieldName, fieldNameBuffer.getDataOutput());
                openRecordField = true;
                fieldType = null;
            } else {
                // a closed field
                nulls.set(fieldId);
                fieldType = recType.getFieldTypes()[fieldId];
                openRecordField = false;
            }
        } else {
            aStringFieldName.setValue(fldName);
            stringSerde.serialize(aStringFieldName, fieldNameBuffer.getDataOutput());
            openRecordField = true;
            fieldType = null;
        }

        // add field value to value buffer
        writeFieldValueToBuffer(fieldType, fieldValueBuffer.getDataOutput(), fldName, entry.getValue(), pAd);
        if (openRecordField) {
            if (fieldValueBuffer.getByteArray()[0] != ATypeTag.MISSING.serialize()) {
                recBuilder.addField(fieldNameBuffer, fieldValueBuffer);
            }
        } else if (NonTaggedFormatUtil.isOptional(fieldType)) {
            if (fieldValueBuffer.getByteArray()[0] != ATypeTag.MISSING.serialize()) {
                recBuilder.addField(fieldId, fieldValueBuffer);
            }
        } else {
            recBuilder.addField(fieldId, fieldValueBuffer);
        }
    }

    if (recType != null) {
        int optionalFieldId = checkOptionalConstraints(recType, nulls);
        if (optionalFieldId != -1) {
            throw new HyracksDataException(
                    "Field: " + recType.getFieldNames()[optionalFieldId] + " can not be optional");
        }
    }
    recBuilder.write(out, true);
}
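
Here the nulls BitSet tracks which closed fields were actually encountered: nulls.set(fieldId) runs for every closed field that appears, and checkOptionalConstraints presumably scans for a required field whose bit is still clear. A minimal sketch of that pattern follows; firstMissingRequired is a hypothetical stand-in, as the real check lives elsewhere in AsterixDB:

import java.util.BitSet;

public class ClosedFieldCheckDemo {
    // Returns the index of the first required field that was never set,
    // or -1 if every required field appeared.
    static int firstMissingRequired(BitSet seen, boolean[] optional) {
        for (int i = seen.nextClearBit(0); i < optional.length; i = seen.nextClearBit(i + 1)) {
            if (!optional[i]) {
                return i;
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        boolean[] optional = {false, true, false}; // fields 0 and 2 are required
        BitSet seen = new BitSet(optional.length);
        seen.set(0); // only field 0 was encountered while parsing

        System.out.println(firstMissingRequired(seen, optional)); // 2
        seen.set(2);
        System.out.println(firstMissingRequired(seen, optional)); // -1
    }
}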