Example usage for java.util BitSet BitSet

List of usage examples for java.util BitSet BitSet

Introduction

On this page you can find usage examples for the java.util.BitSet constructor.

Prototype

public BitSet(int nbits)

Document

Creates a bit set whose initial size is large enough to explicitly represent bits with indices in the range 0 through nbits-1. All bits are initially false.
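
Before the project examples below, a minimal self-contained sketch of the constructor on its own (JDK only; the class and variable names here are illustrative):

import java.util.BitSet;

public class BitSetConstructorSketch {
    public static void main(String[] args) {
        // The argument is only an initial size hint; the set can still grow later.
        BitSet bits = new BitSet(16);

        bits.set(3);          // mark index 3
        bits.set(5, 9);       // mark indices 5..8
        bits.clear(6);        // unmark index 6

        System.out.println(bits);                 // {3, 5, 7, 8}
        System.out.println(bits.cardinality());   // 4
        System.out.println(bits.nextClearBit(5)); // 6
    }
}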

Usage

From source file:net.sf.jabref.openoffice.OOBibBase.java

private void insertFullReferenceAtCursor(XTextCursor cursor, Map<BibEntry, BibDatabase> entries,
        OOBibStyle style, String parFormat) throws UndefinedParagraphFormatException, IllegalArgumentException,
        UnknownPropertyException, PropertyVetoException, WrappedTargetException {
    Map<BibEntry, BibDatabase> correctEntries;
    // If we don't have numbered entries, we need to sort the entries before adding them:
    if (!style.isSortByPosition()) {
        Map<BibEntry, BibDatabase> newMap = new TreeMap<>(entryComparator);
        newMap.putAll(entries);
        correctEntries = newMap;
    } else {
        // Otherwise, use the received map directly
        correctEntries = entries;
    }
    int number = 1;
    for (Map.Entry<BibEntry, BibDatabase> entry : correctEntries.entrySet()) {
        if (entry.getKey() instanceof UndefinedBibtexEntry) {
            continue;
        }
        OOUtil.insertParagraphBreak(text, cursor);
        if (style.isNumberEntries()) {
            int minGroupingCount = style.getIntCitProperty(OOBibStyle.MINIMUM_GROUPING_COUNT);
            OOUtil.insertTextAtCurrentLocation(text, cursor,
                    style.getNumCitationMarker(Arrays.asList(number++), minGroupingCount, true),
                    new BitSet(OOUtil.TOTAL_FORMAT_COUNT));
        }
        Layout layout = style.getReferenceFormat(entry.getKey().getType());
        layout.setPostFormatter(POSTFORMATTER);
        OOUtil.insertFullReferenceAtCurrentLocation(text, cursor, layout, parFormat, entry.getKey(),
                entry.getValue(), uniquefiers.get(entry.getKey().getCiteKey()));
    }

}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaITCase.java

@Test(timeout = 60000)
public void brokerFailureTest() throws Exception {
    String topic = "brokerFailureTestTopic";

    createTestTopic(topic, 2, 2);

    // --------------------------- write data to topic ---------------------
    LOG.info("Writing data to topic {}", topic);
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(1);

    DataStream<String> stream = env.addSource(new SourceFunction<String>() {
        private static final long serialVersionUID = 1L;

        boolean running = true;

        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            LOG.info("Starting source.");
            int cnt = 0;
            while (running) {
                String msg = "kafka-" + cnt++;
                ctx.collect(msg);
                LOG.info("sending message = " + msg);

                if ((cnt - 1) % 20 == 0) {
                    LOG.debug("Sending message #{}", cnt - 1);
                }
                if (cnt == 200) {
                    LOG.info("Stopping to produce after 200 msgs");
                    break;
                }

            }
        }

        @Override
        public void cancel() {
            LOG.info("Source got chancel()");
            running = false;
        }
    });
    stream.addSink(new KafkaSink<String>(brokerConnectionStrings, topic, new JavaDefaultStringSchema()))
            .setParallelism(1);

    tryExecute(env, "broker failure test - writer");

    // --------------------------- read and let broker fail ---------------------

    LOG.info("Reading data from topic {} and let a broker fail", topic);
    PartitionMetadata firstPart = null;
    do {
        if (firstPart != null) {
            LOG.info("Unable to find leader. error code {}", firstPart.errorCode());
            // not the first try. Sleep a bit
            Thread.sleep(150);
        }
        Seq<PartitionMetadata> partitionMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient)
                .partitionsMetadata();
        firstPart = partitionMetadata.head();
    } while (firstPart.errorCode() != 0);

    final String leaderToShutDown = firstPart.leader().get().connectionString();
    LOG.info("Leader to shutdown {}", leaderToShutDown);

    final Thread brokerShutdown = new Thread(new Runnable() {
        @Override
        public void run() {
            shutdownKafkaBroker = false;
            while (!shutdownKafkaBroker) {
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    LOG.warn("Interruption", e);
                }
            }

            for (KafkaServer kafkaServer : brokers) {
                if (leaderToShutDown.equals(kafkaServer.config().advertisedHostName() + ":"
                        + kafkaServer.config().advertisedPort())) {
                    LOG.info("Killing Kafka Server {}", leaderToShutDown);
                    kafkaServer.shutdown();
                    leaderHasShutDown = true;
                    break;
                }
            }
        }
    });
    brokerShutdown.start();

    // add consuming topology:
    DataStreamSource<String> consuming = env
            .addSource(new PersistentKafkaSource<String>(topic, new JavaDefaultStringSchema(), standardCC));
    consuming.setParallelism(1);

    consuming.addSink(new SinkFunction<String>() {
        private static final long serialVersionUID = 1L;

        int elCnt = 0;
        int start = -1; // first received value; -1 until initialized in invoke()
        int numOfMessagesToBeCorrect = 100;
        int stopAfterMessages = 150;

        BitSet validator = new BitSet(numOfMessagesToBeCorrect + 1);

        @Override
        public void invoke(String value) throws Exception {
            LOG.info("Got message = " + value + " leader has shut down " + leaderHasShutDown + " el cnt = "
                    + elCnt + " to rec" + numOfMessagesToBeCorrect);
            String[] sp = value.split("-");
            int v = Integer.parseInt(sp[1]);

            if (start == -1) {
                start = v;
            }
            int offset = v - start;
            Assert.assertFalse("Received tuple with value " + offset + " twice", validator.get(offset));
            if (v - start < 0 && LOG.isWarnEnabled()) {
                LOG.warn("Not in order: {}", value);
            }

            validator.set(offset);
            elCnt++;
            if (elCnt == 20) {
                LOG.info("Asking leading broker to shut down");
                // shut down a Kafka broker
                shutdownKafkaBroker = true;
            }
            if (shutdownKafkaBroker) {
                // we become a bit slower because the shutdown takes some time and we have
                // only a fixed number of elements to read
                Thread.sleep(20);
            }
            if (leaderHasShutDown) { // it only makes sense to check once the shutdown is completed
                if (elCnt >= stopAfterMessages) {
                    // check if everything in the bitset is set to true
                    int nc;
                    if ((nc = validator.nextClearBit(0)) < numOfMessagesToBeCorrect) {
                        throw new RuntimeException(
                                "The bitset was not set to 1 on all elements to be checked. Next clear:" + nc
                                        + " Set: " + validator);
                    }
                    throw new SuccessException();
                }
            }
        }
    });
    tryExecute(env, "broker failure test - reader");

}
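
The validator pattern in the sink above, one bit per expected message, duplicates rejected with get() and completeness checked with nextClearBit(), can be exercised on its own. A small sketch independent of Flink and Kafka; the names and counts are illustrative:

import java.util.BitSet;

public class CompletenessCheckSketch {
    public static void main(String[] args) {
        int expected = 100;                 // messages we expect exactly once
        BitSet validator = new BitSet(expected);

        // Simulate receiving offsets 0..99 in arbitrary order, no duplicates.
        for (int offset = expected - 1; offset >= 0; offset--) {
            if (validator.get(offset)) {
                throw new IllegalStateException("Received offset " + offset + " twice");
            }
            validator.set(offset);
        }

        // Everything below 'expected' must be set; the first clear bit marks a gap.
        int firstGap = validator.nextClearBit(0);
        if (firstGap < expected) {
            throw new IllegalStateException("Missing offset " + firstGap);
        }
        System.out.println("All " + expected + " offsets seen exactly once: " + validator);
    }
}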

From source file:org.apache.hadoop.hbase.security.visibility.VisibilityController.java

private Filter createVisibilityLabelFilter(HRegion region, Authorizations authorizations) throws IOException {
    Map<ByteRange, Integer> cfVsMaxVersions = new HashMap<ByteRange, Integer>();
    for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {
        cfVsMaxVersions.put(new SimpleByteRange(hcd.getName()), hcd.getMaxVersions());
    }
    if (authorizations == null) {
        // No Authorizations present for this scan/Get!
        // In case of system tables other than "labels" just scan without visibility check and
        // filtering. Checking visibility labels for META and NAMESPACE table is not needed.
        TableName table = region.getRegionInfo().getTable();
        if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
            return null;
        }
    } else {
        for (String label : authorizations.getLabels()) {
            if (!VisibilityLabelsValidator.isValidLabel(label)) {
                throw new IllegalArgumentException("Invalid authorization label : " + label
                        + ". Authorizations cannot contain '(', ')' ,'&' ,'|', '!'" + " and cannot be empty");
            }
        }
    }
    Filter visibilityLabelFilter = null;
    if (this.scanLabelGenerators != null) {
        List<String> labels = null;
        for (ScanLabelGenerator scanLabelGenerator : this.scanLabelGenerators) {
            try {
                // null authorizations to be handled inside SLG impl.
                labels = scanLabelGenerator.getLabels(getActiveUser(), authorizations);
                labels = (labels == null) ? new ArrayList<String>() : labels;
                authorizations = new Authorizations(labels);
            } catch (Throwable t) {
                LOG.error(t);
                throw new IOException(t);
            }
        }
        int labelsCount = this.visibilityManager.getLabelsCount();
        BitSet bs = new BitSet(labelsCount + 1); // ordinal is index 1 based
        if (labels != null) {
            for (String label : labels) {
                int labelOrdinal = this.visibilityManager.getLabelOrdinal(label);
                if (labelOrdinal != 0) {
                    bs.set(labelOrdinal);
                }
            }
        }
        visibilityLabelFilter = new VisibilityLabelFilter(bs, cfVsMaxVersions);
    }
    return visibilityLabelFilter;
}
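
The filter above sizes the BitSet as labelsCount + 1 because label ordinals are 1-based and an ordinal of 0 means the label is unknown, so bit 0 is never used. A tiny illustration of that convention; the label-to-ordinal mapping is made up:

import java.util.BitSet;
import java.util.Map;

public class OrdinalBitSetSketch {
    public static void main(String[] args) {
        // Hypothetical label -> ordinal mapping; ordinals are 1-based as in the filter above.
        Map<String, Integer> ordinals = Map.of("secret", 1, "topsecret", 2, "public", 3);

        BitSet authorized = new BitSet(ordinals.size() + 1); // bit 0 stays unused
        authorized.set(ordinals.get("secret"));
        authorized.set(ordinals.get("public"));

        System.out.println(authorized.get(0)); // false - ordinal 0 means "unknown label"
        System.out.println(authorized.get(1)); // true  - "secret" is authorized
        System.out.println(authorized.get(2)); // false - "topsecret" is not
    }
}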

From source file:io.warp10.continuum.gts.GTSHelper.java

/**
 * Allocate memory so we can add one value to the Geo Time Serie.
 *
 * @param value The value that will be added; it is only used so we can allocate the correct container for the type.
 */
private static final void provision(GeoTimeSerie gts, Object value, long location, long elevation) {
    //
    // Nothing to do if the ticks array is not full yet.
    //
    if (TYPE.UNDEFINED != gts.type && gts.values < gts.ticks.length) {
        if (GeoTimeSerie.NO_LOCATION == location && GeoTimeSerie.NO_ELEVATION == elevation) {
            return;
        }

        if (null == gts.locations && GeoTimeSerie.NO_LOCATION != location) {
            gts.locations = new long[gts.ticks.length];
            Arrays.fill(gts.locations, GeoTimeSerie.NO_LOCATION);
        }

        if (null == gts.elevations && GeoTimeSerie.NO_ELEVATION != elevation) {
            gts.elevations = new long[gts.ticks.length];
            Arrays.fill(gts.elevations, GeoTimeSerie.NO_ELEVATION);
        }

        return;
    } else if (TYPE.UNDEFINED != gts.type) {
        //
        // We need to grow 'ticks', 'locations', 'elevations' and associated value array.
        //

        int newlen = gts.ticks.length + (int) Math.min(GeoTimeSerie.MAX_ARRAY_GROWTH,
                Math.max(GeoTimeSerie.MIN_ARRAY_GROWTH, gts.ticks.length * GeoTimeSerie.ARRAY_GROWTH_FACTOR));

        //if (newlen > MAX_VALUES) {
        //  throw new RuntimeException("Geo time serie would exceed maximum number of values set to " + MAX_VALUES);
        //}

        if (newlen < gts.sizehint) {
            newlen = gts.sizehint;
        }

        gts.ticks = Arrays.copyOf(gts.ticks, newlen);
        if (null != gts.locations || GeoTimeSerie.NO_LOCATION != location) {
            if (null == gts.locations) {
                gts.locations = new long[gts.ticks.length];
                // Fill all values with NO_LOCATION since we are creating the array and thus
                // must consider all previous locations were undefined
                Arrays.fill(gts.locations, GeoTimeSerie.NO_LOCATION);
            } else {
                gts.locations = Arrays.copyOf(gts.locations, gts.ticks.length);
            }
        }
        if (null != gts.elevations || GeoTimeSerie.NO_ELEVATION != elevation) {
            if (null == gts.elevations) {
                gts.elevations = new long[gts.ticks.length];
                // Fill the newly allocated array with NO_ELEVATION since we must consider
                // all previous elevations were undefined
                Arrays.fill(gts.elevations, GeoTimeSerie.NO_ELEVATION);
            } else {
                gts.elevations = Arrays.copyOf(gts.elevations, gts.ticks.length);
            }
        }

        // BitSets grow automatically...
        if (TYPE.LONG == gts.type) {
            gts.longValues = Arrays.copyOf(gts.longValues, gts.ticks.length);
        } else if (TYPE.DOUBLE == gts.type) {
            gts.doubleValues = Arrays.copyOf(gts.doubleValues, gts.ticks.length);
        } else if (TYPE.STRING == gts.type) {
            gts.stringValues = Arrays.copyOf(gts.stringValues, gts.ticks.length);
        }
    } else if (TYPE.UNDEFINED == gts.type) {
        if (null == gts.ticks) {
            gts.ticks = new long[gts.sizehint > 0 ? gts.sizehint : GeoTimeSerie.MIN_ARRAY_GROWTH];
        }

        // Nullify location if no location is set (since the GTS is empty)
        if (GeoTimeSerie.NO_LOCATION == location) {
            gts.locations = null;
        } else if (null == gts.locations || gts.locations.length < gts.ticks.length) {
            gts.locations = new long[gts.ticks.length];
        }

        // Nullify elevation if no elevation is set (since the GTS is empty)
        if (GeoTimeSerie.NO_ELEVATION == elevation) {
            gts.elevations = null;
        } else if (null == gts.elevations || gts.elevations.length < gts.ticks.length) {
            gts.elevations = new long[gts.ticks.length];
        }

        if (value instanceof Boolean) {
            gts.type = TYPE.BOOLEAN;
            if (null == gts.booleanValues || gts.booleanValues.size() < gts.ticks.length) {
                gts.booleanValues = new BitSet(gts.ticks.length);
            }
        } else if (value instanceof Long || value instanceof Integer || value instanceof Short
                || value instanceof Byte || value instanceof BigInteger) {
            gts.type = TYPE.LONG;
            if (null == gts.longValues || gts.longValues.length < gts.ticks.length) {
                gts.longValues = new long[gts.ticks.length];
            }
        } else if (value instanceof Float || value instanceof Double || value instanceof BigDecimal) {
            gts.type = TYPE.DOUBLE;
            if (null == gts.doubleValues || gts.doubleValues.length < gts.ticks.length) {
                gts.doubleValues = new double[gts.ticks.length];
            }
        } else if (value instanceof String) {
            gts.type = TYPE.STRING;
            if (null == gts.stringValues || gts.stringValues.length < gts.ticks.length) {
                gts.stringValues = new String[gts.ticks.length];
            }
        } else {
            //
            // Default type is boolean; this is so people will rapidly notice
            // this is not what they were expecting...
            //
            gts.type = TYPE.BOOLEAN;
            gts.booleanValues = new BitSet(gts.ticks.length);
        }
    }
}
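
The comment "BitSets grow automatically" is why the boolean container above needs no Arrays.copyOf call: unlike the primitive arrays, a BitSet expands on demand when a bit beyond its initial size is set. A quick standalone check of that behavior:

import java.util.BitSet;

public class BitSetGrowthSketch {
    public static void main(String[] args) {
        // The constructor argument is only an initial sizing hint.
        BitSet values = new BitSet(4);
        System.out.println(values.size());    // at least 64 (one long word)

        values.set(1000);                      // no resize code needed on the caller side
        System.out.println(values.get(1000));  // true
        System.out.println(values.size());     // now >= 1001, grown automatically
    }
}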

From source file:org.apache.openjpa.kernel.StateManagerImpl.java

public boolean writeDetached(ObjectOutput out) throws IOException {
    BitSet idxs = new BitSet(_meta.getFields().length);
    lock();
    try {
        boolean detsm = DetachManager.writeDetachedState(this, out, idxs);
        if (detsm)
            _flags |= FLAG_DETACHING;

        FieldMetaData[] fmds = _meta.getFields();
        for (int i = 0; i < fmds.length; i++) {
            if (fmds[i].isTransient())
                continue;
            provideField(_pc, _single, i);
            _single.serialize(out, !idxs.get(i));
            _single.clear();
        }
        return true;
    } catch (RuntimeException re) {
        throw translate(re);
    } finally {
        _flags &= ~FLAG_DETACHING;
        unlock();
    }
}

From source file:org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PORollupH2IRGForEach.java

protected Result processPlan(int pos) throws ExecException {
    if (schema != null && tupleMaker == null) {
        // Note here that if SchemaTuple is currently turned on, then any UDFs
        // in the chain must follow good practices. Namely, they should not
        // append to the Tuple that comes out of an iterator (a practice which
        // is fairly common, but is not recommended).
        tupleMaker = SchemaTupleFactory.getInstance(schema, false, GenContext.FOREACH);
        if (tupleMaker != null) {
            knownSize = true;
        }
    }
    if (tupleMaker == null) {
        tupleMaker = TupleFactory.getInstance();
    }

    Result res = new Result();

    // We check if all the databags have exhausted the tuples. If so we
    // enforce the reading of new data by setting data and its to null
    if (its != null) {
        boolean restartIts = true;
        for (int i = 0; i < noItems; ++i) {
            if (its[i] != null && isToBeFlattenedArray[i] == true) {
                restartIts &= !its[i].hasNext();
            }
        }
        // this means that all the databags have reached their last
        // elements, so we need to force reading of fresh databags
        if (restartIts) {
            its = null;
            data = null;
        }
    }

    if (its == null) {
        // getNext being called for the first time OR starting with a set of
        // new data from inputs
        its = new Iterator[noItems];
        bags = new Object[noItems];
        earlyTermination = new BitSet(noItems);

        int cnt = 0;

        for (int i = 0; i < noItems; ++i) {
            // Getting the iterators
            // populate the input data
            Result inputData = null;
            switch (resultTypes[i]) {
            case DataType.BAG:
            case DataType.TUPLE:
            case DataType.BYTEARRAY:
            case DataType.MAP:
            case DataType.BOOLEAN:
            case DataType.INTEGER:
            case DataType.DOUBLE:
            case DataType.LONG:
            case DataType.FLOAT:
            case DataType.BIGINTEGER:
            case DataType.BIGDECIMAL:
            case DataType.DATETIME:
            case DataType.CHARARRAY:
                inputData = planLeafOps[i].getNext(resultTypes[i]);
                // We store the payloads used to compute the rollup in tmpResult
                // for the first IRG and in tmpResult2 for the second IRG
                if (((planLeafOps[i]) instanceof POUserFunc)//&& !isSampler)
                        && (inputData.result != null) && (pos != -1)) {
                    if (!secondPass) {
                        tmpResult[pos][cnt++].add(mTupleFactory.newTuple(inputData.result));
                    } else {
                        tmpResult2[pos][cnt++].add(mTupleFactory.newTuple(inputData.result));
                    }
                }
                break;
            default: {
                int errCode = 2080;
                String msg = "Foreach currently does not handle type " + DataType.findTypeName(resultTypes[i]);
                throw new ExecException(msg, errCode, PigException.BUG);
            }

            }

            // we accrue information about which accumulators have terminated
            // early; in the case that they all do, we can finish
            if (inputData.returnStatus == POStatus.STATUS_EARLY_TERMINATION) {
                if (!earlyTermination.get(i))
                    earlyTermination.set(i);

                continue;
            }

            if (inputData.returnStatus == POStatus.STATUS_BATCH_OK) {
                continue;
            }

            if (inputData.returnStatus == POStatus.STATUS_EOP) {
                // we are done with all the elements. Time to return.
                its = null;
                bags = null;
                return inputData;
            }
            // if we see an error just return it
            if (inputData.returnStatus == POStatus.STATUS_ERR) {
                return inputData;
            }

            bags[i] = inputData.result;

            if (inputData.result instanceof DataBag && isToBeFlattenedArray[i]) {
                its[i] = ((DataBag) bags[i]).iterator();
            } else {
                its[i] = null;
            }
        }
    }

    // if accumulating, we haven't got data yet for some fields, just return
    if (isAccumulative() && isAccumStarted()) {
        if (earlyTermination.cardinality() < noItems) {
            res.returnStatus = POStatus.STATUS_BATCH_OK;
        } else {
            res.returnStatus = POStatus.STATUS_EARLY_TERMINATION;
        }
        return res;
    }

    while (true) {
        if (data == null) {
            // getNext being called for the first time or starting on new
            // input data
            // we instantiate the template array and start populating it
            // with data
            data = new Object[noItems];
            for (int i = 0; i < noItems; ++i) {
                if (isToBeFlattenedArray[i] && bags[i] instanceof DataBag) {
                    if (its[i].hasNext()) {
                        data[i] = its[i].next();
                    } else {
                        // the input set is null, so we return. This is
                        // caught above and this function recalled with
                        // new inputs.
                        its = null;
                        data = null;
                        res.returnStatus = POStatus.STATUS_NULL;
                        return res;
                    }
                } else {
                    data[i] = bags[i];
                }

            }
            if (getReporter() != null) {
                getReporter().progress();
            }
            // createTuple(data);

            res.result = createTuple(data);

            res.returnStatus = POStatus.STATUS_OK;
            return res;
        } else {
            // we try to find the last expression which needs flattening and
            // start iterating over it
            // we also try to update the template array
            for (int index = noItems - 1; index >= 0; --index) {
                if (its[index] != null && isToBeFlattenedArray[index]) {
                    if (its[index].hasNext()) {
                        data[index] = its[index].next();
                        res.result = createTuple(data);
                        res.returnStatus = POStatus.STATUS_OK;
                        return res;
                    } else {
                        its[index] = ((DataBag) bags[index]).iterator();
                        data[index] = its[index].next();
                    }
                }
            }
        }
    }
}

From source file:org.unitime.timetable.solver.studentsct.StudentSectioningDatabaseLoader.java

public static BitSet getFreeTimeBitSet(Session session) {
    int startMonth = session.getPatternStartMonth();
    int endMonth = session.getPatternEndMonth();
    int size = DateUtils.getDayOfYear(0, endMonth + 1, session.getSessionStartYear())
            - DateUtils.getDayOfYear(1, startMonth, session.getSessionStartYear());
    BitSet ret = new BitSet(size);
    for (int i = 0; i < size; i++)
        ret.set(i);
    return ret;
}
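
The loop above sets each bit individually; BitSet.set(fromIndex, toIndex) would fill the same range in a single call. A small equivalence sketch, with an arbitrary size value:

import java.util.BitSet;

public class AllSetBitSetSketch {
    public static void main(String[] args) {
        int size = 365; // e.g. number of days covered by a session pattern

        BitSet perBit = new BitSet(size);
        for (int i = 0; i < size; i++)
            perBit.set(i);

        BitSet ranged = new BitSet(size);
        ranged.set(0, size); // same result in a single call

        System.out.println(perBit.equals(ranged)); // true
    }
}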

From source file:org.apache.carbondata.core.scan.filter.FilterUtil.java

/**
 * This method will create a default bitset group. Applicable for restructure scenarios.
 *
 * @param pageCount
 * @param totalRowCount
 * @param defaultValue
 * @return
 */
public static BitSetGroup createBitSetGroupWithDefaultValue(int pageCount, int totalRowCount,
        boolean defaultValue) {
    BitSetGroup bitSetGroup = new BitSetGroup(pageCount);
    int numberOfRows = CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
    int pagesTobeFullFilled = totalRowCount / numberOfRows;
    int rowCountForLastPage = totalRowCount % numberOfRows;
    for (int i = 0; i < pagesTobeFullFilled; i++) {
        BitSet bitSet = new BitSet(numberOfRows);
        bitSet.set(0, numberOfRows, defaultValue);
        bitSetGroup.setBitSet(bitSet, i);
    }
    // create and fill bitset for the last page if any records are left
    if (rowCountForLastPage > 0) {
        BitSet bitSet = new BitSet(rowCountForLastPage);
        bitSet.set(0, rowCountForLastPage, defaultValue);
        bitSetGroup.setBitSet(bitSet, pagesTobeFullFilled);
    }
    return bitSetGroup;
}
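
A sketch of the same paging arithmetic in isolation: the three-argument BitSet.set(from, to, value) fills each full page and a smaller BitSet covers the remainder. BitSetGroup is replaced by a plain List here, and the page size is an illustrative number rather than the CarbonData constant:

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

public class DefaultBitSetPagesSketch {
    public static void main(String[] args) {
        int rowsPerPage = 32000;   // illustrative page size, not the CarbonData constant
        int totalRowCount = 70000;
        boolean defaultValue = true;

        List<BitSet> pages = new ArrayList<>();
        int fullPages = totalRowCount / rowsPerPage;      // 2
        int lastPageRows = totalRowCount % rowsPerPage;   // 6000

        for (int i = 0; i < fullPages; i++) {
            BitSet page = new BitSet(rowsPerPage);
            page.set(0, rowsPerPage, defaultValue);       // ranged set with an explicit value
            pages.add(page);
        }
        if (lastPageRows > 0) {
            BitSet page = new BitSet(lastPageRows);
            page.set(0, lastPageRows, defaultValue);
            pages.add(page);
        }

        System.out.println(pages.size());                 // 3
        System.out.println(pages.get(2).cardinality());   // 6000
    }
}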

From source file:org.apache.openjpa.kernel.StateManagerImpl.java

/**
 * Load the given field's fetch group; the field itself may already be
 * loaded if it is being set by the user.
 */
protected void loadField(int field, int lockLevel, boolean forWrite, boolean fgs) {
    FetchConfiguration fetch = _broker.getFetchConfiguration();
    FieldMetaData fmd = _meta.getField(field);
    BitSet fields = null;

    // if this is a dfg field or we need to load our dfg, do so
    if (fgs && (_flags & FLAG_LOADED) == 0)
        fields = getUnloadedInternal(fetch, LOAD_FGS, null);

    // check for load fetch group
    String lfg = fmd.getLoadFetchGroup();
    boolean lfgAdded = false;
    if (lfg != null) {
        FieldMetaData[] fmds = _meta.getFields();
        for (int i = 0; i < fmds.length; i++) {
            if (!_loaded.get(i) && (i == field || fmds[i].isInFetchGroup(lfg))) {
                if (fields == null)
                    fields = new BitSet(fmds.length);
                fields.set(i);
            }
        }

        // relation field is loaded with the load-fetch-group
        // but this addition must be reverted once the load is over
        if (!fetch.hasFetchGroup(lfg)) {
            fetch.addFetchGroup(lfg);
            lfgAdded = true;
        }
    } else if (fmd.isInDefaultFetchGroup() && fields == null) {
        // no load group but dfg: add dfg fields if we haven't already
        fields = getUnloadedInternal(fetch, LOAD_FGS, null);
    } else if (!_loaded.get(fmd.getIndex())) {
        // no load group or dfg: load individual field
        if (fields == null)
            fields = new BitSet();
        fields.set(fmd.getIndex());
    }

    // call this method even if there are no unloaded fields; loadFields
    // takes care of things like loading version info and setting PC flags
    try {
        loadFields(fields, fetch, lockLevel, null);
    } finally {
        if (lfgAdded)
            fetch.removeFetchGroup(lfg);
    }
}
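
The pattern in loadField of lazily allocating a BitSet of field indexes and leaving it null when nothing needs loading can be shown in miniature; the field layout and helper below are invented for illustration:

import java.util.BitSet;

public class UnloadedFieldsSketch {
    // Collect the indexes of fields that still need loading; null means "nothing to do".
    static BitSet collectUnloaded(boolean[] loaded) {
        BitSet fields = null;
        for (int i = 0; i < loaded.length; i++) {
            if (!loaded[i]) {
                if (fields == null)
                    fields = new BitSet(loaded.length);
                fields.set(i);
            }
        }
        return fields;
    }

    public static void main(String[] args) {
        System.out.println(collectUnloaded(new boolean[] { true, false, true, false })); // {1, 3}
        System.out.println(collectUnloaded(new boolean[] { true, true }));               // null
    }
}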

From source file:org.apache.openjpa.kernel.StateManagerImpl.java

public BitSet getFlushed() {
    if (_flush == null) {
        _flush = new BitSet(_meta.getFields().length);
    }
    return _flush;
}