Example usage for java.util BitSet get

List of usage examples for java.util BitSet get

Introduction

On this page you can find example usage for java.util BitSet get.

Prototype

public boolean get(int bitIndex) 

Document

Returns the value of the bit with the specified index.
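
Before the project examples, here is a minimal, self-contained sketch of the method (the class name BitSetGetDemo is illustrative only and does not come from any of the projects below). It shows that get(int) reports whether a bit was previously set, including for indexes beyond the BitSet's initial size:

import java.util.BitSet;

public class BitSetGetDemo {
    public static void main(String[] args) {
        BitSet bits = new BitSet(8); // the argument is only a sizing hint; a BitSet grows as needed
        bits.set(1);
        bits.set(5);

        System.out.println(bits.get(1));   // true, bit 1 was set
        System.out.println(bits.get(2));   // false, bit 2 was never set
        System.out.println(bits.get(100)); // false, indexes past the current size simply read as false

        System.out.println(bits.cardinality()); // 2, the number of set bits
    }
}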

Usage

From source file:org.apache.mahout.benchmark.VectorBenchmarks.java

private void setUpVectors(int cardinality, int numNonZeros, int numVectors) {
    for (int i = 0; i < numVectors; i++) {
        Vector v = new SequentialAccessSparseVector(cardinality, numNonZeros); // sparsity!
        BitSet featureSpace = new BitSet(cardinality);
        int[] indexes = new int[numNonZeros];
        double[] values = new double[numNonZeros];
        int j = 0;
        while (j < numNonZeros) {
            double value = r.nextGaussian();
            int index = r.nextInt(cardinality);
            if (!featureSpace.get(index) && value != 0) {
                featureSpace.set(index);
                indexes[j] = index;
                values[j++] = value;
                v.set(index, value);
            }
        }
        randomVectorIndices.add(indexes);
        randomVectorValues.add(values);
        randomVectors.add(v);
    }
}

From source file:org.jax.haplotype.analysis.StrainBinaryPartitionSignificanceTester.java

/**
 * Test the significance of the given responses
 * @param strainPartitions
 *          the partitions to test
 * @param strainResponses
 *          the responses
 * @return
 *          test p-values. This will be an array as long as the
 *          input partitions
 */
public double[] tTestSingleResponseSignificance(List<? extends BinaryStrainPartition> strainPartitions,
        double[] strainResponses) {
    double[] significanceValues = new double[strainPartitions.size()];
    for (int currPartitionIndex = 0; currPartitionIndex < strainPartitions.size(); currPartitionIndex++) {
        BinaryStrainPartition currPartition = strainPartitions.get(currPartitionIndex);

        // segregate the responses
        BitSet currStrainBitSet = currPartition.getStrainBitSet();
        int currStrainCount = currStrainBitSet.cardinality();
        double[] insidePartitionResponses = new double[currStrainCount];
        double[] outsidePartitionResponses = new double[strainResponses.length - currStrainCount];

        int currInsidePartitionCursor = 0;
        int currOutsidePartitionCursor = 0;
        for (int responseIndex = 0; responseIndex < strainResponses.length; responseIndex++) {
            if (currStrainBitSet.get(responseIndex)) {
                // this response is in the partition
                insidePartitionResponses[currInsidePartitionCursor] = strainResponses[responseIndex];
                currInsidePartitionCursor++;
            } else {
                // this response is outside the partition
                outsidePartitionResponses[currOutsidePartitionCursor] = strainResponses[responseIndex];
                currOutsidePartitionCursor++;
            }
        }

        assert currInsidePartitionCursor == insidePartitionResponses.length;
        assert currOutsidePartitionCursor == outsidePartitionResponses.length;

        // perform t-test on segregated responses
        if (insidePartitionResponses.length <= 2 || outsidePartitionResponses.length <= 2) {
            significanceValues[currPartitionIndex] = 1.0;
        } else {
            DescriptiveStatistics insidePartitionResponseSummary = new DescriptiveStatistics();
            for (double currInsideResponseValue : insidePartitionResponses) {
                insidePartitionResponseSummary.addValue(currInsideResponseValue);
            }

            DescriptiveStatistics outsidePartitionResponseSummary = new DescriptiveStatistics();
            for (double currOutsideResponseValue : outsidePartitionResponses) {
                outsidePartitionResponseSummary.addValue(currOutsideResponseValue);
            }

            try {
                double pValue = this.tTester.tTest(insidePartitionResponseSummary,
                        outsidePartitionResponseSummary);

                significanceValues[currPartitionIndex] = pValue;
            } catch (MathException ex) {
                throw new IllegalStateException(ex);
            }
        }
    }

    return significanceValues;
}

From source file:org.apache.hadoop.mapred.TestSequenceFileInputFormat.java

public void testFormat() throws Exception {
    JobConf job = new JobConf(conf);
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
    Path file = new Path(dir, "test.seq");

    Reporter reporter = Reporter.NULL;

    int seed = new Random().nextInt();
    //LOG.info("seed = "+seed);
    Random random = new Random(seed);

    fs.delete(dir, true);

    FileInputFormat.setInputPaths(job, dir);

    // for a variety of lengths
    for (int length = 0; length < MAX_LENGTH; length += random.nextInt(MAX_LENGTH / 10) + 1) {

        //LOG.info("creating; entries = " + length);

        // create a file with length entries
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, file, IntWritable.class,
                BytesWritable.class);
        try {
            for (int i = 0; i < length; i++) {
                IntWritable key = new IntWritable(i);
                byte[] data = new byte[random.nextInt(10)];
                random.nextBytes(data);
                BytesWritable value = new BytesWritable(data);
                writer.append(key, value);
            }
        } finally {
            writer.close();
        }

        // try splitting the file in a variety of sizes
        InputFormat<IntWritable, BytesWritable> format = new SequenceFileInputFormat<IntWritable, BytesWritable>();
        IntWritable key = new IntWritable();
        BytesWritable value = new BytesWritable();
        for (int i = 0; i < 3; i++) {
            int numSplits = random.nextInt(MAX_LENGTH / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
            //LOG.info("splitting: requesting = " + numSplits);
            InputSplit[] splits = format.getSplits(job, numSplits);
            //LOG.info("splitting: got =        " + splits.length);

            // check each split
            BitSet bits = new BitSet(length);
            for (int j = 0; j < splits.length; j++) {
                RecordReader<IntWritable, BytesWritable> reader = format.getRecordReader(splits[j], job,
                        reporter);
                try {
                    int count = 0;
                    while (reader.next(key, value)) {
                        // if (bits.get(key.get())) {
                        // LOG.info("splits["+j+"]="+splits[j]+" : " + key.get());
                        // LOG.info("@"+reader.getPos());
                        // }
                        assertFalse("Key in multiple partitions.", bits.get(key.get()));
                        bits.set(key.get());
                        count++;
                    }
                    //LOG.info("splits["+j+"]="+splits[j]+" count=" + count);
                } finally {
                    reader.close();
                }
            }
            assertEquals("Some keys in no partition.", length, bits.cardinality());
        }

    }
}
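
The assertions above ("Key in multiple partitions." / "Some keys in no partition.") rely on a common BitSet pattern: get to detect duplicates, set to record what was seen, and cardinality to confirm full coverage. A hedged, standalone sketch of that pattern (checkExactlyOnce and the sample ids are invented for illustration, not part of the Hadoop test):

import java.util.BitSet;

public class CoverageCheckSketch {
    // Hypothetical helper: verifies that ids 0..expected-1 each occur exactly once
    static void checkExactlyOnce(int[] ids, int expected) {
        BitSet seen = new BitSet(expected);
        for (int id : ids) {
            if (seen.get(id)) {
                throw new AssertionError("id " + id + " appears in multiple partitions");
            }
            seen.set(id);
        }
        if (seen.cardinality() != expected) {
            throw new AssertionError("some ids are in no partition");
        }
    }

    public static void main(String[] args) {
        checkExactlyOnce(new int[] { 3, 0, 2, 1 }, 4); // passes: each id seen exactly once
    }
}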

From source file:flink.iso8583.MessageFactory.java

/** Creates a new message instance from the buffer, which must contain a valid ISO8583
 * message. If the factory is set to use binary messages then it will try to parse
 * a binary message.
 * @param buf The byte buffer containing the message. Must not include the length header.
 * @param isoHeaderLength The expected length of the ISO header, after which the message type
 * and the rest of the message must come. */
public IsoMessage parseMessage(byte[] buf, int isoHeaderLength) throws ParseException {
    IsoMessage m = new IsoMessage(isoHeaderLength > 0 ? new String(buf, 0, isoHeaderLength) : null);
    //TODO it only parses ASCII messages for now
    int type = 0;
    if (useBinary) {
        type = ((buf[isoHeaderLength] & 0xff) << 8) | (buf[isoHeaderLength + 1] & 0xff);
    } else {
        type = ((buf[isoHeaderLength] - 48) << 12) | ((buf[isoHeaderLength + 1] - 48) << 8)
                | ((buf[isoHeaderLength + 2] - 48) << 4) | (buf[isoHeaderLength + 3] - 48);
    }
    m.setType(type);
    //Parse the bitmap (primary first)
    BitSet bs = new BitSet(64);
    int pos = 0;
    if (useBinary) {
        for (int i = isoHeaderLength + 2; i < isoHeaderLength + 10; i++) {
            int bit = 128;
            for (int b = 0; b < 8; b++) {
                bs.set(pos++, (buf[i] & bit) != 0);
                bit >>= 1;
            }
        }
        //Check for secondary bitmap and parse if necessary
        if (bs.get(0)) {
            for (int i = isoHeaderLength + 10; i < isoHeaderLength + 18; i++) {
                int bit = 128;
                for (int b = 0; b < 8; b++) {
                    bs.set(pos++, (buf[i] & bit) != 0);
                    bit >>= 1;
                }
            }
            pos = 18 + isoHeaderLength;
        } else {
            pos = 10 + isoHeaderLength;
        }
    } else {
        for (int i = isoHeaderLength + 4; i < isoHeaderLength + 20; i++) {
            int hex = Integer.parseInt(new String(buf, i, 1), 16);
            bs.set(pos++, (hex & 8) > 0);
            bs.set(pos++, (hex & 4) > 0);
            bs.set(pos++, (hex & 2) > 0);
            bs.set(pos++, (hex & 1) > 0);
        }
        //Check for secondary bitmap and parse it if necessary
        if (bs.get(0)) {
            for (int i = isoHeaderLength + 20; i < isoHeaderLength + 36; i++) {
                int hex = Integer.parseInt(new String(buf, i, 1), 16);
                bs.set(pos++, (hex & 8) > 0);
                bs.set(pos++, (hex & 4) > 0);
                bs.set(pos++, (hex & 2) > 0);
                bs.set(pos++, (hex & 1) > 0);
            }
            pos = 36 + isoHeaderLength;
        } else {
            pos = 20 + isoHeaderLength;
        }
    }
    //Parse each field
    Integer itype = new Integer(type);
    Map parseGuide = (Map) parseMap.get(itype);
    List index = (List) parseOrder.get(itype);
    for (Iterator iter = index.iterator(); iter.hasNext();) {
        Integer i = (Integer) iter.next();
        FieldParseInfo fpi = (FieldParseInfo) parseGuide.get(i);
        if (bs.get(i.intValue() - 1)) {
            IsoValue val = useBinary ? fpi.parseBinary(buf, pos) : fpi.parse(buf, pos);
            m.setField(i.intValue(), val);
            if (useBinary && !(val.getType() == IsoType.ALPHA || val.getType() == IsoType.LLVAR
                    || val.getType() == IsoType.LLLVAR)) {
                pos += (val.getLength() / 2) + (val.getLength() % 2);
            } else {
                pos += val.getLength();
            }
            if (val.getType() == IsoType.LLVAR) {
                pos += useBinary ? 1 : 2;
            } else if (val.getType() == IsoType.LLLVAR) {
                pos += useBinary ? 2 : 3;
            }
        }
    }
    return m;
}
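
The bitmap handling above can be exercised on its own with plain BitSet calls. This is a hedged sketch, not part of the flink.iso8583 API: the 16-character ASCII hex primary bitmap "4210001102C04804" is a sample value chosen for illustration. It is decoded the same way as the non-binary branch above, after which get tells which ISO 8583 fields are present (field n maps to bit n - 1, mirroring bs.get(i.intValue() - 1) in the method above):

import java.util.BitSet;

public class BitmapSketch {
    public static void main(String[] args) {
        // Sample ASCII hex primary bitmap (64 bits); bit 0 clear means no secondary bitmap
        String hexBitmap = "4210001102C04804";
        BitSet bs = new BitSet(64);
        int pos = 0;
        for (int i = 0; i < hexBitmap.length(); i++) {
            int hex = Integer.parseInt(hexBitmap.substring(i, i + 1), 16);
            bs.set(pos++, (hex & 8) > 0);
            bs.set(pos++, (hex & 4) > 0);
            bs.set(pos++, (hex & 2) > 0);
            bs.set(pos++, (hex & 1) > 0);
        }
        // ISO 8583 field n is present when bit n-1 is set
        for (int field = 2; field <= 64; field++) {
            if (bs.get(field - 1)) {
                System.out.println("Field " + field + " is present");
            }
        }
    }
}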

From source file:sf.net.experimaestro.scheduler.SchedulerTest.java

@Test(description = "Test of the token resource - one job at a time")
public void test_token_resource() throws ExperimaestroCannotOverwrite, InterruptedException, IOException {

    File jobDirectory = mkTestDir();

    ThreadCount counter = new ThreadCount();
    TokenResource token = new TokenResource("scheduler_test/test_token_resource", 1);
    Transaction.run((em, t) -> token.save(t));

    // Sets 5 jobs
    WaitingJob[] jobs = new WaitingJob[5];
    BitSet failure = new BitSet();
    failure.set(3);

    for (int i = 0; i < jobs.length; i++) {
        jobs[i] = new WaitingJob(counter, jobDirectory, "job" + i, new Action(250, failure.get(i) ? 1 : 0, 0));
        final WaitingJob job = jobs[i];
        Transaction.run((em, t) -> {
            job.addDependency(token.createDependency(null));
            job.save(t);
        });
    }

    waitToFinish(0, counter, jobs, 1500, 5);
    waitBeforeCheck();

    // Check that one started after the other (since only one must have been active
    // at a time)
    LOGGER.info("Checking the token test output");

    // Retrieve all the jobs

    int errors = 0;
    errors += checkSequence(true, false, jobs);
    for (int i = 0; i < jobs.length; i++) {
        errors += checkState(
                jobs[i].finalCode() != 0 ? EnumSet.of(ResourceState.ERROR) : EnumSet.of(ResourceState.DONE),
                jobs[i]);
    }
    Assert.assertTrue(errors == 0, "Detected " + errors + " errors after running jobs");
}

From source file:org.elasticsearch.storm.EsBolt.java

private void flushWithAck() {
    BitSet flush = null;

    try {
        flush = writer.repository.tryFlush();
        writer.repository.discard();
    } catch (EsHadoopException ex) {
        // fail all recorded tuples
        for (Tuple input : inflightTuples) {
            collector.fail(input);
        }
        inflightTuples.clear();
        throw ex;
    }

    for (int index = 0; index < inflightTuples.size(); index++) {
        Tuple tuple = inflightTuples.get(index);
        // bit set means the entry hasn't been removed and thus wasn't written to ES
        if (flush.get(index)) {
            collector.fail(tuple);
        } else {
            collector.ack(tuple);
        }
    }

    // clear everything in bulk to prevent 'noisy' remove()
    inflightTuples.clear();
}

From source file:edu.uci.ics.hyracks.algebricks.rewriter.rules.ExtractCommonOperatorsRule.java

private boolean requiresMaterialization(List<Integer> groupClusterIds, int index) {
    Integer clusterId = groupClusterIds.get(index);
    BitSet blockingClusters = new BitSet();
    getAllBlockingClusterIds(clusterId, blockingClusters);
    if (!blockingClusters.isEmpty()) {
        for (int i = 0; i < groupClusterIds.size(); i++) {
            if (i == index) {
                continue;
            }
            if (blockingClusters.get(groupClusterIds.get(i))) {
                return true;
            }
        }
    }
    return false;
}

From source file:org.apache.openjpa.jdbc.sql.JoinSet.java

/**
 * Iterator over joins that prepares them for SQL translation.
 */
public Iterator joinIterator() {
    if (_size < 2)
        return iterator();
    if (_sorted != null)
        return _sorted.iterator();

    List sorted = new ArrayList(_size);
    LinkedList queue = new LinkedList();
    BitSet seen = new BitSet(_graph.size() * _graph.size() + _graph.size());

    // traverse graph
    Node n;
    int idx, sidx;
    for (int i = 0; i < _graph.size(); i++) {
        // seed queue with next set of disconnected joins
        for (n = (Node) _graph.get(i); n != null; n = n.next) {
            sidx = getSeenIndex(n.join);
            if (!seen.get(sidx)) {
                seen.set(sidx);
                queue.add(n);
            }
        }
        if (queue.isEmpty())
            continue;

        // traverse from those joins to reachables
        while (!queue.isEmpty()) {
            n = (Node) queue.removeFirst();

            // don't repeat a join to a table we've already joined, but
            // do traverse through it in the graph (the first indexes of
            // the seen bitset are reserved for joined-to tables)
            idx = (n.forward) ? n.join.getIndex2() : n.join.getIndex1();
            if (!seen.get(idx)) {
                sorted.add((n.forward) ? n.join : n.join.reverse());
                seen.set(idx);
            }

            for (n = (Node) _graph.get(idx); n != null; n = n.next) {
                sidx = getSeenIndex(n.join);
                if (!seen.get(sidx)) {
                    seen.set(sidx);
                    queue.add(n);
                }
            }
        }
    }
    _sorted = sorted;
    return _sorted.iterator();
}
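
The joinIterator traversal above uses a BitSet as a "seen" marker so that each join and each joined-to table index is processed only once. A hedged, generic sketch of that visited-bitset pattern on a small adjacency-list graph (the graph and node ids are invented for illustration; this is not the OpenJPA data structure):

import java.util.ArrayDeque;
import java.util.BitSet;
import java.util.Queue;

public class SeenBitSetSketch {
    public static void main(String[] args) {
        // Hypothetical adjacency-list graph; node ids double as BitSet indexes
        int[][] adjacency = { { 1, 2 }, { 3 }, { 3 }, {} };
        BitSet seen = new BitSet(adjacency.length);
        Queue<Integer> queue = new ArrayDeque<>();

        seen.set(0);
        queue.add(0);
        while (!queue.isEmpty()) {
            int node = queue.remove();
            System.out.println("visiting " + node);
            for (int next : adjacency[node]) {
                // get() guards against enqueueing a node twice, like the seen bitset above
                if (!seen.get(next)) {
                    seen.set(next);
                    queue.add(next);
                }
            }
        }
    }
}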

From source file:org.ala.spatial.analysis.layers.SitesBySpeciesTabulated.java

/**
 * Generate and write the sites by species list.
 * <p/>
 * Output files have both .csv and .json formats:
 * decades, tabulation by decades;
 * decadecounts, tabulation by (species in) sequential decades;
 * bioregionName, tabulation by bioregions (from ssf or grid & gridColumns).
 *
 * @param records         all occurrence records for this density grid as Records.
 * @param outputDirectory path to the output directory.
 * @param region          area restriction, or null for everywhere the occurrences
 *                        appear, as SimpleRegion.
 * @param envelopeGrid    area restriction as an envelope grid, or null for
 *                        everywhere the occurrences appear, as Grid
 * @param bioregionName   null or output bioregion name.
 * @param ssf             null or bioregion as shape file with a single column as
 *                        SimpleRegion.
 * @param grid            null or bioregion as Grid. Must also have gridColumns.
 * @param gridColumns     null or grid bioregion category lookup values as
 *                        String [].
 * @param decade          true to generate decades and decadecounts output
 *                        tabulations.
 * @throws IOException
 */
public void write(Records records, String outputDirectory, SimpleRegion region, Grid envelopeGrid,
        String bioregionName, SimpleShapeFile ssf, Grid grid, String[] gridColumns, boolean decade)
        throws IOException {
    String[] columns = null;
    int[] gridIntersections = null;
    int numberOfBioregions = 0;
    // get columns for bioregion categories from ssf or gridColumns.
    if (ssf != null) {
        columns = ssf.getColumnLookup();
    } else if (grid != null) {
        columns = gridColumns;
        gridIntersections = new int[records.getRecordsSize()];
        double[][] points = new double[records.getRecordsSize()][2];
        for (int i = 0; i < records.getRecordsSize(); i++) {
            points[i][0] = records.getLongitude(i);
            points[i][1] = records.getLatitude(i);
        }
        float[] f = grid.getValues(points);
        for (int i = 0; i < f.length; i++) {
            gridIntersections[i] = (int) f[i];
            if (gridIntersections[i] < 0 || gridIntersections[i] >= gridColumns.length + 1) {
                gridIntersections[i] = -1;
            }
        }
        f = null;
        points = null;
    }
    if (columns != null) {
        numberOfBioregions = columns.length + 1;
    }

    int uniqueSpeciesCount = records.getSpeciesSize();

    short[] decadeIdx = getDecadeIdx(records);
    int numberOfDecades = decadeIdx[decadeIdx.length - 1] + 1;

    HashMap<Integer, Integer>[] bioMap = new HashMap[numberOfBioregions];
    HashMap<Integer, Integer>[] decMap = new HashMap[numberOfDecades];
    HashMap<Integer, Integer>[] decCountMap = new HashMap[numberOfDecades + 1];
    for (int i = 0; i < bioMap.length; i++) {
        bioMap[i] = new HashMap<Integer, Integer>();
    }
    for (int i = 0; i < decMap.length; i++) {
        decMap[i] = new HashMap<Integer, Integer>();
    }
    for (int i = 0; i < decCountMap.length; i++) {
        decCountMap[i] = new HashMap<Integer, Integer>();
    }

    records.sortedStarts(bbox[1], bbox[0], resolution);

    BitSet[] bsDecades = new BitSet[numberOfDecades];
    BitSet[] bsBioregions = new BitSet[numberOfBioregions];
    for (int j = 0; j < numberOfBioregions; j++) {
        bsBioregions[j] = new BitSet(uniqueSpeciesCount);
    }
    for (int j = 0; j < numberOfDecades; j++) {
        bsDecades[j] = new BitSet(uniqueSpeciesCount);
    }
    int[] decContinousCounts = new int[records.getSpeciesSize()];

    for (int pos = 0; pos < records.getRecordsSize();) {
        //find end pos
        int x = (int) ((records.getLongitude(pos) - bbox[0]) / resolution);
        int y = (int) ((records.getLatitude(pos) - bbox[1]) / resolution);
        int endPos = pos + 1;
        while (endPos < records.getRecordsSize()
                && x == (int) ((records.getLongitude(endPos) - bbox[0]) / resolution)
                && y == (int) ((records.getLatitude(pos) - bbox[1]) / resolution)) {
            endPos++;
        }

        double longitude = (x + 0.5) * resolution;
        double latitude = (y + 0.5) * resolution;
        if ((region == null || region.isWithin_EPSG900913(longitude, latitude)) && (envelopeGrid == null
                || envelopeGrid.getValues2(new double[][] { { longitude, latitude } })[0] > 0)) {
            //process this cell
            getNextIntArrayRow(records, pos, endPos, bsBioregions, bsDecades, ssf, gridIntersections,
                    decadeIdx);

            for (int j = 0; j < numberOfBioregions; j++) {
                int group = bsBioregions[j].cardinality();
                if (group > 0) {
                    Integer count = bioMap[j].get(group);
                    bioMap[j].put(group, count == null ? 1 : count + 1);
                }
            }
            for (int j = 0; j < numberOfDecades; j++) {
                int group = bsDecades[j].cardinality();
                if (group > 0) {
                    Integer count = decMap[j].get(group);
                    decMap[j].put(group, count == null ? 1 : count + 1);
                }
            }

            //reset
            for (int j = 0; j < decContinousCounts.length; j++) {
                decContinousCounts[j] = 0;
            }
            //sum
            for (int j = 0; j < numberOfDecades; j++) {
                BitSet bs = bsDecades[j];
                if (bs.cardinality() > 0) {
                    for (int k = 0; k < bs.length(); k++) {
                        if (bs.get(k)) {
                            decContinousCounts[k]++;
                        }
                    }
                }
            }
            //count
            java.util.Arrays.sort(decContinousCounts);
            int count = 1;
            for (int j = 1; j < decContinousCounts.length; j++) {
                if (decContinousCounts[j] == decContinousCounts[j - 1]) {
                    count++;
                } else {
                    Integer c = decCountMap[decContinousCounts[j - 1]].get(count);
                    decCountMap[decContinousCounts[j - 1]].put(count, c == null ? 1 : c + 1);
                    count = 1;
                }
            }
            Integer c = decCountMap[decContinousCounts[decContinousCounts.length - 1]].get(count);
            decCountMap[decContinousCounts[decContinousCounts.length - 1]].put(count, c == null ? 1 : c + 1);
        }

        pos = endPos;
    }

    if (numberOfBioregions > 0) {
        writeBioregions(bioregionName, outputDirectory, columns, bioMap);
    }
    writeDecades(outputDirectory, decadeIdx, decMap);
    writeDecadeCounts(outputDirectory, decCountMap);
}

From source file:org.jax.haplotype.analysis.StrainBinaryPartitionSignificanceTester.java

/**
 * Test the significance of the given responses
 * @param genomicPartitions
 *          the partitions to test
 * @param strainResponses
 *          the responses
 * @return
 *          significance values. This will be an array as long as the
 *          input partitions
 */
public double[] normalizedTestSingleResponseSignificance(
        List<? extends PartitionedIntervalSet> genomicPartitions, double[] strainResponses) {
    double[] significanceValues = new double[genomicPartitions.size()];
    for (int currPartitionIndex = 0; currPartitionIndex < genomicPartitions.size(); currPartitionIndex++) {
        BinaryStrainPartition currPartition = genomicPartitions.get(currPartitionIndex);

        // segregate the responses
        BitSet currPartitionStrainBitSet = currPartition.getStrainBitSet();
        int currPartitionChromoCount = currPartitionStrainBitSet.cardinality();
        double[] insidePartitionResponses = new double[currPartitionChromoCount];
        double[] outsidePartitionResponses = new double[strainResponses.length - currPartitionChromoCount];

        int currInsidePartitionCursor = 0;
        int currOutsidePartitionCursor = 0;
        for (int responseIndex = 0; responseIndex < strainResponses.length; responseIndex++) {
            if (currPartitionStrainBitSet.get(responseIndex)) {
                // this response is in the partition
                insidePartitionResponses[currInsidePartitionCursor] = strainResponses[responseIndex];
                currInsidePartitionCursor++;
            } else {
                // this response is outside the partition
                outsidePartitionResponses[currOutsidePartitionCursor] = strainResponses[responseIndex];
                currOutsidePartitionCursor++;
            }
        }

        assert currInsidePartitionCursor == insidePartitionResponses.length;
        assert currOutsidePartitionCursor == outsidePartitionResponses.length;

        // perform t-test on segregated responses
        if (insidePartitionResponses.length <= 2 || outsidePartitionResponses.length <= 2) {
            significanceValues[currPartitionIndex] = 1.0;
        } else {
            DescriptiveStatistics insidePartitionResponseSummary = new DescriptiveStatistics();
            for (double currInsideResponseValue : insidePartitionResponses) {
                insidePartitionResponseSummary.addValue(currInsideResponseValue);
            }

            DescriptiveStatistics outsidePartitionResponseSummary = new DescriptiveStatistics();
            for (double currOutsideResponseValue : outsidePartitionResponses) {
                outsidePartitionResponseSummary.addValue(currOutsideResponseValue);
            }

            try {
                double pValue = this.tTester.tTest(insidePartitionResponseSummary,
                        outsidePartitionResponseSummary);

                // reduce the pValue relative to cumulative extent
                pValue /= genomicPartitions.get(currPartitionIndex).getCumulativeExtentInBasePairs();

                significanceValues[currPartitionIndex] = pValue;
            } catch (MathException ex) {
                throw new IllegalStateException(ex);
            }
        }
    }

    return significanceValues;
}