Example usage for java.util Arrays copyOfRange

List of usage examples for java.util Arrays copyOfRange

Introduction

On this page you can find example usage for java.util.Arrays.copyOfRange.

Prototype

public static boolean[] copyOfRange(boolean[] original, int from, int to) 

Source Link

Document

Copies the specified range of the specified array into a new array.

Usage

From source file:algorithm.PDFFileAttacher.java

/**
 * PDFBox adds a new line with ^M at the end of the restored payload file.
 * This method removes the buggy line./*from w  ww. j  a v a 2  s  .c o  m*/
 * 
 * @param restoredPayload
 */
private void removeBuggyLineEnding(File restoredPayload) throws IOException {
    byte[] data = FileUtils.readFileToByteArray(restoredPayload);
    FileUtils.writeByteArrayToFile(restoredPayload, Arrays.copyOfRange(data, 0, data.length - 2), false);
}

From source file:net.netheos.pcsapi.BytesIOTest.java

/**
 * Verifies that a ProgressByteSink wrapping the given sink reports progress
 * to its listener correctly: unknown total at first, total after
 * setExpectedLength(), per-write progress, and abort propagation.
 */
private void checkProgressByteSink(ByteSink sink, byte[] byteContent) throws IOException {
    StdoutProgressListener listener = new StdoutProgressListener();
    ProgressByteSink progressSink = new ProgressByteSink(sink, listener);

    ByteSinkStream out = progressSink.openStream();
    try {
        // Fresh listener: total unknown (-1), nothing written, not aborted.
        assertEquals(-1, listener.getTotal());
        assertEquals(0, listener.getCurrent());
        assertFalse(listener.isAborted());

        // Declaring the expected length propagates it to the listener.
        progressSink.setExpectedLength(byteContent.length);
        assertEquals(byteContent.length, listener.getTotal());

        // Writing a single leading byte advances progress by exactly one.
        out.write(Arrays.copyOfRange(byteContent, 0, 1));
        assertEquals(1, listener.getCurrent());

        // Writing the remainder brings progress up to the declared total.
        out.write(Arrays.copyOfRange(byteContent, 1, byteContent.length));
        assertEquals(listener.getTotal(), listener.getCurrent());

        // Aborting is reflected on the stream itself.
        out.abort();
        assertTrue(out.isAborted());

    } finally {
        IOUtils.closeQuietly(out);
    }
}

From source file:edu.cmu.graphchi.shards.QueryShard.java

/**
 * Loads the shard's adjacency pointer index, either as a memory-mapped
 * read-only buffer or (when pinIndexToMemory is set) fully into memory,
 * re-encoded as two Elias-gamma sequences.
 *
 * @throws IOException if the pointer file cannot be read
 */
void loadPointers() throws IOException {
    File pointerFile = new File(ChiFilenames.getFilenameShardsAdjPointers(adjFile.getAbsolutePath()));
    if (!pinIndexToMemory) {
        // Memory-map the whole pointer file. try-with-resources guarantees the
        // channel (and its backing RandomAccessFile) is closed even if map()
        // throws — the original leaked the file handle in that case. The
        // mapping remains valid after the channel is closed.
        try (FileChannel ptrFileChannel = new java.io.RandomAccessFile(pointerFile, "r").getChannel()) {
            pointerIdxBuffer = ptrFileChannel.map(FileChannel.MapMode.READ_ONLY, 0, pointerFile.length())
                    .asLongBuffer();
        }
    } else {
        byte[] data = new byte[(int) pointerFile.length()];

        if (data.length == 0)
            return;
        totalOrigSize += data.length;

        // Read the file fully. Checking for -1 prevents the infinite loop the
        // original read loop would enter on a premature EOF (read() returning
        // -1 would have *decremented* i).
        try (FileInputStream fis = new FileInputStream(pointerFile)) {
            int i = 0;
            while (i < data.length) {
                int n = fis.read(data, i, data.length - i);
                if (n < 0) {
                    throw new IOException("Unexpected EOF reading " + pointerFile + " at offset " + i);
                }
                i += n;
            }
        }

        pointerIdxBuffer = ByteBuffer.wrap(data).asLongBuffer();

        // Decode each packed long into (vertexId, fileOffset). The last entry
        // is a sentinel, hence capacity() - 1.
        long[] vertices = new long[pointerIdxBuffer.capacity() - 1];
        long[] offs = new long[vertices.length];

        for (int j = 0; j < vertices.length; j++) {
            long x = pointerIdxBuffer.get(j);
            vertices[j] = VertexIdTranslate.getVertexId(x);
            offs[j] = VertexIdTranslate.getAux(x);
        }

        // Drop a duplicated leading zero offset (artifact of the on-disk
        // format); IncreasingEliasGammaSeq requires strictly increasing input.
        boolean extraZero = (offs.length > 1 && offs[1] == offs[0]);
        if (extraZero) {
            vertices = Arrays.copyOfRange(vertices, 1, vertices.length);
            offs = Arrays.copyOfRange(offs, 1, offs.length);
        }

        gammaSeqVertices = new IncreasingEliasGammaSeq(vertices);
        gammaSeqOffs = new IncreasingEliasGammaSeq(offs);

        totalPinnedSize += gammaSeqVertices.sizeInBytes();
        totalPinnedSize += gammaSeqOffs.sizeInBytes();
        // Release the raw buffer: the gamma sequences now own the data.
        pointerIdxBuffer = null;

    }
}

From source file:com.opengamma.maths.lowlevelapi.datatypes.primitive.PackedMatrix.java

/**
 * Constructs from array of arrays *but* allows zeros to be packed into the data structure.
 * This is particularly useful for banded matrices in which
 * allowing some zero padding is beneficial in terms of making access patterns more simple.
 * <p>
 * The packed representation is CSR-like: {@code _data} holds the packed values,
 * {@code _rowPtr[i]} is the column index of the first packed element of row i,
 * and {@code _colCount[i + 1]} is the cumulative number of packed elements up to
 * and including row i.
 *
 * @param aMatrix is an n columns x m rows matrix stored as a row major array of arrays.
 * @param zeroPattern is enumerated based on {@link allowZerosOn} to allow the packing of zeros in the packed data structure.
 * @param rows is the number of rows in the matrix that is to be represented
 * @param cols is the number of columns in the matrix that is to be represented
 */
public PackedMatrix(double[][] aMatrix, allowZerosOn zeroPattern, int rows, int cols) {
    Validate.notNull(aMatrix);
    // test if ragged: packing assumes every row has the same column count
    if (MatrixPrimitiveUtils.isRagged(aMatrix)) {
        throw new NotImplementedException("Construction from ragged array not implemented");
    }

    _rows = rows;
    _cols = cols;
    _els = _rows * _cols; // upper bound on the number of packed elements

    double[] tmp = new double[_els]; // scratch buffer, trimmed to 'count' at the end
    _rowPtr = new int[_rows];
    _colCount = new int[_rows + 1];

    boolean isSet; // per-row flag: has this row's first packed column been recorded?
    double val;
    int count = 0; // running total of packed elements across all rows
    _colCount[0] = 0;

    switch (zeroPattern) {
    case bothSides: {
        // make flat! Pack every element, keeping zeros on both ends of each row.
        for (int i = 0; i < _rows; i++) {
            _rowPtr[i] = 0;
            for (int j = 0; j < _cols; j++) { //for each col
                tmp[count] = aMatrix[i][j]; // assign to tmp
                count++;
            }
            _colCount[i + 1] += count; // cumulative element count after row i
        }
        break;
    }
    case rightSide: {
        // Strip leading zeros only; keep everything from the first nonzero to row end.
        for (int i = 0; i < _rows; i++) {
            isSet = false; // init each starting point as not being set and look for it.
            for (int j = 0; j < cols; j++) { //for each col
                val = aMatrix[i][j]; // get the value
                // doubleToLongBits != 0L distinguishes true zero (incl. -0.0 is nonzero bits)
                if (Double.doubleToLongBits(val) != 0L || isSet) { // test if not zero and whether we have found the start of the data yet
                    tmp[count] = val; // assign to tmp
                    count++;
                    if (!isSet) { // if we haven't already set the starting point in the row
                        _rowPtr[i] = j; // assign this element as the starting point
                        isSet = true; // and ensure we don't come back here for this row
                    }
                }
            }
            _colCount[i + 1] += count;
        }
        break;
    }
    case leftSide: {
        // Strip trailing zeros only; keep everything from column 0 to the last nonzero.
        for (int i = 0; i < _rows; i++) {
            isSet = false; // init each starting point as not being set and look for it.

            // search backwards and find the end point (last nonzero column; -1 if row is all zero)
            int end = -1;
            for (int j = _cols - 1; j >= 0; j--) {
                val = aMatrix[i][j];
                if (Double.doubleToLongBits(val) != 0L) { // test if not zero
                    end = j;
                    break;
                }
            }

            // flatten: copy columns [0, end] verbatim, zeros included
            for (int j = 0; j < end + 1; j++) { //for each col
                val = aMatrix[i][j]; // get the value
                tmp[count] = val; // assign to tmp
                count++;
                if (!isSet) { // if we haven't already set the starting point in the row
                    _rowPtr[i] = j; // assign this element as the starting point
                    isSet = true; // and ensure we don't come back here for this row
                }
            }
            _colCount[i + 1] += count;
        }

        break;
    }
    case none: {
        // make flat! Strip zeros on both ends; keep interior zeros between
        // the first and last nonzero of each row.
        for (int i = 0; i < _rows; i++) {
            isSet = false; // init each starting point as not being set and look for it.
            // search backwards and find the end point
            // NOTE(review): 'end' starts at 0 here (vs -1 in leftSide), so an
            // all-zero row still packs its first element — presumably intentional; confirm.
            int end = 0;
            for (int j = _cols - 1; j >= 0; j--) {
                val = aMatrix[i][j];
                if (Double.doubleToLongBits(val) != 0L) { // test if not zero
                    end = j;
                    break;
                }
            }
            // flatten
            for (int j = 0; j < end + 1; j++) { //for each col
                val = aMatrix[i][j]; // get the value
                if (Double.doubleToLongBits(val) != 0L || isSet) { // test if not zero
                    tmp[count] = val; // assign to tmp
                    count++;
                    if (!isSet) { // if we haven't already set the starting point in the row
                        _rowPtr[i] = j; // assign this element as the starting point
                        isSet = true; // and ensure we don't come back here for this row
                    }
                }
            }
            _colCount[i + 1] += count;
        }
        break;
    }

    }
    // Trim the scratch buffer to the number of elements actually packed.
    _data = Arrays.copyOfRange(tmp, 0, count);
}

From source file:com.aperigeek.dropvault.web.dao.MongoFileService.java

/**
 * Stores (creates or replaces) a resource's content and metadata for the
 * given user. The payload is written to an encrypted data file, the file/
 * content documents are upserted in MongoDB, and full-text indexing of the
 * new content is kicked off on a background thread.
 *
 * @param username    owner of the resource
 * @param resource    slash-separated resource path; the last segment is the file name
 * @param data        the content stream to store
 * @param length      declared content length, stored as-is in metadata
 * @param contentType MIME type; if null it is sniffed from the stream
 * @param password    user password used to encrypt the stored file and to access the index
 * @throws ResourceNotFoundException if the parent folder cannot be resolved
 * @throws IOException on stream or storage failure
 */
public void put(final String username, String resource, InputStream data, long length, String contentType,
        final char[] password) throws ResourceNotFoundException, IOException {

    final String[] path = resource.split("/");
    // NOTE(review): path.length - 2 drops the last *two* segments. This lines
    // up with paths carrying a leading empty segment (resource starting with
    // "/") only if getResourceAt compensates — verify against callers; with a
    // relative path this would skip the immediate parent.
    Resource parent = getResourceAt(getRootFolder(username), Arrays.copyOfRange(path, 0, path.length - 2));

    DBCollection files = mongo.getDataBase().getCollection("files");
    DBCollection contents = mongo.getDataBase().getCollection("contents");

    ContentDetector contentDetector = null;
    if (contentType == null) {
        // Tee the upload into a pipe so the content type can be sniffed
        // concurrently while the original stream is consumed below.
        PipedInputStream pipeIn = new PipedInputStream();
        PipedOutputStream pipeOut = new PipedOutputStream(pipeIn);
        TeeInputStream tee = new TeeInputStream(data, pipeOut, true);
        contentDetector = new ContentDetector(path[path.length - 1], pipeIn);
        contentDetector.start();
        data = tee;
    }

    // Consumes 'data' fully; also feeds the detector via the tee when active.
    final File dataFile = createDataFile(data, username, password);

    if (contentDetector != null) {
        try {
            // Wait for the sniffer; safe because the tee was fully drained above.
            contentDetector.join();
            contentType = contentDetector.getContentType();
        } catch (InterruptedException ex) {
            // Best-effort: proceed with a null content type rather than failing the upload.
            Logger.getLogger(MongoFileService.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    Resource child = getChild(parent, path[path.length - 1]);
    if (child != null) {
        // Existing resource: update metadata and point its content at the new file.
        DBObject filter = new BasicDBObject();
        filter.put("_id", child.getId());
        DBObject update = new BasicDBObject("modificationDate", new Date());
        update.put("contentLength", length);
        update.put("contentType", contentType);
        files.update(filter, new BasicDBObject("$set", update));

        contents.update(new BasicDBObject("resource", child.getId()),
                new BasicDBObject("$set", new BasicDBObject("file", dataFile.getAbsolutePath())));
    } else {
        // New resource: insert file document, content pointer, and touch the parent.
        DBObject childObj = new BasicDBObject();
        ObjectId objId = new ObjectId();
        childObj.put("_id", objId);
        childObj.put("user", username);
        childObj.put("name", path[path.length - 1]);
        childObj.put("parent", parent.getId());
        childObj.put("type", Resource.ResourceType.FILE.toString());
        childObj.put("creationDate", new Date());
        childObj.put("modificationDate", new Date());
        childObj.put("contentType", contentType);
        childObj.put("contentLength", length);

        files.insert(childObj);

        DBObject content = new BasicDBObject();
        content.put("resource", objId);
        content.put("file", dataFile.getAbsolutePath());

        contents.insert(content);

        files.update(new BasicDBObject("_id", parent.getId()),
                new BasicDBObject("$set", new BasicDBObject("modificationDate", new Date())));

        child = buildResource(childObj);
    }

    // Index asynchronously: content extraction can be slow and must not block the upload.
    final String fContentType = contentType;
    final Resource fChild = child;
    new Thread() {
        public void run() {
            try {
                Map<String, String> metadata = extractionService.extractContent(path[path.length - 1],
                        readFile(dataFile, username, password), fContentType);

                metadata.put("name", path[path.length - 1]);

                // Replace any previous index entry for this resource.
                indexService.remove(username, new String(password), fChild.getId().toString());
                indexService.index(username, new String(password), fChild.getId().toString(), metadata);
            } catch (Exception ex) {
                // Indexing is best-effort; the stored file is already durable.
                Logger.getLogger(MongoFileService.class.getName()).log(Level.SEVERE,
                        "Index failed for " + path[path.length - 1], ex);
            }
        }
    }.start();
}

From source file:com.blockwithme.longdb.leveldb.LevelDBTable.java

/**
 * Removes a single column id from the packed column-id list stored under the
 * given row key. If it was the last column, the row entry is deleted entirely.
 *
 * @param theKey      the row key
 * @param theColumnId the column id to remove
 */
private void removeColIds(final long theKey, final long theColumnId) {
    final Bytes rowIdBytes = new Bytes(Util.toByta(theKey));
    // Fetch once and null-check — the original issued the same DB read twice
    // (once for the null test, once for the value).
    final byte[] allColIds = dbInstance.get(toArray(rowIdBytes), readOpts);
    if (allColIds == null) {
        return;
    }
    final long position = Util.indexOf(allColIds, theColumnId);
    if (position == -1)
        return;
    // Split around the LONG_BYTES-wide slot occupied by the column id.
    final byte[] part1Bytes = Arrays.copyOfRange(allColIds, 0, (int) position);
    final byte[] part2Bytes = Arrays.copyOfRange(allColIds, (int) (position + LONG_BYTES),
            allColIds.length);
    // TODO try to use dbInstance.write() instead of delete()/put().
    // write() uses WriteBatch to 'batch' all the db updates
    // belonging to a single transaction.
    if (part1Bytes.length == 0 && part2Bytes.length == 0) {
        // Last column removed: drop the row entirely.
        dbInstance.delete(toArray(rowIdBytes), writeOpts);
    } else {
        final byte[] newColIds = ArrayUtils.addAll(part1Bytes, part2Bytes);
        dbInstance.put(toArray(rowIdBytes), newColIds, writeOpts);
    }
}

From source file:com.risevision.ui.server.utils.MakeRequestServlet.java

/**
 * Extracts the declared character encoding from a byte payload by locating
 * the first complete {@code <...>} tag; if it is an XML declaration
 * ({@code <?xml ... encoding="..."?>}), the encoding attribute is returned.
 *
 * @param bytes raw response bytes
 * @return the declared encoding, or "" if none is found
 */
private static String getEncodingType(byte[] bytes) {
    int start = -1, end = -1;

    // Find the last '<' before the first subsequent '>' (start tracks the
    // most recent '<', matching the original scan).
    for (int i = 0; i < bytes.length; i++) {
        if (bytes[i] == '<') {
            start = i;
        } else if (start != -1 && bytes[i] == '>') {
            end = i;
            break;
        }
    }

    // Guard: no complete tag found. The original fell through and called
    // copyOfRange(bytes, -1, -1), which throws ArrayIndexOutOfBoundsException.
    if (start == -1 || end == -1) {
        return "";
    }

    // NOTE(review): uses the platform default charset, as the original did —
    // for an ASCII-compatible XML prolog this is fine; confirm if payloads
    // may arrive in non-ASCII-compatible encodings.
    String header = new String(Arrays.copyOfRange(bytes, start, end));
    if (header.contains("?xml")) {
        return HtmlParser.getPropertyValue(header, "encoding");
    }

    return "";
}

From source file:com.opengamma.analytics.math.interpolation.MonotonicityPreservingQuinticSplineInterpolator.java

/**
 * Interpolates each row of {@code yValuesMatrix} over {@code xValues} with a
 * monotonicity-preserving quintic spline: a primary interpolant supplies
 * initial first/second derivatives, which are then iteratively adjusted until
 * the admissible-interval conditions are satisfied, and the final coefficient
 * matrices for all dimensions are stacked row-interleaved into one result.
 */
@Override
public PiecewisePolynomialResult interpolate(final double[] xValues, final double[][] yValuesMatrix) {
    ArgumentChecker.notNull(xValues, "xValues");
    ArgumentChecker.notNull(yValuesMatrix, "yValuesMatrix");

    // y rows may carry 2 extra endpoint values (clamped-style input); both layouts accepted.
    ArgumentChecker.isTrue(
            xValues.length == yValuesMatrix[0].length | xValues.length + 2 == yValuesMatrix[0].length,
            "(xValues length = yValuesMatrix's row vector length) or (xValues length + 2 = yValuesMatrix's row vector length)");
    ArgumentChecker.isTrue(xValues.length > 2, "Data points should be more than 2");

    final int nDataPts = xValues.length;
    final int yValuesLen = yValuesMatrix[0].length;
    final int dim = yValuesMatrix.length; // number of independent data rows to interpolate

    // Reject NaN/Infinity in x ...
    for (int i = 0; i < nDataPts; ++i) {
        ArgumentChecker.isFalse(Double.isNaN(xValues[i]), "xValues containing NaN");
        ArgumentChecker.isFalse(Double.isInfinite(xValues[i]), "xValues containing Infinity");
    }
    // ... and in every y row.
    for (int i = 0; i < yValuesLen; ++i) {
        for (int j = 0; j < dim; ++j) {
            ArgumentChecker.isFalse(Double.isNaN(yValuesMatrix[j][i]), "yValuesMatrix containing NaN");
            ArgumentChecker.isFalse(Double.isInfinite(yValuesMatrix[j][i]),
                    "yValuesMatrix containing Infinity");
        }
    }
    // x values must be pairwise distinct (O(n^2) check).
    for (int i = 0; i < nDataPts; ++i) {
        for (int j = i + 1; j < nDataPts; ++j) {
            ArgumentChecker.isFalse(xValues[i] == xValues[j], "xValues should be distinct");
        }
    }

    double[] xValuesSrt = new double[nDataPts];
    DoubleMatrix2D[] coefMatrix = new DoubleMatrix2D[dim];

    for (int i = 0; i < dim; ++i) {
        // Fresh sorted copies per dimension: parallelBinarySort mutates both arrays in place.
        xValuesSrt = Arrays.copyOf(xValues, nDataPts);
        double[] yValuesSrt = new double[nDataPts];
        if (nDataPts == yValuesLen) {
            yValuesSrt = Arrays.copyOf(yValuesMatrix[i], nDataPts);
        } else {
            // Extended layout: drop the first and last (endpoint-derivative) entries.
            yValuesSrt = Arrays.copyOfRange(yValuesMatrix[i], 1, nDataPts + 1);
        }
        ParallelArrayBinarySort.parallelBinarySort(xValuesSrt, yValuesSrt);

        final double[] intervals = _solver.intervalsCalculator(xValuesSrt);
        final double[] slopes = _solver.slopesCalculator(yValuesSrt, intervals);
        // Primary interpolant provides the initial derivative estimates.
        final PiecewisePolynomialResult result = _method.interpolate(xValues, yValuesMatrix[i]);

        ArgumentChecker.isTrue(result.getOrder() >= 3, "Primary interpolant should be degree >= 2");

        final double[] initialFirst = _function.differentiate(result, xValuesSrt).getData()[0];
        final double[] initialSecond = _function.differentiateTwice(result, xValuesSrt).getData()[0];
        final double[] first = firstDerivativeCalculator(yValuesSrt, intervals, slopes, initialFirst);

        // Fixed-point loop: recompute interior first derivatives until the
        // admissible intervals A and B overlap at every interior point
        // (k counts violations; a pass with k == 0 terminates the loop).
        boolean modFirst = false;
        int k;
        double[] aValues = aValuesCalculator(slopes, first);
        double[] bValues = bValuesCalculator(slopes, first);
        double[][] intervalsA = getIntervalsA(intervals, slopes, first, bValues);
        double[][] intervalsB = getIntervalsB(intervals, slopes, first, aValues);
        while (modFirst == false) {
            k = 0;
            for (int j = 0; j < nDataPts - 2; ++j) {
                if (first[j + 1] > 0.) {
                    // Relative-tolerance (ERROR) disjointness test between A[j+1] and B[j].
                    if (intervalsA[j + 1][1] + Math.abs(intervalsA[j + 1][1]) * ERROR < intervalsB[j][0]
                            - Math.abs(intervalsB[j][0]) * ERROR
                            | intervalsA[j + 1][0] - Math.abs(intervalsA[j + 1][0]) * ERROR > intervalsB[j][1]
                                    + Math.abs(intervalsB[j][1]) * ERROR) {
                        ++k;
                        first[j + 1] = firstDerivativesRecalculator(intervals, slopes, aValues, bValues, j + 1);
                    }
                }
            }
            if (k == 0) {
                modFirst = true;
            }
            aValues = aValuesCalculator(slopes, first);
            bValues = bValuesCalculator(slopes, first);
            intervalsA = getIntervalsA(intervals, slopes, first, bValues);
            intervalsB = getIntervalsB(intervals, slopes, first, aValues);
        }
        final double[] second = secondDerivativeCalculator(initialSecond, intervalsA, intervalsB);

        coefMatrix[i] = new DoubleMatrix2D(_solver.solve(yValuesSrt, intervals, slopes, first, second));
    }

    // Interleave per-dimension coefficient rows: result row (dim * i + j)
    // holds interval i of dimension j.
    final int nIntervals = coefMatrix[0].getNumberOfRows();
    final int nCoefs = coefMatrix[0].getNumberOfColumns();
    double[][] resMatrix = new double[dim * nIntervals][nCoefs];

    for (int i = 0; i < nIntervals; ++i) {
        for (int j = 0; j < dim; ++j) {
            resMatrix[dim * i + j] = coefMatrix[j].getRowVector(i).getData();
        }
    }

    // Final sanity sweep: the solver must not have produced NaN/Infinity.
    for (int i = 0; i < (nIntervals * dim); ++i) {
        for (int j = 0; j < nCoefs; ++j) {
            ArgumentChecker.isFalse(Double.isNaN(resMatrix[i][j]), "Too large input");
            ArgumentChecker.isFalse(Double.isInfinite(resMatrix[i][j]), "Too large input");
        }
    }

    return new PiecewisePolynomialResult(new DoubleMatrix1D(xValuesSrt), new DoubleMatrix2D(resMatrix), nCoefs,
            dim);
}

From source file:de.tudarmstadt.ukp.dkpro.spelling.experiments.hoo2012.hoo2011.FixedCandidateTrigramProbabilityDetector.java

/**
 * Estimates the probability of a sentence as the product over its trigrams
 * of an averaged log-probability: for each trigram, the trigram, bigram, and
 * unigram log-probabilities (each floored at frequency 1 to avoid log(0))
 * are averaged, summed in log space, and exponentiated at the end.
 *
 * @param words the sentence tokens; an empty list yields 0.0
 * @return the estimated sentence probability
 * @throws AnalysisEngineProcessException if the n-gram provider fails
 */
@Override
protected double getSentenceProbability(List<String> words) throws AnalysisEngineProcessException {
    double sentenceProbability = 0.0; // accumulated in log space

    if (words.size() < 1) {
        return 0.0;
    }

    long nrOfUnigrams;
    try {
        nrOfUnigrams = provider.getNrOfTokens();
    } catch (Exception e) {
        throw new AnalysisEngineProcessException(e);
    }

    List<String> trigrams = new ArrayList<String>();

    // Pad the sentence start with BOS markers so the first words get trigram context.
    // in the google n-grams this is not represented (only single BOS markers)
    // but I leave it in place in case we add another n-gram provider
    trigrams.add(NGramDetectorUtils.getTrigram(BOS, BOS, words.get(0)));

    if (words.size() > 1) {
        trigrams.add(NGramDetectorUtils.getTrigram(BOS, words.get(0), words.get(1)));
    }

    // All interior trigrams of the sentence.
    for (String trigram : new NGramStringIterable(words, 3, 3)) {
        trigrams.add(trigram);
    }

    // FIXME - implement backoff or linear interpolation

    for (String trigram : trigrams) {
        long trigramFreq = getNGramCount(trigram);

        String[] parts = StringUtils.split(trigram, " ");

        // Prefix bigram (first two tokens) and unigram (first token) of the trigram.
        String bigram = StringUtils.join(Arrays.copyOfRange(parts, 0, 2), " ");
        long bigramFreq = getNGramCount(bigram);

        String unigram = StringUtils.join(Arrays.copyOfRange(parts, 0, 1), " ");
        long unigramFreq = getNGramCount(unigram);

        // Floor all counts at 1 so the log-ratios below are always defined
        // (crude smoothing; see FIXME above).
        if (trigramFreq < 1) {
            trigramFreq = 1;
        }
        if (bigramFreq < 1) {
            bigramFreq = 1;
        }
        if (unigramFreq < 1) {
            unigramFreq = 1;
        }

        // Conditional log-probabilities: P(w3|w1w2), P(w2|w1), P(w1).
        double trigramProb = Math.log((double) trigramFreq / bigramFreq);
        double bigramProb = Math.log((double) bigramFreq / unigramFreq);
        double unigramProb = Math.log((double) unigramFreq / nrOfUnigrams);

        // Equal-weight average of the three orders (uniform interpolation).
        double interpolated = (trigramProb + bigramProb + unigramProb) / 3.0;

        sentenceProbability += interpolated;
    }

    // Convert back from log space.
    return Math.exp(sentenceProbability);
}

From source file:eu.europeana.querylog.learn.Evaluate.java

/**
 * Extracts the per-field "b" parameters: the {@code nFields} values that
 * follow the first {@code 1 + nFields} entries of the flat parameter vector.
 */
private float[] getBParams(float[] params) {
    final int from = 1 + nFields;
    final int to = from + nFields; // == 1 + 2 * nFields
    return Arrays.copyOfRange(params, from, to);
}