Example usage for java.util Queue poll

List of usage examples for java.util Queue poll

Introduction

On this page you can find example usage for java.util.Queue.poll().

Prototype

E poll();

Document

Retrieves and removes the head of this queue, or returns null if this queue is empty.
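
The following minimal sketch (our own class and variable names, not taken from any of the projects below) illustrates that contract: poll() returns elements in FIFO order and yields null, rather than throwing, once the queue is empty.

import java.util.ArrayDeque;
import java.util.Queue;

public class PollDemo {
    public static void main(String[] args) {
        Queue<String> queue = new ArrayDeque<>();
        queue.offer("a");
        queue.offer("b");

        System.out.println(queue.poll()); // "a" (head of the queue)
        System.out.println(queue.poll()); // "b"
        System.out.println(queue.poll()); // null -- queue is empty, no exception
    }
}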

Usage

From source file:org.neo4j.io.pagecache.PageCacheTest.java

private PageSwapperFactory factoryCountingSyncDevice(final AtomicInteger syncDeviceCounter,
        final Queue<Integer> expectedCountsInForce) {
    SingleFilePageSwapperFactory factory = new SingleFilePageSwapperFactory() {
        @Override
        public void syncDevice() {
            super.syncDevice();
            syncDeviceCounter.getAndIncrement();
        }

        @Override
        public PageSwapper createPageSwapper(File file, int filePageSize, PageEvictionCallback onEviction,
                boolean createIfNotExist) throws IOException {
            PageSwapper delegate = super.createPageSwapper(file, filePageSize, onEviction, createIfNotExist);
            return new DelegatingPageSwapper(delegate) {
                @Override
                public void force() throws IOException {
                    super.force();
                    assertThat(syncDeviceCounter.get(), is(expectedCountsInForce.poll()));
                }
            };
        }
    };
    factory.setFileSystemAbstraction(fs);
    return factory;
}
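
In the test above, a queue of expected counter values is handed to the swapper, and each force() call consumes the next expectation with poll(). A stripped-down sketch of that pattern, using hypothetical names and a plain check in place of the Hamcrest assertion:

import java.util.LinkedList;
import java.util.Queue;

public class ExpectedValuesDemo {
    public static void main(String[] args) {
        Queue<Integer> expected = new LinkedList<>();
        expected.offer(1);
        expected.offer(2);

        // Each event consumes the next expected value from the queue.
        onEvent(1, expected);
        onEvent(2, expected);
    }

    static void onEvent(int actual, Queue<Integer> expected) {
        Integer want = expected.poll(); // null if more events arrive than expected
        if (want == null || want != actual) {
            throw new AssertionError("expected " + want + " but got " + actual);
        }
    }
}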

From source file:ch.entwine.weblounge.maven.S3DeployMojo.java

/**
 * {@inheritDoc}
 * 
 * @see org.apache.maven.plugin.Mojo#execute()
 */
public void execute() throws MojoExecutionException, MojoFailureException {

    // Setup AWS S3 client
    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    AmazonS3Client uploadClient = new AmazonS3Client(credentials);
    TransferManager transfers = new TransferManager(credentials);

    // Make sure key prefix does not start with a slash but has one at the
    // end
    if (keyPrefix.startsWith("/"))
        keyPrefix = keyPrefix.substring(1);
    if (!keyPrefix.endsWith("/"))
        keyPrefix = keyPrefix + "/";

    // Keep track of how much data has been transferred
    long totalBytesTransferred = 0L;
    int items = 0;
    Queue<Upload> uploads = new LinkedBlockingQueue<Upload>();

    try {
        // Check if S3 bucket exists
        getLog().debug("Checking whether bucket " + bucket + " exists");
        if (!uploadClient.doesBucketExist(bucket)) {
            getLog().error("Desired bucket '" + bucket + "' does not exist!");
            return;
        }

        getLog().debug("Collecting files to transfer from " + resources.getDirectory());
        List<File> res = getResources();
        for (File file : res) {
            // Make path of resource relative to resources directory
            String filename = file.getName();
            String extension = FilenameUtils.getExtension(filename);
            String path = file.getPath().substring(resources.getDirectory().length());
            String key = concat("/", keyPrefix, path).substring(1);

            // Delete old file version in bucket
            getLog().debug("Removing existing object at " + key);
            uploadClient.deleteObject(bucket, key);

            // Setup meta data
            ObjectMetadata meta = new ObjectMetadata();
            meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600));

            FileInputStream fis = null;
            GZIPOutputStream gzipos = null;
            final File fileToUpload;

            if (gzip && ("js".equals(extension) || "css".equals(extension))) {
                try {
                    fis = new FileInputStream(file);
                    File gzFile = File.createTempFile(file.getName(), null);
                    gzipos = new GZIPOutputStream(new FileOutputStream(gzFile));
                    IOUtils.copy(fis, gzipos);
                    fileToUpload = gzFile;
                    meta.setContentEncoding("gzip");
                    if ("js".equals(extension))
                        meta.setContentType("text/javascript");
                    if ("css".equals(extension))
                        meta.setContentType("text/css");
                } catch (FileNotFoundException e) {
                    getLog().error(e);
                    continue;
                } catch (IOException e) {
                    getLog().error(e);
                    continue;
                } finally {
                    IOUtils.closeQuietly(fis);
                    IOUtils.closeQuietly(gzipos);
                }
            } else {
                fileToUpload = file;
            }

            // Do a random check for existing errors before starting the next upload
            if (erroneousUpload != null)
                break;

            // Create put object request
            long bytesToTransfer = fileToUpload.length();
            totalBytesTransferred += bytesToTransfer;
            PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload);
            request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer));
            request.setMetadata(meta);

            // Schedule put object request
            getLog().info(
                    "Uploading " + key + " (" + FileUtils.byteCountToDisplaySize((int) bytesToTransfer) + ")");
            Upload upload = transfers.upload(request);
            uploads.add(upload);
            items++;
        }
    } catch (AmazonServiceException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    } catch (AmazonClientException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    }

    // Wait for uploads to be finished
    String currentUpload = null;
    try {
        Thread.sleep(1000);
        getLog().info("Waiting for " + uploads.size() + " uploads to finish...");
        while (!uploads.isEmpty()) {
            Upload upload = uploads.poll();
            currentUpload = upload.getDescription().substring("Uploading to ".length());
            if (TransferState.InProgress.equals(upload.getState()))
                getLog().debug("Waiting for upload " + currentUpload + " to finish");
            upload.waitForUploadResult();
        }
    } catch (AmazonServiceException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (AmazonClientException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (InterruptedException e) {
        getLog().debug("Interrupted while waiting for upload to finish");
    }

    // Check for errors that happened outside of the actual uploading
    if (erroneousUpload != null) {
        throw new MojoExecutionException("Error while uploading " + erroneousUpload);
    }

    getLog().info("Deployed " + items + " files ("
            + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket);
}

From source file:org.apache.lucene.index.IndexWriter.java

private boolean processEvents(Queue<Event> queue, boolean triggerMerge, boolean forcePurge) throws IOException {
    Event event;
    boolean processed = false;
    while ((event = queue.poll()) != null) {
        processed = true;
        event.process(this, triggerMerge, forcePurge);
    }
    return processed;
}
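
The assignment inside the loop condition is the standard idiom for draining a queue with poll(): the loop ends exactly when poll() returns null. A self-contained version of the same idiom (illustrative names only):

import java.util.ArrayDeque;
import java.util.Queue;

public class DrainDemo {
    public static void main(String[] args) {
        Queue<Runnable> events = new ArrayDeque<>();
        events.add(() -> System.out.println("first event"));
        events.add(() -> System.out.println("second event"));

        // poll() returns null once the queue is empty, which ends the loop.
        Runnable event;
        while ((event = events.poll()) != null) {
            event.run();
        }
    }
}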

From source file:de.uni_koblenz.jgralab.utilities.rsa.Rsa2Tg.java

/**
 * Breadth-first search over SpecializesIncidenceClass edges for the
 * closest superclass with the correct rolename.
 * 
 * @param inc
 *            the IncidenceClass whose superclasses are searched
 * @param rolename
 *            the rolename to search for
 * @return the closest superclass with the given rolename, or null if
 *         none is found
 */
private IncidenceClass findClosestSuperclassWithRolename(IncidenceClass inc, String rolename) {
    IncidenceClass sup = null;
    Queue<IncidenceClass> q = new LinkedList<IncidenceClass>();
    LocalBooleanGraphMarker m = new LocalBooleanGraphMarker(sg);
    m.mark(inc);
    q.offer(inc);
    while (!q.isEmpty()) {
        IncidenceClass curr = q.poll();
        m.mark(curr);
        if ((curr != inc) && rolename.equals(curr.get_roleName())) {
            sup = curr;
            break;
        }
        for (SpecializesIncidenceClass sic : curr.getIncidentEdges(SpecializesIncidenceClass.class,
                de.uni_koblenz.jgralab.Direction.VERTEX_TO_EDGE)) {
            IncidenceClass i = (IncidenceClass) sic.getOmega();
            if (!m.isMarked(i)) {
                m.mark(i);
                q.offer(i);
            }
        }

    }
    return sup;
}
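
Stripped of the jGraLab types, the method above is the textbook queue-based breadth-first search: mark a node when it is enqueued, poll the next node to visit, and enqueue its unvisited neighbors. A generic sketch over a hypothetical adjacency-list graph:

import java.util.ArrayDeque;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.function.Predicate;

public class BfsDemo {
    /** Returns the first node (other than start) matching the predicate, or null. */
    static Integer bfs(Map<Integer, List<Integer>> graph, int start, Predicate<Integer> matches) {
        Queue<Integer> queue = new ArrayDeque<>();
        Set<Integer> visited = new HashSet<>();
        visited.add(start);
        queue.offer(start);
        while (!queue.isEmpty()) {
            int current = queue.poll();
            if (current != start && matches.test(current)) {
                return current;
            }
            for (int neighbor : graph.getOrDefault(current, Collections.emptyList())) {
                if (visited.add(neighbor)) { // add() returns false for already-visited nodes
                    queue.offer(neighbor);
                }
            }
        }
        return null;
    }
}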

From source file:nl.b3p.viewer.admin.stripes.GeoServiceActionBean.java

public Resolution generateSld() throws Exception {

    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    dbf.setNamespaceAware(true);
    DocumentBuilder db = dbf.newDocumentBuilder();
    Document sldDoc = db.newDocument();

    Element sldEl = sldDoc.createElementNS(NS_SLD, "StyledLayerDescriptor");
    sldDoc.appendChild(sldEl);
    sldEl.setAttributeNS(NS_SLD, "version", "1.0.0");
    sldEl.setAttributeNS("http://www.w3.org/2001/XMLSchema-instance", "xsi:schemaLocation",
            "http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd");
    sldEl.setAttribute("xmlns:ogc", NS_OGC);
    sldEl.setAttribute("xmlns:gml", NS_GML);
    service.loadLayerTree();

    Queue<Layer> layerStack = new LinkedList<Layer>();
    Layer l = service.getTopLayer();
    while (l != null) {
        layerStack.addAll(service.getLayerChildrenCache(l));

        if (l.getName() != null) {
            Element nlEl = sldDoc.createElementNS(NS_SLD, "NamedLayer");
            sldEl.appendChild(nlEl);
            String title = l.getTitleAlias() != null ? l.getTitleAlias() : l.getTitle();
            if (title != null) {
                nlEl.appendChild(sldDoc.createComment(" Layer '" + title + "' "));
            }
            Element nEl = sldDoc.createElementNS(NS_SLD, "Name");
            nEl.setTextContent(l.getName());
            nlEl.appendChild(nEl);

            if (l.getFeatureType() != null) {
                String protocol = "";
                if (l.getFeatureType().getFeatureSource() != null) {
                    protocol = " (protocol " + l.getFeatureType().getFeatureSource().getProtocol() + ")";
                }

                String ftComment = " This layer has a feature type" + protocol
                        + " you can use in a FeatureTypeConstraint element as follows:\n";
                ftComment += "            <LayerFeatureConstraints>\n";
                ftComment += "                <FeatureTypeConstraint>\n";
                ftComment += "                    <FeatureTypeName>" + l.getFeatureType().getTypeName()
                        + "</FeatureTypeName>\n";
                ftComment += "                    Add ogc:Filter or Extent element here. ";
                if (l.getFeatureType().getAttributes().isEmpty()) {
                    ftComment += " No feature type attributes are known.\n";
                } else {
                    ftComment += " You can use the following feature type attributes in ogc:PropertyName elements:\n";
                    for (AttributeDescriptor ad : l.getFeatureType().getAttributes()) {
                        ftComment += "                    <ogc:PropertyName>" + ad.getName()
                                + "</ogc:PropertyName>";
                        if (ad.getAlias() != null) {
                            ftComment += " (" + ad.getAlias() + ")";
                        }
                        if (ad.getType() != null) {
                            ftComment += " (type: " + ad.getType() + ")";
                        }
                        ftComment += "\n";
                    }
                }
                ftComment += "                </FeatureTypeConstraint>\n";
                ftComment += "            </LayerFeatureConstraints>\n";
                ftComment += "        ";
                nlEl.appendChild(sldDoc.createComment(ftComment));
            }

            nlEl.appendChild(sldDoc.createComment(" Add a UserStyle or NamedStyle element here "));
            String styleComment = " (no server-side named styles are known other than 'default') ";
            ClobElement styleDetail = l.getDetails().get(Layer.DETAIL_WMS_STYLES);
            if (styleDetail != null) {
                try {
                    JSONArray styles = new JSONArray(styleDetail.getValue());

                    if (styles.length() > 0) {
                        styleComment = " The following NamedStyles are available according to the capabilities: \n";

                        for (int i = 0; i < styles.length(); i++) {
                            JSONObject jStyle = styles.getJSONObject(i);

                            styleComment += "            <NamedStyle><Name>" + jStyle.getString("name")
                                    + "</Name></NamedStyle>";
                            if (jStyle.has("title")) {
                                styleComment += " (" + jStyle.getString("title") + ")";
                            }
                            styleComment += "\n";
                        }
                    }

                } catch (JSONException e) {
                    // ignore malformed style metadata; keep the default style comment
                }
                styleComment += "        ";
            }
            nlEl.appendChild(sldDoc.createComment(styleComment));
        }

        l = layerStack.poll();
    }

    TransformerFactory tf = TransformerFactory.newInstance();
    Transformer t = tf.newTransformer();
    t.setOutputProperty(OutputKeys.INDENT, "yes");
    t.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "4");
    t.setOutputProperty(OutputKeys.ENCODING, "UTF-8");

    DOMSource source = new DOMSource(sldDoc);
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    StreamResult result = new StreamResult(bos);
    t.transform(source, result);
    generatedSld = new String(bos.toByteArray(), "UTF-8");

    // indent doesn't add newline after XML declaration
    generatedSld = generatedSld.replaceFirst("\"\\?><StyledLayerDescriptor", "\"?>\n<StyledLayerDescriptor");
    return new ForwardResolution(JSP_EDIT_SLD);
}

From source file:de.uni_koblenz.jgralab.utilities.rsa.Rsa2Tg.java

/**
 * Creates the MayBeNestedIn relations between GraphElementClasses.
 */
private void createMayBeNestedIn() {
    System.out.println("Create MayBeNestedIn relations ...");
    updateNestedElements();

    // stores the GraphElementClass which have nested elements but are not
    // nested in another GraphElementClass
    Queue<GraphElementClass> workingList = new LinkedList<GraphElementClass>();
    Queue<GraphElementClass> topLevelNestingElements = new LinkedList<GraphElementClass>();

    // all edges have to be treated
    for (EdgeClass ec : sg.getEdgeClassVertices()) {
        workingList.add(ec);
        topLevelNestingElements.add(ec);
    }

    // create the explicitly modeled MayBeNestedIn edges
    for (GraphElement<?, ?, ?, ?> ge : nestedElements.getMarkedElements()) {
        GraphElementClass containingGEC = (GraphElementClass) ge;
        assert nestedElements.getMark(containingGEC) != null;
        assert !nestedElements.getMark(containingGEC).isEmpty();

        for (GraphElementClass containedGEC : nestedElements.getMark(containingGEC)) {
            sg.createMayBeNestedIn(containedGEC, containingGEC);
            insertContainingGECIntoWorkingList(containingGEC, containedGEC, topLevelNestingElements);
        }
    }

    checkAcyclicityOfMayBeNestedIn(topLevelNestingElements);

    // check correctness of explicit modeled MayBeNestedIn edges and create
    // implicit MayBeNestedIn edges during a breadth first search over the
    // GraphElementClasses participating in the MayBeNestedIn tree
    LocalBooleanGraphMarker isImplicitlyNested = new LocalBooleanGraphMarker(sg);
    while (!workingList.isEmpty()) {
        GraphElementClass current = workingList.poll();
        assert current != null;

        if (EdgeClass.class.isInstance(current)) {
            EdgeClass containedEC = (EdgeClass) current;

            // check constraints for explicitly nested EdgeClasses
            for (MayBeNestedIn_nestedElement i : containedEC.getIncidences(MayBeNestedIn_nestedElement.class)) {
                if (!isImplicitlyNested.isMarked(i.getEdge())) {
                    GraphElementClass containingGEC = (GraphElementClass) i.getThat();
                    checkNestingConstraints(containedEC, containingGEC);
                }
            }

            // create implicit MayBeNestedIn edges
            for (GraphElementClass containingGEC : getAllNestingElements(containedEC)) {
                isImplicitlyNested.mark(sg.createMayBeNestedIn(containedEC, containingGEC));
                if (topLevelNestingElements.contains(containedEC)) {
                    topLevelNestingElements.remove(containedEC);
                }
            }
        }

        // insert all nested GraphElementClasses into workingList
        for (MayBeNestedIn_nestingElement i : current.getIncidences(MayBeNestedIn_nestingElement.class)) {
            if (!workingList.contains(i.getThat()) && !isImplicitlyNested.isMarked(i.getEdge())) {
                workingList.add((GraphElementClass) i.getThat());
            }
        }
    }

    deleteDuplicateMayBeNestedIn();

    checkAcyclicityOfMayBeNestedIn(topLevelNestingElements);
}

From source file:edu.umn.cs.spatialHadoop.indexing.RTree.java

/**
 * Builds the RTree given a serialized list of elements. It uses the given
 * stockObject to deserialize these elements using
 * {@link TextSerializable#fromText(Text)} and build the tree. Also writes the
 * created tree to the disk directly.
 * 
 * @param element_bytes
 *          - serialization of all elements separated by new lines
 * @param offset
 *          - offset of the first byte to use in elements_bytes
 * @param len
 *          - number of bytes to use in elements_bytes
 * @param degree
 *          - Degree of the R-tree to build in terms of number of children per
 *          node
 * @param dataOut
 *          - output stream to write the result to.
 * @param fast_sort
 *          - setting this to <code>true</code> allows the method to run
 *          faster by materializing the offset of each element in the list
 *          which speeds up the comparison. However, this requires an
 *          additional 16 bytes per element. So, for each 1M elements, the
 *          method will require an additional 16 M bytes (approximately).
 */
public static void bulkLoadWrite(final byte[] element_bytes, final int offset, final int len, final int degree,
        DataOutput dataOut, final Shape stockObject, final boolean fast_sort) {
    try {

        int elementCount = 0;
        // Count number of elements in the given text
        int i_start = offset;
        final Text line = new Text();
        while (i_start < offset + len) {
            int i_end = skipToEOL(element_bytes, i_start);
            // Extract the line without end of line character
            line.set(element_bytes, i_start, i_end - i_start - 1);
            stockObject.fromText(line);
            elementCount++;
            i_start = i_end;
        }
        LOG.info("Bulk loading an RTree with " + elementCount + " elements");

        // It turns out the findBestDegree returns the best degree when the whole
        // tree is loaded to memory when processed. However, as current algorithms
        // process the tree while it's on disk, a higher degree should be selected
        // such that a node fits one file block (assumed to be 4K).
        //final int degree = findBestDegree(bytesAvailable, elementCount);

        int height = Math.max(1, (int) Math.ceil(Math.log(elementCount) / Math.log(degree)));
        int leafNodeCount = (int) Math.pow(degree, height - 1);
        if (elementCount < 2 * leafNodeCount && height > 1) {
            height--;
            leafNodeCount = (int) Math.pow(degree, height - 1);
        }
        int nodeCount = (int) ((Math.pow(degree, height) - 1) / (degree - 1));
        int nonLeafNodeCount = nodeCount - leafNodeCount;

        // Keep track of the offset of each element in the text
        final int[] offsets = new int[elementCount];
        final double[] xs = fast_sort ? new double[elementCount] : null;
        final double[] ys = fast_sort ? new double[elementCount] : null;

        i_start = offset;
        line.clear();
        for (int i = 0; i < elementCount; i++) {
            offsets[i] = i_start;
            int i_end = skipToEOL(element_bytes, i_start);
            if (xs != null) {
                // Extract the line with end of line character
                line.set(element_bytes, i_start, i_end - i_start - 1);
                stockObject.fromText(line);
                // Sample center of the shape
                xs[i] = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;
                ys[i] = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;
            }
            i_start = i_end;
        }

        /**A struct to store information about a split*/
        class SplitStruct extends Rectangle {
            /**Start and end index for this split*/
            int index1, index2;
            /**Direction of this split*/
            byte direction;
            /**Index of first element on disk*/
            int offsetOfFirstElement;

            static final byte DIRECTION_X = 0;
            static final byte DIRECTION_Y = 1;

            SplitStruct(int index1, int index2, byte direction) {
                this.index1 = index1;
                this.index2 = index2;
                this.direction = direction;
            }

            @Override
            public void write(DataOutput out) throws IOException {
                out.writeInt(offsetOfFirstElement);
                super.write(out);
            }

            void partition(Queue<SplitStruct> toBePartitioned) {
                IndexedSortable sortableX;
                IndexedSortable sortableY;

                if (fast_sort) {
                    // Use materialized xs[] and ys[] to do the comparisons
                    sortableX = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap xs
                            double tempx = xs[i];
                            xs[i] = xs[j];
                            xs[j] = tempx;
                            // Swap ys
                            double tempY = ys[i];
                            ys[i] = ys[j];
                            ys[j] = tempY;
                            // Swap id
                            int tempid = offsets[i];
                            offsets[i] = offsets[j];
                            offsets[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            if (xs[i] < xs[j])
                                return -1;
                            if (xs[i] > xs[j])
                                return 1;
                            return 0;
                        }
                    };

                    sortableY = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap xs
                            double tempx = xs[i];
                            xs[i] = xs[j];
                            xs[j] = tempx;
                            // Swap ys
                            double tempY = ys[i];
                            ys[i] = ys[j];
                            ys[j] = tempY;
                            // Swap id
                            int tempid = offsets[i];
                            offsets[i] = offsets[j];
                            offsets[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            if (ys[i] < ys[j])
                                return -1;
                            if (ys[i] > ys[j])
                                return 1;
                            return 0;
                        }
                    };
                } else {
                    // No materialized xs and ys. Always deserialize objects to compare
                    sortableX = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap id
                            int tempid = offsets[i];
                            offsets[i] = offsets[j];
                            offsets[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            // Get end of line
                            int eol = skipToEOL(element_bytes, offsets[i]);
                            line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                            stockObject.fromText(line);
                            double xi = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;

                            eol = skipToEOL(element_bytes, offsets[j]);
                            line.set(element_bytes, offsets[j], eol - offsets[j] - 1);
                            stockObject.fromText(line);
                            double xj = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;
                            if (xi < xj)
                                return -1;
                            if (xi > xj)
                                return 1;
                            return 0;
                        }
                    };

                    sortableY = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap id
                            int tempid = offsets[i];
                            offsets[i] = offsets[j];
                            offsets[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            int eol = skipToEOL(element_bytes, offsets[i]);
                            line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                            stockObject.fromText(line);
                            double yi = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;

                            eol = skipToEOL(element_bytes, offsets[j]);
                            line.set(element_bytes, offsets[j], eol - offsets[j] - 1);
                            stockObject.fromText(line);
                            double yj = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;
                            if (yi < yj)
                                return -1;
                            if (yi > yj)
                                return 1;
                            return 0;
                        }
                    };
                }

                final IndexedSorter sorter = new QuickSort();

                final IndexedSortable[] sortables = new IndexedSortable[2];
                sortables[SplitStruct.DIRECTION_X] = sortableX;
                sortables[SplitStruct.DIRECTION_Y] = sortableY;

                sorter.sort(sortables[direction], index1, index2);

                // Partition into maxEntries partitions (equally) and
                // create a SplitStruct for each partition
                int i1 = index1;
                for (int iSplit = 0; iSplit < degree; iSplit++) {
                    int i2 = index1 + (index2 - index1) * (iSplit + 1) / degree;
                    SplitStruct newSplit = new SplitStruct(i1, i2, (byte) (1 - direction));
                    toBePartitioned.add(newSplit);
                    i1 = i2;
                }
            }
        }

        // All nodes stored in level-order traversal
        Vector<SplitStruct> nodes = new Vector<SplitStruct>();
        final Queue<SplitStruct> toBePartitioned = new LinkedList<SplitStruct>();
        toBePartitioned.add(new SplitStruct(0, elementCount, SplitStruct.DIRECTION_X));

        while (!toBePartitioned.isEmpty()) {
            SplitStruct split = toBePartitioned.poll();
            if (nodes.size() < nonLeafNodeCount) {
                // This is a non-leaf
                split.partition(toBePartitioned);
            }
            nodes.add(split);
        }

        if (nodes.size() != nodeCount) {
            throw new RuntimeException(
                    "Expected node count: " + nodeCount + ". Real node count: " + nodes.size());
        }

        // Now we have our data sorted in the required order. Start building
        // the tree.
        // Store the offset of each leaf node in the tree
        FSDataOutputStream fakeOut = null;
        try {
            fakeOut = new FSDataOutputStream(new java.io.OutputStream() {
                // Null output stream
                @Override
                public void write(int b) throws IOException {
                    // Do nothing
                }

                @Override
                public void write(byte[] b, int off, int len) throws IOException {
                    // Do nothing
                }

                @Override
                public void write(byte[] b) throws IOException {
                    // Do nothing
                }
            }, null, TreeHeaderSize + nodes.size() * NodeSize);
            for (int i_leaf = nonLeafNodeCount, i = 0; i_leaf < nodes.size(); i_leaf++) {
                nodes.elementAt(i_leaf).offsetOfFirstElement = (int) fakeOut.getPos();
                if (i != nodes.elementAt(i_leaf).index1)
                    throw new RuntimeException();
                double x1, y1, x2, y2;

                // Initialize MBR to first object
                int eol = skipToEOL(element_bytes, offsets[i]);
                fakeOut.write(element_bytes, offsets[i], eol - offsets[i]);
                line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                stockObject.fromText(line);
                Rectangle mbr = stockObject.getMBR();
                x1 = mbr.x1;
                y1 = mbr.y1;
                x2 = mbr.x2;
                y2 = mbr.y2;
                i++;

                while (i < nodes.elementAt(i_leaf).index2) {
                    eol = skipToEOL(element_bytes, offsets[i]);
                    fakeOut.write(element_bytes, offsets[i], eol - offsets[i]);
                    line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                    stockObject.fromText(line);
                    mbr = stockObject.getMBR();
                    if (mbr.x1 < x1)
                        x1 = mbr.x1;
                    if (mbr.y1 < y1)
                        y1 = mbr.y1;
                    if (mbr.x2 > x2)
                        x2 = mbr.x2;
                    if (mbr.y2 > y2)
                        y2 = mbr.y2;
                    i++;
                }
                nodes.elementAt(i_leaf).set(x1, y1, x2, y2);
            }

        } finally {
            if (fakeOut != null)
                fakeOut.close();
        }

        // Calculate MBR and offsetOfFirstElement for non-leaves
        for (int i_node = nonLeafNodeCount - 1; i_node >= 0; i_node--) {
            int i_first_child = i_node * degree + 1;
            nodes.elementAt(i_node).offsetOfFirstElement = nodes.elementAt(i_first_child).offsetOfFirstElement;
            int i_child = 0;
            Rectangle mbr;
            mbr = nodes.elementAt(i_first_child + i_child);
            double x1 = mbr.x1;
            double y1 = mbr.y1;
            double x2 = mbr.x2;
            double y2 = mbr.y2;
            i_child++;

            while (i_child < degree) {
                mbr = nodes.elementAt(i_first_child + i_child);
                if (mbr.x1 < x1)
                    x1 = mbr.x1;
                if (mbr.y1 < y1)
                    y1 = mbr.y1;
                if (mbr.x2 > x2)
                    x2 = mbr.x2;
                if (mbr.y2 > y2)
                    y2 = mbr.y2;
                i_child++;
            }
            nodes.elementAt(i_node).set(x1, y1, x2, y2);
        }

        // Start writing the tree
        // write tree header (including size)
        // Total tree size. (== Total bytes written - 8 bytes for the size itself)
        dataOut.writeInt(TreeHeaderSize + NodeSize * nodeCount + len);
        // Tree height
        dataOut.writeInt(height);
        // Degree
        dataOut.writeInt(degree);
        dataOut.writeInt(elementCount);

        // write nodes
        for (SplitStruct node : nodes) {
            node.write(dataOut);
        }
        // write elements
        for (int element_i = 0; element_i < elementCount; element_i++) {
            int eol = skipToEOL(element_bytes, offsets[element_i]);
            dataOut.write(element_bytes, offsets[element_i], eol - offsets[element_i]);
        }

    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.hadoop.tools.rumen.Folder.java

public int run() throws IOException {
    class JobEntryComparator implements Comparator<Pair<LoggedJob, JobTraceReader>> {
        public int compare(Pair<LoggedJob, JobTraceReader> p1, Pair<LoggedJob, JobTraceReader> p2) {
            LoggedJob j1 = p1.first();
            LoggedJob j2 = p2.first();

            return (j1.getSubmitTime() < j2.getSubmitTime()) ? -1
                    : (j1.getSubmitTime() == j2.getSubmitTime()) ? 0 : 1;
        }
    }

    // we initialize an empty heap so if we take an error before establishing
    // a real one the finally code goes through
    Queue<Pair<LoggedJob, JobTraceReader>> heap = new PriorityQueue<Pair<LoggedJob, JobTraceReader>>();

    try {
        LoggedJob job = reader.nextJob();

        if (job == null) {
            LOG.error("The job trace is empty");

            return EMPTY_JOB_TRACE;
        }

        // If starts-after time is specified, skip the number of jobs till we reach
        // the starting time limit.
        if (startsAfter > 0) {
            LOG.info("starts-after time is specified. Initial job submit time : " + job.getSubmitTime());

            long approximateTime = job.getSubmitTime() + startsAfter;
            job = reader.nextJob();
            long skippedCount = 0;
            while (job != null && job.getSubmitTime() < approximateTime) {
                job = reader.nextJob();
                skippedCount++;
            }

            LOG.debug("Considering jobs with submit time greater than " + startsAfter + " ms. Skipped "
                    + skippedCount + " jobs.");

            if (job == null) {
                LOG.error("No more jobs to process in the trace with 'starts-after'" + " set to " + startsAfter
                        + "ms.");
                return EMPTY_JOB_TRACE;
            }
            LOG.info("The first job has a submit time of " + job.getSubmitTime());
        }

        firstJobSubmitTime = job.getSubmitTime();
        long lastJobSubmitTime = firstJobSubmitTime;

        int numberJobs = 0;

        long currentIntervalEnd = Long.MIN_VALUE;

        Path nextSegment = null;
        Outputter<LoggedJob> tempGen = null;

        if (debug) {
            LOG.debug("The first job has a submit time of " + firstJobSubmitTime);
        }

        final Configuration conf = getConf();

        try {
            // At the top of this loop, skewBuffer has at most
            // skewBufferLength entries.
            while (job != null) {
                final Random tempNameGenerator = new Random();

                lastJobSubmitTime = job.getSubmitTime();

                ++numberJobs;

                if (job.getSubmitTime() >= currentIntervalEnd) {
                    if (tempGen != null) {
                        tempGen.close();
                    }

                    nextSegment = null;
                    for (int i = 0; i < 3 && nextSegment == null; ++i) {
                        try {
                            nextSegment = new Path(tempDir,
                                    "segment-" + tempNameGenerator.nextLong() + ".json.gz");

                            if (debug) {
                                LOG.debug("The next segment name is " + nextSegment);
                            }

                            FileSystem fs = nextSegment.getFileSystem(conf);

                            try {
                                if (!fs.exists(nextSegment)) {
                                    break;
                                }

                                continue;
                            } catch (IOException e) {
                                // no code -- file did not already exist
                            }
                        } catch (IOException e) {
                            // no code -- file exists now, or directory bad. We try three
                            // times.
                        }
                    }

                    if (nextSegment == null) {
                        throw new RuntimeException("Failed to create a new file!");
                    }

                    if (debug) {
                        LOG.debug("Creating " + nextSegment + " for a job with a submit time of "
                                + job.getSubmitTime());
                    }

                    deletees.add(nextSegment);

                    tempPaths.add(nextSegment);

                    tempGen = new DefaultOutputter<LoggedJob>();
                    tempGen.init(nextSegment, conf);

                    long currentIntervalNumber = (job.getSubmitTime() - firstJobSubmitTime) / inputCycle;

                    currentIntervalEnd = firstJobSubmitTime + ((currentIntervalNumber + 1) * inputCycle);
                }

                // the temp files contain UDadjusted times, but each temp file's
                // content is in the same input cycle interval.
                if (tempGen != null) {
                    tempGen.output(job);
                }

                job = reader.nextJob();
            }
        } catch (DeskewedJobTraceReader.OutOfOrderException e) {
            return OUT_OF_ORDER_JOBS;
        } finally {
            if (tempGen != null) {
                tempGen.close();
            }
        }

        if (lastJobSubmitTime <= firstJobSubmitTime) {
            LOG.error("All of your job[s] have the same submit time." + "  Please just use your input file.");

            return ALL_JOBS_SIMULTANEOUS;
        }

        double submitTimeSpan = lastJobSubmitTime - firstJobSubmitTime;

        LOG.warn("Your input trace spans " + (lastJobSubmitTime - firstJobSubmitTime) + " ticks.");

        double foldingRatio = submitTimeSpan * (numberJobs + 1) / numberJobs / inputCycle;

        if (debug) {
            LOG.warn("run: submitTimeSpan = " + submitTimeSpan + ", numberJobs = " + numberJobs
                    + ", inputCycle = " + inputCycle);
        }

        if (reader.neededSkewBufferSize() > 0) {
            LOG.warn("You needed a -skew-buffer-length of " + reader.neededSkewBufferSize()
                    + " but no more, for this input.");
        }

        double tProbability = timeDilation * concentration / foldingRatio;

        if (debug) {
            LOG.warn("run: timeDilation = " + timeDilation + ", concentration = " + concentration
                    + ", foldingRatio = " + foldingRatio);
            LOG.warn("The transcription probability is " + tProbability);
        }

        transcriptionRateInteger = (int) Math.floor(tProbability);
        transcriptionRateFraction = tProbability - Math.floor(tProbability);

        // Now read all the inputs in parallel
        heap = new PriorityQueue<Pair<LoggedJob, JobTraceReader>>(tempPaths.size(), new JobEntryComparator());

        for (Path tempPath : tempPaths) {
            JobTraceReader thisReader = new JobTraceReader(tempPath, conf);

            closees.add(thisReader);

            LoggedJob streamFirstJob = thisReader.getNext();

            long thisIndex = (streamFirstJob.getSubmitTime() - firstJobSubmitTime) / inputCycle;

            if (debug) {
                LOG.debug("A job with submit time of " + streamFirstJob.getSubmitTime() + " is in interval # "
                        + thisIndex);
            }

            adjustJobTimes(streamFirstJob);

            if (debug) {
                LOG.debug("That job's submit time is adjusted to " + streamFirstJob.getSubmitTime());
            }

            heap.add(new Pair<LoggedJob, JobTraceReader>(streamFirstJob, thisReader));
        }

        Pair<LoggedJob, JobTraceReader> next = heap.poll();

        while (next != null) {
            maybeOutput(next.first());

            if (debug) {
                LOG.debug("The most recent job has an adjusted submit time of " + next.first().getSubmitTime());
                LOG.debug(" Its replacement in the heap will come from input engine " + next.second());
            }

            LoggedJob replacement = next.second().getNext();

            if (replacement == null) {
                next.second().close();

                if (debug) {
                    LOG.debug("That input engine is depleted.");
                }
            } else {
                adjustJobTimes(replacement);

                if (debug) {
                    LOG.debug("The replacement has an adjusted submit time of " + replacement.getSubmitTime());
                }

                heap.add(new Pair<LoggedJob, JobTraceReader>(replacement, next.second()));
            }

            next = heap.poll();
        }
    } finally {
        IOUtils.cleanup(null, reader);
        if (outGen != null) {
            outGen.close();
        }
        for (Pair<LoggedJob, JobTraceReader> heapEntry : heap) {
            heapEntry.second().close();
        }
        for (Closeable closee : closees) {
            closee.close();
        }
        if (!debug) {
            Configuration conf = getConf();

            for (Path deletee : deletees) {
                FileSystem fs = deletee.getFileSystem(conf);

                try {
                    fs.delete(deletee, false);
                } catch (IOException e) {
                    // no code
                }
            }
        }
    }

    return 0;
}
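
The heap-driven loop at the end of run() is a k-way merge: poll the entry with the smallest submit time, emit it, and push that reader's next job back onto the priority queue. The same shape, reduced to merging sorted arrays (illustrative names only):

import java.util.Comparator;
import java.util.PriorityQueue;
import java.util.Queue;

public class KWayMergeDemo {
    public static void main(String[] args) {
        int[][] runs = { { 1, 4, 9 }, { 2, 3, 8 }, { 5, 6, 7 } };
        // Heap entries: { current value, run index, position within run }
        Queue<int[]> heap = new PriorityQueue<>(Comparator.comparingInt((int[] e) -> e[0]));
        for (int r = 0; r < runs.length; r++) {
            if (runs[r].length > 0) {
                heap.add(new int[] { runs[r][0], r, 0 });
            }
        }
        int[] next;
        while ((next = heap.poll()) != null) { // poll() drives the merge
            System.out.print(next[0] + " ");   // prints 1 2 3 4 5 6 7 8 9
            int run = next[1], pos = next[2] + 1;
            if (pos < runs[run].length) {
                heap.add(new int[] { runs[run][pos], run, pos });
            }
        }
    }
}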

From source file:edu.umn.cs.spatialHadoop.core.RTree.java

/**
 * Builds the RTree given a serialized list of elements. It uses the given
 * stockObject to deserialize these elements using
 * {@link TextSerializable#fromText(Text)} and build the tree. Also writes the
 * created tree to the disk directly.
 * 
 * @param element_bytes
 *          - serialization of all elements separated by new lines
 * @param offset
 *          - offset of the first byte to use in elements_bytes
 * @param len
 *          - number of bytes to use in elements_bytes
 * @param degree
 *          - Degree of the R-tree to build in terms of number of children per
 *          node
 * @param dataOut
 *          - output stream to write the result to.
 * @param fast_sort
 *          - setting this to <code>true</code> allows the method to run
 *          faster by materializing the offset of each element in the list
 *          which speeds up the comparison. However, this requires an
 *          additional 16 bytes per element. So, for each 1M elements, the
 *          method will require an additional 16 M bytes (approximately).
 */
public void bulkLoadWrite(final byte[] element_bytes, final int offset, final int len, final int degree,
        DataOutput dataOut, final boolean fast_sort) {
    try {

        // Count number of elements in the given text
        int i_start = offset;
        final Text line = new Text();
        while (i_start < offset + len) {
            int i_end = skipToEOL(element_bytes, i_start);
            // Extract the line without end of line character
            line.set(element_bytes, i_start, i_end - i_start - 1);
            stockObject.fromText(line);
            elementCount++;
            i_start = i_end;
        }
        LOG.info("Bulk loading an RTree with " + elementCount + " elements");

        // It turns out the findBestDegree returns the best degree when the whole
        // tree is loaded to memory when processed. However, as current algorithms
        // process the tree while it's on disk, a higher degree should be selected
        // such that a node fits one file block (assumed to be 4K).
        //final int degree = findBestDegree(bytesAvailable, elementCount);
        LOG.info("Writing an RTree with degree " + degree);

        int height = Math.max(1, (int) Math.ceil(Math.log(elementCount) / Math.log(degree)));
        int leafNodeCount = (int) Math.pow(degree, height - 1);
        if (elementCount < 2 * leafNodeCount && height > 1) {
            height--;
            leafNodeCount = (int) Math.pow(degree, height - 1);
        }
        int nodeCount = (int) ((Math.pow(degree, height) - 1) / (degree - 1));
        int nonLeafNodeCount = nodeCount - leafNodeCount;

        // Keep track of the offset of each element in the text
        final int[] offsets = new int[elementCount];
        final double[] xs = fast_sort ? new double[elementCount] : null;
        final double[] ys = fast_sort ? new double[elementCount] : null;

        i_start = offset;
        line.clear();
        for (int i = 0; i < elementCount; i++) {
            offsets[i] = i_start;
            int i_end = skipToEOL(element_bytes, i_start);
            if (xs != null) {
                // Extract the line with end of line character
                line.set(element_bytes, i_start, i_end - i_start - 1);
                stockObject.fromText(line);
                // Sample center of the shape
                xs[i] = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;
                ys[i] = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;
            }
            i_start = i_end;
        }

        /**A struct to store information about a split*/
        class SplitStruct extends Rectangle {
            /**Start and end index for this split*/
            int index1, index2;
            /**Direction of this split*/
            byte direction;
            /**Index of first element on disk*/
            int offsetOfFirstElement;

            static final byte DIRECTION_X = 0;
            static final byte DIRECTION_Y = 1;

            SplitStruct(int index1, int index2, byte direction) {
                this.index1 = index1;
                this.index2 = index2;
                this.direction = direction;
            }

            @Override
            public void write(DataOutput out) throws IOException {
                out.writeInt(offsetOfFirstElement);
                super.write(out);
            }

            void partition(Queue<SplitStruct> toBePartitioned) {
                IndexedSortable sortableX;
                IndexedSortable sortableY;

                if (fast_sort) {
                    // Use materialized xs[] and ys[] to do the comparisons
                    sortableX = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap xs
                            double tempx = xs[i];
                            xs[i] = xs[j];
                            xs[j] = tempx;
                            // Swap ys
                            double tempY = ys[i];
                            ys[i] = ys[j];
                            ys[j] = tempY;
                            // Swap id
                            int tempid = offsets[i];
                            offsets[i] = offsets[j];
                            offsets[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            if (xs[i] < xs[j])
                                return -1;
                            if (xs[i] > xs[j])
                                return 1;
                            return 0;
                        }
                    };

                    sortableY = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap xs
                            double tempx = xs[i];
                            xs[i] = xs[j];
                            xs[j] = tempx;
                            // Swap ys
                            double tempY = ys[i];
                            ys[i] = ys[j];
                            ys[j] = tempY;
                            // Swap id
                            int tempid = offsets[i];
                            offsets[i] = offsets[j];
                            offsets[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            if (ys[i] < ys[j])
                                return -1;
                            if (ys[i] > ys[j])
                                return 1;
                            return 0;
                        }
                    };
                } else {
                    // No materialized xs and ys. Always deserialize objects to compare
                    sortableX = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap id
                            int tempid = offsets[i];
                            offsets[i] = offsets[j];
                            offsets[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            // Get end of line
                            int eol = skipToEOL(element_bytes, offsets[i]);
                            line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                            stockObject.fromText(line);
                            double xi = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;

                            eol = skipToEOL(element_bytes, offsets[j]);
                            line.set(element_bytes, offsets[j], eol - offsets[j] - 1);
                            stockObject.fromText(line);
                            double xj = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;
                            if (xi < xj)
                                return -1;
                            if (xi > xj)
                                return 1;
                            return 0;
                        }
                    };

                    sortableY = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap id
                            int tempid = offsets[i];
                            offsets[i] = offsets[j];
                            offsets[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            int eol = skipToEOL(element_bytes, offsets[i]);
                            line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                            stockObject.fromText(line);
                            double yi = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;

                            eol = skipToEOL(element_bytes, offsets[j]);
                            line.set(element_bytes, offsets[j], eol - offsets[j] - 1);
                            stockObject.fromText(line);
                            double yj = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;
                            if (yi < yj)
                                return -1;
                            if (yi > yj)
                                return 1;
                            return 0;
                        }
                    };
                }

                final IndexedSorter sorter = new QuickSort();

                final IndexedSortable[] sortables = new IndexedSortable[2];
                sortables[SplitStruct.DIRECTION_X] = sortableX;
                sortables[SplitStruct.DIRECTION_Y] = sortableY;

                sorter.sort(sortables[direction], index1, index2);

                // Partition into maxEntries partitions (equally) and
                // create a SplitStruct for each partition
                int i1 = index1;
                for (int iSplit = 0; iSplit < degree; iSplit++) {
                    int i2 = index1 + (index2 - index1) * (iSplit + 1) / degree;
                    SplitStruct newSplit = new SplitStruct(i1, i2, (byte) (1 - direction));
                    toBePartitioned.add(newSplit);
                    i1 = i2;
                }
            }
        }

        // All nodes stored in level-order traversal
        Vector<SplitStruct> nodes = new Vector<SplitStruct>();
        final Queue<SplitStruct> toBePartitioned = new LinkedList<SplitStruct>();
        toBePartitioned.add(new SplitStruct(0, elementCount, SplitStruct.DIRECTION_X));

        while (!toBePartitioned.isEmpty()) {
            SplitStruct split = toBePartitioned.poll();
            if (nodes.size() < nonLeafNodeCount) {
                // This is a non-leaf
                split.partition(toBePartitioned);
            }
            nodes.add(split);
        }

        if (nodes.size() != nodeCount) {
            throw new RuntimeException(
                    "Expected node count: " + nodeCount + ". Real node count: " + nodes.size());
        }

        // Now we have our data sorted in the required order. Start building
        // the tree.
        // Store the offset of each leaf node in the tree
        FSDataOutputStream fakeOut = null;
        try {
            fakeOut = new FSDataOutputStream(new java.io.OutputStream() {
                // Null output stream
                @Override
                public void write(int b) throws IOException {
                    // Do nothing
                }

                @Override
                public void write(byte[] b, int off, int len) throws IOException {
                    // Do nothing
                }

                @Override
                public void write(byte[] b) throws IOException {
                    // Do nothing
                }
            }, null, TreeHeaderSize + nodes.size() * NodeSize);
            for (int i_leaf = nonLeafNodeCount, i = 0; i_leaf < nodes.size(); i_leaf++) {
                nodes.elementAt(i_leaf).offsetOfFirstElement = (int) fakeOut.getPos();
                if (i != nodes.elementAt(i_leaf).index1)
                    throw new RuntimeException();
                double x1, y1, x2, y2;

                // Initialize MBR to first object
                int eol = skipToEOL(element_bytes, offsets[i]);
                fakeOut.write(element_bytes, offsets[i], eol - offsets[i]);
                line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                stockObject.fromText(line);
                Rectangle mbr = stockObject.getMBR();
                x1 = mbr.x1;
                y1 = mbr.y1;
                x2 = mbr.x2;
                y2 = mbr.y2;
                i++;

                while (i < nodes.elementAt(i_leaf).index2) {
                    eol = skipToEOL(element_bytes, offsets[i]);
                    fakeOut.write(element_bytes, offsets[i], eol - offsets[i]);
                    line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                    stockObject.fromText(line);
                    mbr = stockObject.getMBR();
                    if (mbr.x1 < x1)
                        x1 = mbr.x1;
                    if (mbr.y1 < y1)
                        y1 = mbr.y1;
                    if (mbr.x2 > x2)
                        x2 = mbr.x2;
                    if (mbr.y2 > y2)
                        y2 = mbr.y2;
                    i++;
                }
                nodes.elementAt(i_leaf).set(x1, y1, x2, y2);
            }

        } finally {
            if (fakeOut != null)
                fakeOut.close();
        }

        // Calculate MBR and offsetOfFirstElement for non-leaves
        for (int i_node = nonLeafNodeCount - 1; i_node >= 0; i_node--) {
            int i_first_child = i_node * degree + 1;
            nodes.elementAt(i_node).offsetOfFirstElement = nodes.elementAt(i_first_child).offsetOfFirstElement;
            int i_child = 0;
            Rectangle mbr;
            mbr = nodes.elementAt(i_first_child + i_child);
            double x1 = mbr.x1;
            double y1 = mbr.y1;
            double x2 = mbr.x2;
            double y2 = mbr.y2;
            i_child++;

            while (i_child < degree) {
                mbr = nodes.elementAt(i_first_child + i_child);
                if (mbr.x1 < x1)
                    x1 = mbr.x1;
                if (mbr.y1 < y1)
                    y1 = mbr.y1;
                if (mbr.x2 > x2)
                    x2 = mbr.x2;
                if (mbr.y2 > y2)
                    y2 = mbr.y2;
                i_child++;
            }
            nodes.elementAt(i_node).set(x1, y1, x2, y2);
        }

        // Start writing the tree
        // write tree header (including size)
        // Total tree size. (== Total bytes written - 8 bytes for the size itself)
        dataOut.writeInt(TreeHeaderSize + NodeSize * nodeCount + len);
        // Tree height
        dataOut.writeInt(height);
        // Degree
        dataOut.writeInt(degree);
        dataOut.writeInt(elementCount);

        // write nodes
        for (SplitStruct node : nodes) {
            node.write(dataOut);
        }
        // write elements
        for (int element_i = 0; element_i < elementCount; element_i++) {
            int eol = skipToEOL(element_bytes, offsets[element_i]);
            dataOut.write(element_bytes, offsets[element_i], eol - offsets[element_i]);
        }

    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.java

private void walkASTMarkTABREF(ASTNode ast, Set<String> cteAlias) throws SemanticException {
    Queue<Node> queue = new LinkedList<>();
    queue.add(ast);
    Map<HivePrivilegeObject, MaskAndFilterInfo> basicInfos = new LinkedHashMap<>();
    while (!queue.isEmpty()) {
        ASTNode astNode = (ASTNode) queue.poll();
        if (astNode.getToken().getType() == HiveParser.TOK_TABREF) {
            int aliasIndex = 0;
            StringBuilder additionalTabInfo = new StringBuilder();
            for (int index = 1; index < astNode.getChildCount(); index++) {
                ASTNode ct = (ASTNode) astNode.getChild(index);
                if (ct.getToken().getType() == HiveParser.TOK_TABLEBUCKETSAMPLE
                        || ct.getToken().getType() == HiveParser.TOK_TABLESPLITSAMPLE
                        || ct.getToken().getType() == HiveParser.TOK_TABLEPROPERTIES) {
                    additionalTabInfo.append(ctx.getTokenRewriteStream().toString(ct.getTokenStartIndex(),
                            ct.getTokenStopIndex()));
                } else {
                    aliasIndex = index;
                }
            }

            ASTNode tableTree = (ASTNode) (astNode.getChild(0));

            String tabIdName = getUnescapedName(tableTree);

            String alias;
            if (aliasIndex != 0) {
                alias = unescapeIdentifier(astNode.getChild(aliasIndex).getText());
            } else {
                alias = getUnescapedUnqualifiedTableName(tableTree);
            }

            // We need to know if it is CTE or not.
            // A CTE may have the same name as a table.
            // For example,
            // with select TAB1 [masking] as TAB2
            // select * from TAB2 [no masking]
            if (cteAlias.contains(tabIdName)) {
                continue;
            }

            String replacementText = null;
            Table table = null;
            try {
                table = getTableObjectByName(tabIdName);
            } catch (HiveException e) {
                // Table may not be found when materialization of CTE is on.
                LOG.info("Table " + tabIdName + " is not found in walkASTMarkTABREF.");
                continue;
            }

            List<String> colNames = new ArrayList<>();
            List<String> colTypes = new ArrayList<>();
            for (FieldSchema col : table.getAllCols()) {
                colNames.add(col.getName());
                colTypes.add(col.getType());
            }

            basicInfos.put(new HivePrivilegeObject(table.getDbName(), table.getTableName(), colNames),
                    new MaskAndFilterInfo(colTypes, additionalTabInfo.toString(), alias, astNode,
                            table.isView()));
        }
        if (astNode.getChildCount() > 0 && !ignoredTokens.contains(astNode.getToken().getType())) {
            for (Node child : astNode.getChildren()) {
                queue.offer(child);
            }
        }
    }
    List<HivePrivilegeObject> basicPrivObjs = new ArrayList<>();
    basicPrivObjs.addAll(basicInfos.keySet());
    List<HivePrivilegeObject> needRewritePrivObjs = tableMask.applyRowFilterAndColumnMasking(basicPrivObjs);
    if (needRewritePrivObjs != null && !needRewritePrivObjs.isEmpty()) {
        for (HivePrivilegeObject privObj : needRewritePrivObjs) {
            MaskAndFilterInfo info = basicInfos.get(privObj);
            String replacementText = tableMask.create(privObj, info);
            if (replacementText != null) {
                // We don't support masking/filtering against ACID query at the moment
                if (ctx.getIsUpdateDeleteMerge()) {
                    throw new SemanticException(ErrorMsg.MASKING_FILTERING_ON_ACID_NOT_SUPPORTED,
                            privObj.getDbname(), privObj.getObjectName());
                }
                tableMask.setNeedsRewrite(true);
                tableMask.addTranslation(info.astNode, replacementText);
            }
        }
    }
}