Example usage for java.util Deque size

List of usage examples for java.util Deque size

Introduction

On this page you can find example usages of the java.util Deque size() method.

Prototype

int size();

Document

Returns the number of elements in this deque.
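
Before the project examples below, here is a minimal, self-contained sketch (class name and values are illustrative, not taken from any of the projects) showing what size() reports as elements are added and removed:

import java.util.ArrayDeque;
import java.util.Deque;

public class DequeSizeDemo {
    public static void main(String[] args) {
        Deque<String> deque = new ArrayDeque<>();
        System.out.println(deque.size());   // 0 - a new deque is empty

        deque.push("first");                 // addFirst
        deque.addLast("last");               // elements at either end both count
        System.out.println(deque.size());   // 2

        deque.pop();                         // removeFirst
        System.out.println(deque.size());   // 1
    }
}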

Usage

From source file:org.alfresco.repo.content.transform.TransformerDebug.java

private void push(String transformerName, String fromUrl, String sourceMimetype, String targetMimetype,
        long sourceSize, TransformationOptions options, Call callType) {
    Deque<Frame> ourStack = ThreadInfo.getStack();
    Frame frame = ourStack.peek();

    if (callType == Call.TRANSFORM && frame != null && frame.callType == Call.AVAILABLE) {
        frame.setTransformerName(transformerName);
        frame.setSourceSize(sourceSize);
        frame.callType = Call.AVAILABLE_AND_TRANSFORM;
    }

    // Create a new frame. Logging level is set to trace if the file size is 0
    boolean origDebugOutput = ThreadInfo.setDebugOutput(ThreadInfo.getDebugOutput() && sourceSize != 0);
    frame = new Frame(frame, transformerName, fromUrl, sourceMimetype, targetMimetype, sourceSize, options,
            callType, origDebugOutput);
    ourStack.push(frame);

    if (callType == Call.TRANSFORM) {
        // Log the basic info about this transformation
        logBasicDetails(frame, sourceSize, options.getUse(), transformerName, (ourStack.size() == 1));
    }
}

From source file:org.alfresco.repo.content.transform.TransformerDebug.java

/**
 * Called once all available transformers have been identified.
 */
public void availableTransformers(List<ContentTransformer> transformers, long sourceSize,
        TransformationOptions options, String calledFrom) {
    if (isEnabled()) {
        Deque<Frame> ourStack = ThreadInfo.getStack();
        Frame frame = ourStack.peek();
        boolean firstLevel = ourStack.size() == 1;

        // Override setDebugOutput(false) to allow debug when there are transformers but they are all unavailable
        // Note once turned on we don't turn it off again.
        if (transformers.size() == 0) {
            frame.setFailureReason(NO_TRANSFORMERS);
            if (frame.unavailableTransformers != null && frame.unavailableTransformers.size() != 0) {
                ThreadInfo.setDebugOutput(true);
            }
        }
        frame.setSourceSize(sourceSize);

        // Log the basic info about this transformation
        logBasicDetails(frame, sourceSize, options.getUse(),
                calledFrom + ((transformers.size() == 0) ? " NO transformers" : ""), firstLevel);

        // Report available and unavailable transformers
        char c = 'a';
        int longestNameLength = getLongestTransformerNameLength(transformers, frame);
        for (ContentTransformer trans : transformers) {
            String name = getName(trans);
            int padName = longestNameLength - name.length() + 1;
            long maxSourceSizeKBytes = trans.getMaxSourceSizeKBytes(frame.sourceMimetype, frame.targetMimetype,
                    frame.options);
            String size = maxSourceSizeKBytes > 0 ? "< " + fileSize(maxSourceSizeKBytes * 1024) : "";
            int padSize = 10 - size.length();
            String priority = gePriority(trans, frame.sourceMimetype, frame.targetMimetype);
            log((c == 'a' ? "**" : "  ") + (c++) + ") " + priority + ' ' + name + spaces(padName) + size
                    + spaces(padSize)
                    + ms(trans.getTransformationTime(frame.sourceMimetype, frame.targetMimetype)));
        }
        if (frame.unavailableTransformers != null) {
            for (UnavailableTransformer unavailable : frame.unavailableTransformers) {
                int pad = longestNameLength - unavailable.name.length();
                String reason = "> " + fileSize(unavailable.maxSourceSizeKBytes * 1024);
                if (unavailable.debug || logger.isTraceEnabled()) {
                    log("--" + (c++) + ") " + unavailable.priority + ' ' + unavailable.name + spaces(pad + 1)
                            + reason, unavailable.debug);
                }
            }
        }
    }
}

From source file:org.alfresco.repo.content.transform.TransformerDebug.java

private void pop(Call callType, boolean suppressFinish) {
    Deque<Frame> ourStack = ThreadInfo.getStack();
    if (!ourStack.isEmpty()) {
        Frame frame = ourStack.peek();

        if ((frame.callType == callType)
                || (frame.callType == Call.AVAILABLE_AND_TRANSFORM && callType == Call.AVAILABLE)) {
            int size = ourStack.size();
            String ms = ms(System.currentTimeMillis() - frame.start);

            logInfo(frame, size, ms);

            boolean firstLevel = size == 1;
            if (!suppressFinish && (firstLevel || logger.isTraceEnabled())) {
                log(FINISHED_IN + ms + (frame.callType == Call.AVAILABLE ? " Transformer NOT called" : "")
                        + (firstLevel ? "\n" : ""), firstLevel);
            }

            setDebugOutput(frame.origDebugOutput);
            ourStack.pop();
        }
    }
}
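
The three TransformerDebug methods above treat the Deque as a call stack and use ourStack.size() == 1 to detect the top-level frame. A stripped-down sketch of that pattern (Frame, logging and transformer details are omitted; the class and method names here are illustrative only):

import java.util.ArrayDeque;
import java.util.Deque;

public class CallDepthTracker {
    private final Deque<String> stack = new ArrayDeque<>();

    public void push(String name) {
        stack.push(name);
        if (stack.size() == 1) {
            System.out.println("top-level call: " + name);   // only the outermost frame is announced
        }
    }

    public void pop() {
        if (!stack.isEmpty()) {
            boolean firstLevel = stack.size() == 1;
            String name = stack.pop();
            System.out.println("finished " + name + (firstLevel ? " (top level)" : ""));
        }
    }

    public static void main(String[] args) {
        CallDepthTracker tracker = new CallDepthTracker();
        tracker.push("outer");   // top-level call: outer
        tracker.push("inner");
        tracker.pop();           // finished inner
        tracker.pop();           // finished outer (top level)
    }
}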

From source file:org.apache.asterix.om.typecomputer.impl.RecordRemoveFieldsTypeComputer.java

/**
 * Compares the elements of two paths.
 * Note: l2 uses a LIFO insert and removal.
 */
private <E> boolean isEqualPaths(List<E> l1, Deque<E> l2) {
    if ((l1 == null) || (l2 == null)) {
        return false;
    }

    if (l1.size() != l2.size()) {
        return false;
    }

    Iterator<E> it2 = l2.iterator();

    int len = l1.size();
    for (int i = len - 1; i >= 0; i--) {
        E o1 = l1.get(i);
        E o2 = it2.next();
        if (!o1.equals(o2)) {
            return false;
        }
    }
    return true;
}
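
Because l2 is filled with push() (LIFO), its iterator yields elements in reverse insertion order, which is why the loop walks l1 backwards. A small standalone illustration of the same size() guard and reverse walk (not the AsterixDB class itself; names and values are made up):

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;

public class PathComparisonDemo {
    public static void main(String[] args) {
        List<String> l1 = Arrays.asList("a", "b", "c");

        Deque<String> l2 = new ArrayDeque<>();
        l2.push("a");
        l2.push("b");
        l2.push("c");   // iteration order is now c, b, a

        // Same shortcut as isEqualPaths: paths of different lengths can never match.
        boolean equal = l1.size() == l2.size();
        if (equal) {
            Iterator<String> it2 = l2.iterator();
            for (int i = l1.size() - 1; i >= 0 && equal; i--) {
                equal = l1.get(i).equals(it2.next());
            }
        }
        System.out.println(equal);   // true - l1 matches the order in which l2 was pushed
    }
}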

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * Perform a bulk load of the given directory into the given pre-existing table. This method is
 * not threadsafe.
 * @param hfofDir the directory that was provided as the output path of a job using
 *          HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getTableName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = cfg.getInt("hbase.loadincremental.threads.max", Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = cfg.getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }
}
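
This method, and the bulk-load variants further down, share the same shape: loop until the queue drains, using queue.size() only to report how many items are still outstanding on each retry. A minimal sketch of that retry loop (the queue contents, retry limit and "work" step are placeholders, not HBase code):

import java.util.ArrayDeque;
import java.util.Deque;

public class RetryQueueDemo {
    public static void main(String[] args) {
        Deque<String> queue = new ArrayDeque<>();
        queue.add("hfile-1");
        queue.add("hfile-2");
        queue.add("hfile-3");

        int maxRetries = 10;
        int count = 0;
        while (!queue.isEmpty()) {
            if (count != 0) {
                System.out.println("Retry attempt " + count + " with " + queue.size() + " files remaining");
            }
            if (maxRetries != 0 && count >= maxRetries) {
                throw new RuntimeException("Retry attempted " + count + " times without completing, bailing out");
            }
            count++;

            // Placeholder for the real load phase: pretend one item succeeds per pass
            // and is removed, while the rest stay queued for the next iteration.
            queue.poll();
        }
        System.out.println("All files loaded after " + count + " passes");
    }
}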

From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java

/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = getConf().getInt("hbase.loadincremental.threads.max",
            Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        // check whether there is invalid family name in HFiles to be bulkloaded
        Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
        ArrayList<String> familyNames = new ArrayList<String>();
        for (HColumnDescriptor family : families) {
            familyNames.add(family.getNameAsString());
        }
        ArrayList<String> unmatchedFamilies = new ArrayList<String>();
        for (LoadQueueItem lqi : queue) {
            String familyNameInHFile = Bytes.toString(lqi.family);
            if (!familyNames.contains(familyNameInHFile)) {
                unmatchedFamilies.add(familyNameInHFile);
            }
        }
        if (unmatchedFamilies.size() > 0) {
            String msg = "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: "
                    + unmatchedFamilies + "; valid family names of table "
                    + Bytes.toString(table.getTableName()) + " are: " + familyNames;
            LOG.error(msg);
            throw new IOException(msg);
        }
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        //If using secure bulk load, get source delegation token, and
        //prepare staging directory and token
        if (userProvider.isHBaseSecurityEnabled()) {
            // fs is the source filesystem
            fsDelegationToken.acquireDelegationToken(fs);

            bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getName());
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = getConf().getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
                // Error is logged inside checkHFilesCountPerRegionPerFamily.
                throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
                        + " hfiles to one family of one region");
            }

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        if (userProvider.isHBaseSecurityEnabled()) {
            fsDelegationToken.releaseDelegationToken();

            if (bulkToken != null) {
                new SecureBulkLoadClient(table).cleanupBulkLoad(bulkToken);
            }
        }
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }

    if (queue != null && !queue.isEmpty()) {
        throw new RuntimeException(
                "Bulk load aborted with some files not yet loaded." + "Please check log for more details.");
    }
}

From source file:org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.java

/**
 * Store the current region loads.
 */
private synchronized void updateRegionLoad() {
    // We create a new hashmap so that regions that are no longer there are removed.
    // However we temporarily need the old loads so we can use them to keep the rolling average.
    Map<String, Deque<RegionLoad>> oldLoads = loads;
    loads = new HashMap<String, Deque<RegionLoad>>();

    for (ServerName sn : clusterStatus.getServers()) {
        ServerLoad sl = clusterStatus.getLoad(sn);
        if (sl == null) {
            continue;
        }
        for (Entry<byte[], RegionLoad> entry : sl.getRegionsLoad().entrySet()) {
            Deque<RegionLoad> rLoads = oldLoads.get(Bytes.toString(entry.getKey()));
            if (rLoads == null) {
                // There was nothing there
                rLoads = new ArrayDeque<RegionLoad>();
            } else if (rLoads.size() >= 15) {
                rLoads.remove();
            }
            rLoads.add(entry.getValue());
            loads.put(Bytes.toString(entry.getKey()), rLoads);

        }
    }

    for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
        cost.setLoads(loads);
    }
}
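
The balancer caps each per-region history at 15 entries by checking size() before adding, giving a rolling window for the average. A generic sketch of that bounded-window pattern (window length, element type and class name are illustrative):

import java.util.ArrayDeque;
import java.util.Deque;

public class RollingWindow {
    private static final int MAX_ENTRIES = 15;
    private final Deque<Long> window = new ArrayDeque<>();

    public void add(long value) {
        if (window.size() >= MAX_ENTRIES) {
            window.remove();    // drop the oldest entry from the head
        }
        window.add(value);      // append the newest entry at the tail
    }

    public double average() {
        if (window.isEmpty()) {
            return 0.0;
        }
        long sum = 0;
        for (long v : window) {
            sum += v;
        }
        return (double) sum / window.size();
    }

    public static void main(String[] args) {
        RollingWindow w = new RollingWindow();
        for (long i = 1; i <= 20; i++) {
            w.add(i);
        }
        System.out.println(w.average());   // 13.0 - the average of the last 15 values (6..20)
    }
}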

From source file:org.apache.hadoop.hbase.replication.regionserver.HFileReplicator.java

private void doBulkLoad(LoadIncrementalHFiles loadHFiles, Table table, Deque<LoadQueueItem> queue,
        RegionLocator locator, int maxRetries) throws IOException {
    int count = 0;
    Pair<byte[][], byte[][]> startEndKeys;
    while (!queue.isEmpty()) {
        // need to reload split keys each iteration.
        startEndKeys = locator.getStartEndKeys();
        if (count != 0) {
            LOG.warn("Error occured while replicating HFiles, retry attempt " + count + " with " + queue.size()
                    + " files still remaining to replicate.");
        }

        if (maxRetries != 0 && count >= maxRetries) {
            throw new IOException("Retry attempted " + count + " times without completing, bailing out.");
        }
        count++;

        // Try bulk load
        loadHFiles.loadHFileQueue(table, connection, queue, startEndKeys);
    }
}

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

private Map<LoadQueueItem, ByteBuffer> performBulkLoad(Admin admin, Table table, RegionLocator regionLocator,
        Deque<LoadQueueItem> queue, ExecutorService pool, SecureBulkLoadClient secureClient, boolean copyFile)
        throws IOException {
    int count = 0;

    fsDelegationToken.acquireDelegationToken(queue.peek().getFilePath().getFileSystem(getConf()));
    bulkToken = secureClient.prepareBulkLoad(admin.getConnection());
    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = null;

    Map<LoadQueueItem, ByteBuffer> item2RegionMap = new HashMap<>();
    // Assumes that region splits can happen while this occurs.
    while (!queue.isEmpty()) {
        // need to reload split keys each iteration.
        final Pair<byte[][], byte[][]> startEndKeys = regionLocator.getStartEndKeys();
        if (count != 0) {
            LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " + queue.size()
                    + " files remaining to group or split");
        }

        int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
        maxRetries = Math.max(maxRetries, startEndKeys.getFirst().length + 1);
        if (maxRetries != 0 && count >= maxRetries) {
            throw new IOException("Retry attempted " + count + " times without completing, bailing out");
        }
        count++;

        // Using ByteBuffer for byte[] equality semantics
        pair = groupOrSplitPhase(table, pool, queue, startEndKeys);
        Multimap<ByteBuffer, LoadQueueItem> regionGroups = pair.getFirst();

        if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
            // Error is logged inside checkHFilesCountPerRegionPerFamily.
            throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
                    + " hfiles to one family of one region");
        }

        bulkLoadPhase(table, admin.getConnection(), pool, queue, regionGroups, copyFile, item2RegionMap);

        // NOTE: The next iteration's split / group could happen in parallel to
        // atomic bulkloads assuming that there are splits and no merges, and
        // that we can atomically pull out the groups we want to retry.
    }

    if (!queue.isEmpty()) {
        throw new RuntimeException(
                "Bulk load aborted with some files not yet loaded." + "Please check log for more details.");
    }
    return item2RegionMap;
}

From source file:org.apache.hadoop.hive.ql.QTestUtil.java

/**
 * Given the current configurations (e.g., hadoop version and execution mode), return
 * the correct file name to compare with the current test run output.
 * @param outDir The directory where the reference log files are stored.
 * @param testName The test file name (terminated by ".out").
 * @return The file name appended with the configuration values if it exists.
 */
public String outPath(String outDir, String testName) {
    String ret = (new File(outDir, testName)).getPath();
    // List of configurations. Currently the list consists of hadoop version and execution mode only
    List<String> configs = new ArrayList<String>();
    configs.add(this.hadoopVer);

    Deque<String> stack = new LinkedList<String>();
    StringBuilder sb = new StringBuilder();
    sb.append(testName);
    stack.push(sb.toString());

    // example file names are input1.q.out_0.20.0_minimr or input2.q.out_0.17
    for (String s : configs) {
        sb.append('_');
        sb.append(s);
        stack.push(sb.toString());
    }
    while (stack.size() > 0) {
        String fileName = stack.pop();
        File f = new File(outDir, fileName);
        if (f.exists()) {
            ret = f.getPath();
            break;
        }
    }
    return ret;
}
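
outPath() builds candidate file names from most generic to most specific, pushes each onto a stack, and then pops while stack.size() > 0 so the most specific existing reference file wins. A compact sketch of that lookup order (file names are made up and the file-system check is replaced with a fixed set):

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;

public class MostSpecificLookup {
    public static void main(String[] args) {
        // Pretend these reference files exist on disk.
        Set<String> existing = new HashSet<>(Arrays.asList("input1.q.out", "input1.q.out_0.20.0"));

        Deque<String> stack = new ArrayDeque<>();
        StringBuilder sb = new StringBuilder("input1.q.out");
        stack.push(sb.toString());                  // most generic candidate first
        for (String config : new String[] { "0.20.0", "minimr" }) {
            sb.append('_').append(config);
            stack.push(sb.toString());              // each push is more specific than the last
        }

        String chosen = "input1.q.out";             // default if nothing more specific exists
        while (stack.size() > 0) {                  // same loop condition as outPath()
            String candidate = stack.pop();         // most specific candidate comes off first
            if (existing.contains(candidate)) {
                chosen = candidate;
                break;
            }
        }
        System.out.println(chosen);                 // input1.q.out_0.20.0
    }
}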