Example usage for java.util Deque isEmpty

Introduction

On this page you can find example usages of java.util.Deque.isEmpty().

Prototype

boolean isEmpty();

Document

Returns true if this collection contains no elements.
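
As a quick orientation before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects on this page; the class name is illustrative) of the drain-loop idiom most of these examples share: keep processing elements until isEmpty() reports that the deque has none left.

import java.util.ArrayDeque;
import java.util.Deque;

public class DequeIsEmptyExample {
    public static void main(String[] args) {
        Deque<String> work = new ArrayDeque<>();
        work.push("second");
        work.push("first");

        // Drain the deque. Checking isEmpty() first avoids the
        // NoSuchElementException that pop() throws on an empty deque.
        while (!work.isEmpty()) {
            System.out.println(work.pop());
        }
    }
}

The examples below use both stack-style access (push/pop) and queue-style access (remove/addAll, pollLast/addLast); isEmpty() behaves the same regardless of which end elements enter and leave from.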

Usage

From source file: org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);

    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw new IllegalStateException(e1);
        }
    }
    return regionGroups;
}

From source file: org.apache.pig.builtin.Utf8StorageConverter.java

private Tuple consumeTuple(PushbackInputStream in, ResourceFieldSchema fieldSchema) throws IOException {
    if (fieldSchema == null) {
        throw new IOException("Schema is null");
    }
    int buf;
    ByteArrayOutputStream mOut;

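    // Skip ahead to the tuple's opening '('; a '}' means the enclosing bag
    // closed before a tuple started, so push it back and return null.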
    while ((buf = in.read()) != '(' || buf == '}') {
        if (buf == -1) {
            throw new IOException("Unexpect end of tuple");
        }
        if (buf == '}') {
            in.unread(buf);
            return null;
        }
    }
    Tuple t = TupleFactory.getInstance().newTuple();
    if (fieldSchema.getSchema() != null && fieldSchema.getSchema().getFields().length != 0) {
        ResourceFieldSchema[] fss = fieldSchema.getSchema().getFields();
        // Interpret item inside tuple one by one based on the inner schema
        for (int i = 0; i < fss.length; i++) {
            Object field;
            ResourceFieldSchema fs = fss[i];
            int delimit = ',';
            if (i == fss.length - 1)
                delimit = ')';

            if (DataType.isComplex(fs.getType())) {
                field = consumeComplexType(in, fs);
                while ((buf = in.read()) != delimit) {
                    if (buf == -1) {
                        throw new IOException("Unexpect end of tuple");
                    }
                }
            } else {
                mOut = new ByteArrayOutputStream(BUFFER_SIZE);
                while ((buf = in.read()) != delimit) {
                    if (buf == -1) {
                        throw new IOException("Unexpect end of tuple");
                    }
                    if (buf == delimit)
                        break;
                    mOut.write(buf);
                }
                field = parseSimpleType(mOut.toByteArray(), fs);
            }
            t.append(field);
        }
    } else {
        // No inner schema, treat everything inside tuple as bytearray
        // Keep track of nested tuple/bag/map delimiters; nested structures are
        // not interpreted, they are saved as bytearray.
        Deque<Character> level = new LinkedList<Character>();
        mOut = new ByteArrayOutputStream(BUFFER_SIZE);
        while (true) {
            buf = in.read();
            if (buf == -1) {
                throw new IOException("Unexpect end of tuple");
            }
            if (buf == '[' || buf == '{' || buf == '(') {
                level.push((char) buf);
                mOut.write(buf);
            } else if (buf == ')' && level.isEmpty()) // End of tuple
            {
                DataByteArray value = new DataByteArray(mOut.toByteArray());
                t.append(value);
                break;
            } else if (buf == ',' && level.isEmpty()) {
                DataByteArray value = new DataByteArray(mOut.toByteArray());
                t.append(value);
                mOut.reset();
            } else if (buf == ']' || buf == '}' || buf == ')') {
                if (level.peek() == findStartChar((char) buf))
                    level.pop();
                else
                    throw new IOException("Malformed tuple");
                mOut.write(buf);
            } else
                mOut.write(buf);
        }
    }
    return t;
}

From source file: amfservices.actions.PGServicesAction.java

public Map<String, Object> takeRandomizePrizeAction(String uid, long now) {
    UserTempData uTempData = UserTempData.getTempData(uid);

    int nTurn = PGHelper.toInteger(uTempData.getData(PGMacro.RAND_PRIZE_TURN));
    PGException.Assert(nTurn > 0, PGError.NOT_ENOUGH_RP_TURN, "You have 0 turn");

    // reduce turn
    --nTurn;
    uTempData.setData(PGMacro.RAND_PRIZE_TURN, nTurn);

    String prizeID = PGConfig.inst().getRandomizePrizes().randomPrize();

    CFRandomizePrize.Prize prizeData = PGConfig.inst().getRandomizePrizes().get(prizeID);
    if (prizeData.isAutoPrize()) {
        PGPrize prize = PrizeFactory.getPrize(prizeData.getPrize());
        EntityContext context = EntityContext.getContext(uid);
        Map<String, Object> pzDesc = prize.award(context, now);
        context.saveToDB();

        // find total gold prized:
        Deque<Map<String, Object>> pzStack = new ArrayDeque<>();
        int totalGoldPrized = 0;
        pzStack.add(prizeData.getPrize());
        while (!pzStack.isEmpty()) {
            Map<String, Object> pz = pzStack.pollLast();
            for (Map.Entry<String, Object> pzEntry : pz.entrySet()) {
                String pzKey = pzEntry.getKey();
                Object pzVal = pzEntry.getValue();

                if (pzVal instanceof Map) {
                    pzStack.addLast((Map) pzVal);
                } else if ("gold".equals(pzKey)) {
                    totalGoldPrized += PGHelper.toInteger(pzVal);
                }
            }
        }

        if (totalGoldPrized > 0) {
            QuestLogger qLogger = QuestServices.inst().getQuestLogger(uid, now);
            qLogger.log(new GoldDialRecord(totalGoldPrized));
        }

        return AMFBuilder.make(PGMacro.RAND_PRIZE_ID, prizeID, PGMacro.PRIZE, pzDesc);
    } else {
        String giftID = GiftServices.inst().sendGift(Arrays.asList(new String[] { uid }), prizeData.getPrize(),
                now, PGConfig.inst().temp().RandomizePrize_Expire()).getGiftID();

        return AMFBuilder.make(PGMacro.RAND_PRIZE_ID, prizeID, PGMacro.GIFT_ID, giftID);
    }
}

From source file: org.alfresco.repo.content.transform.TransformerDebug.java

private void pop(Call callType, boolean suppressFinish) {
    Deque<Frame> ourStack = ThreadInfo.getStack();
    if (!ourStack.isEmpty()) {
        Frame frame = ourStack.peek();

        if ((frame.callType == callType)
                || (frame.callType == Call.AVAILABLE_AND_TRANSFORM && callType == Call.AVAILABLE)) {
            int size = ourStack.size();
            String ms = ms(System.currentTimeMillis() - frame.start);

            logInfo(frame, size, ms);

            boolean firstLevel = size == 1;
            if (!suppressFinish && (firstLevel || logger.isTraceEnabled())) {
                log(FINISHED_IN + ms + (frame.callType == Call.AVAILABLE ? " Transformer NOT called" : "")
                        + (firstLevel ? "\n" : ""), firstLevel);
            }

            setDebugOutput(frame.origDebugOutput);
            ourStack.pop();
        }
    }
}

From source file: org.alfresco.repo.content.transform.TransformerDebug.java

/**
 * Log a message prefixed with the current transformation reference
 * and include an exception, suppressing the stack trace if repeated
 * as we return up the stack of transformers.
 * @param message the message to log
 * @param t the throwable to log
 */
public void debug(String message, Throwable t) {
    if (isEnabled()) {
        // Trim messages of the form: "Failed... : \n   reader:...\n    writer:..."
        String msg = t.getMessage();
        if (msg != null) {
            int i = msg.indexOf(": \n");
            if (i != -1) {
                msg = msg.substring(0, i);
            }
            log(message + ' ' + msg);
        } else {
            log(message);
        }

        Deque<Frame> ourStack = ThreadInfo.getStack();
        if (!ourStack.isEmpty()) {
            Frame frame = ourStack.peek();
            frame.setFailureReason(message + ' ' + getRootCauseMessage(t));
        }
    }
}

From source file: org.alfresco.repo.content.transform.TransformerDebug.java

/**
 * Called to identify a transformer that cannot be used during working out
 * available transformers.
 */
public void unavailableTransformer(ContentTransformer transformer, String sourceMimetype, String targetMimetype,
        long maxSourceSizeKBytes) {
    if (isEnabled()) {
        Deque<Frame> ourStack = ThreadInfo.getStack();
        Frame frame = ourStack.peek();

        if (frame != null) {
            Deque<String> isTransformableStack = ThreadInfo.getIsTransformableStack();
            String name = (!isTransformableStack.isEmpty()) ? isTransformableStack.getFirst()
                    : getName(transformer);
            boolean debug = (maxSourceSizeKBytes != 0);
            if (frame.unavailableTransformers == null) {
                frame.unavailableTransformers = new TreeSet<UnavailableTransformer>();
            }
            String priority = gePriority(transformer, sourceMimetype, targetMimetype);
            frame.unavailableTransformers
                    .add(new UnavailableTransformer(name, priority, maxSourceSizeKBytes, debug));
        }
    }
}

From source file: org.apache.hadoop.hive.ql.parse.PTFTranslator.java

private void translatePTFChain() throws SemanticException {

    Deque<PTFInputSpec> ptfChain = new ArrayDeque<PTFInvocationSpec.PTFInputSpec>();
    PTFInputSpec currentSpec = ptfInvocation.getFunction();
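    // Walk from the outermost PTF invocation down to the base input, pushing
    // each spec so the chain can be translated bottom-up in the loop below.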
    while (currentSpec != null) {
        ptfChain.push(currentSpec);
        currentSpec = currentSpec.getInput();
    }

    int inputNum = 0;
    PTFInputDef currentDef = null;
    while (!ptfChain.isEmpty()) {
        currentSpec = ptfChain.pop();

        if (currentSpec instanceof PTFQueryInputSpec) {
            currentDef = translate((PTFQueryInputSpec) currentSpec, inputNum);
        } else {
            currentDef = translate((PartitionedTableFunctionSpec) currentSpec, currentDef, inputNum);
        }
        inputNum++;
    }
    ptfDesc.setFuncDef((PartitionedTableFunctionDef) currentDef);
}

From source file: org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java

/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely
 * bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);

    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results.  All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
    return regionGroups;
}

From source file: org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

/**
 * Perform a bulk load of the given directory into the given pre-existing table. This method is
 * not threadsafe.
 * @param map map of family to List of hfiles
 * @param admin the Admin
 * @param table the table to load into
 * @param regionLocator region locator
 * @param silence true to ignore unmatched column families
 * @param copyFile always copy hfiles if true
 * @throws TableNotFoundException if table does not yet exist
 */
public Map<LoadQueueItem, ByteBuffer> doBulkLoad(Map<byte[], List<Path>> map, final Admin admin, Table table,
        RegionLocator regionLocator, boolean silence, boolean copyFile)
        throws TableNotFoundException, IOException {
    if (!admin.isTableAvailable(regionLocator.getName())) {
        throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
    }
    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new ArrayDeque<>();
    ExecutorService pool = null;
    SecureBulkLoadClient secureClient = null;
    try {
        prepareHFileQueue(map, table, queue, silence);
        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not get any files to load");
            return Collections.emptyMap();
        }
        pool = createExecutorService();
        secureClient = new SecureBulkLoadClient(table.getConfiguration(), table);
        return performBulkLoad(admin, table, regionLocator, queue, pool, secureClient, copyFile);
    } finally {
        cleanup(admin, queue, pool, secureClient);
    }
}