Example usage for java.util Deque isEmpty

Introduction

This page collects usage examples for the java.util.Deque method isEmpty(), taken from open-source projects.

Prototype

boolean isEmpty();

Document

Returns true if this collection contains no elements.
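
A minimal, self-contained demonstration of the call (an illustrative sketch, not taken from the sources below):

import java.util.ArrayDeque;
import java.util.Deque;

public class DequeIsEmptyDemo {
    public static void main(String[] args) {
        Deque<String> deque = new ArrayDeque<>();
        System.out.println(deque.isEmpty()); // true: nothing added yet

        deque.push("first");
        System.out.println(deque.isEmpty()); // false: one element present

        deque.pop();
        System.out.println(deque.isEmpty()); // true again after removal
    }
}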

Usage

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

/**
 * Perform a bulk load of the given directory into the given pre-existing table. This method is
 * not threadsafe.
 * @param hfofDir the directory that was provided as the output path of a job using
 *          HFileOutputFormat
 * @param admin the Admin
 * @param table the table to load into
 * @param regionLocator region locator
 * @param silence true to ignore unmatched column families
 * @param copyFile always copy hfiles if true
 * @throws TableNotFoundException if table does not yet exist
 */
public Map<LoadQueueItem, ByteBuffer> doBulkLoad(Path hfofDir, final Admin admin, Table table,
        RegionLocator regionLocator, boolean silence, boolean copyFile)
        throws TableNotFoundException, IOException {
    if (!admin.isTableAvailable(regionLocator.getName())) {
        throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
    }

    /*
     * Checking hfile format is a time-consuming operation, we should have an option to skip this
     * step when bulkloading millions of HFiles. See HBASE-13985.
     */
    boolean validateHFile = getConf().getBoolean("hbase.loadincremental.validate.hfile", true);
    if (!validateHFile) {
        LOG.warn("You are skipping HFiles validation, it might cause some data loss if files "
                + "are not correct. If you fail to read data from your table after using this "
                + "option, consider removing the files and bulkload again without this option. "
                + "See HBASE-13985");
    }
    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new ArrayDeque<>();
    ExecutorService pool = null;
    SecureBulkLoadClient secureClient = null;
    try {
        prepareHFileQueue(hfofDir, table, queue, validateHFile, silence);

        if (queue.isEmpty()) {
            LOG.warn(
                    "Bulk load operation did not find any files to load in directory {}. "
                            + "Does it contain files in subdirectories that correspond to column family names?",
                    (hfofDir != null ? hfofDir.toUri().toString() : ""));
            return Collections.emptyMap();
        }
        pool = createExecutorService();
        secureClient = new SecureBulkLoadClient(table.getConfiguration(), table);
        return performBulkLoad(admin, table, regionLocator, queue, pool, secureClient, copyFile);
    } finally {
        cleanup(admin, queue, pool, secureClient);
    }
}

From source file:org.sybila.parasim.computation.verification.stl.cpu.AbstractUnaryTemporalMonitor.java

private List<Robustness> precomputeRobustness(Monitor subMonitor, FormulaInterval interval) {
    Deque<Robustness> lemireDeque = new LemireDeque<>(createComparator());
    List<Robustness> precomputed = new ArrayList<>();
    Iterator<Robustness> window = subMonitor.iterator();
    Iterator<Robustness> current = subMonitor.iterator();
    int currentIndex = 0;
    float currentTime = current.next().getTime();
    while (window.hasNext()) {
        Robustness memory = null;
        boolean windowEndReached = false;
        // push new points
        while (window.hasNext() && !windowEndReached) {
            memory = window.next();
            // check whether the time upper bound is reached
            if (memory.getTime() < currentTime + interval.getUpperBound()) {
                lemireDeque.offer(memory);
                memory = null;
            } else if (memory.getTime() == currentTime + interval.getUpperBound()) {
                lemireDeque.offer(memory);
                memory = null;
                windowEndReached = true;
            } else {
                windowEndReached = true;
            }
        }
        // check whether the window end has been reached
        if (!windowEndReached) {
            return precomputed;
        }
        // remove useless points
        while (!lemireDeque.isEmpty()
                && lemireDeque.peekFirst().getTime() < currentTime + interval.getLowerBound()) {
            lemireDeque.remove();
        }
        // get the first robustness in deque
        Robustness found = lemireDeque.peekFirst();
        precomputed.add(new SimpleRobustness(found.getValue(), currentTime, getProperty()));
        currentIndex++;
        currentTime = current.next().getTime();
        if (memory != null) {
            lemireDeque.offer(memory);
            memory = null;
        }
    }
    return precomputed;
}
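
The LemireDeque used above is a project-specific structure (not part of java.util): it keeps its contents ordered by robustness value so that peekFirst() always yields the window optimum. A rough sketch of the underlying idea, a monotonic deque computing a sliding-window minimum, assuming plain float values (the names and simplifications here are illustrative, not parasim's API):

import java.util.ArrayDeque;
import java.util.Deque;

/** Sliding-window minimum via a monotonic deque (Lemire's streaming algorithm). */
final class SlidingWindowMin {

    static float[] minima(float[] values, int window) {
        // Holds indices into 'values'; the values at those indices stay non-decreasing.
        Deque<Integer> deque = new ArrayDeque<>();
        float[] result = new float[values.length - window + 1];
        for (int i = 0; i < values.length; i++) {
            // Drop entries from the back that can no longer be the minimum.
            while (!deque.isEmpty() && values[deque.peekLast()] >= values[i]) {
                deque.removeLast();
            }
            deque.addLast(i);
            // Drop the front once it slides out of the window.
            if (deque.peekFirst() <= i - window) {
                deque.removeFirst();
            }
            if (i >= window - 1) {
                result[i - window + 1] = values[deque.peekFirst()];
            }
        }
        return result;
    }
}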

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

private void cleanup(Admin admin, Deque<LoadQueueItem> queue, ExecutorService pool,
        SecureBulkLoadClient secureClient) throws IOException {
    fsDelegationToken.releaseDelegationToken();
    if (bulkToken != null && secureClient != null) {
        secureClient.cleanupBulkLoad(admin.getConnection(), bulkToken);
    }
    if (pool != null) {
        pool.shutdown();
    }
    if (!queue.isEmpty()) {
        StringBuilder err = new StringBuilder();
        err.append("-------------------------------------------------\n");
        err.append("Bulk load aborted with some files not yet loaded:\n");
        err.append("-------------------------------------------------\n");
        for (LoadQueueItem q : queue) {
            err.append("  ").append(q.getFilePath()).append('\n');
        }
        LOG.error(err.toString());
    }
}

From source file:org.nuxeo.ecm.core.storage.sql.PersistenceContext.java

/**
 * Removes a property node and its children.
 * <p>
 * There's less work to do than when we have to remove a generic document
 * node (fewer selections, and we can assume the depth is small, so we recurse).
 */
public void removePropertyNode(SimpleFragment hierFragment) throws StorageException {
    // collect children
    Deque<SimpleFragment> todo = new LinkedList<SimpleFragment>();
    List<SimpleFragment> children = new LinkedList<SimpleFragment>();
    todo.add(hierFragment);
    while (!todo.isEmpty()) {
        SimpleFragment fragment = todo.removeFirst();
        todo.addAll(getChildren(fragment.getId(), null, true)); // complex
        children.add(fragment);
    }
    Collections.reverse(children);
    // iterate on children depth first
    for (SimpleFragment fragment : children) {
        // remove from context
        boolean primary = fragment == hierFragment;
        removeFragmentAndDependents(fragment, primary);
        // remove from selections
        // removed from its parent selection
        hierComplex.recordRemoved(fragment);
        // no children anymore
        hierComplex.recordRemovedSelection(fragment.getId());
    }
}
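
The pattern in this example, a Deque used as a breadth-first worklist to collect a subtree, followed by Collections.reverse() so children are processed before their parents, generalizes to any tree deletion. A stripped-down sketch with a hypothetical Node type (illustrative, not the Nuxeo API):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.List;

class Node {
    final List<Node> children = new ArrayList<>();
}

class TreeRemover {

    /** Collects 'root' and all of its descendants, ordered children-first. */
    static List<Node> bottomUpOrder(Node root) {
        Deque<Node> todo = new ArrayDeque<>();
        List<Node> collected = new ArrayList<>();
        todo.add(root);
        while (!todo.isEmpty()) {
            Node n = todo.removeFirst();    // breadth-first: parents before children
            todo.addAll(n.children);
            collected.add(n);
        }
        Collections.reverse(collected);     // children now precede their parents
        return collected;
    }
}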

From source file:org.gdms.source.DefaultSourceManager.java

private void removeFromSchema(String name) {
    if (name.isEmpty()) {
        throw new IllegalArgumentException("Empty table name!");
    }

    // split the name on the dots '.':
    // schema1.schema2.schema3.table1

    String[] l = DOT.split(name);

    if (l.length <= 1) {
        // just a table, we remove it from the root schema
        schema.removeTable(name);
    } else {
        Deque<Schema> path = new ArrayDeque<Schema>();
        path.add(schema);
        // we get down
        // to the last schema before the table
        for (int i = 0; i < l.length - 1; i++) {
            final Schema n = path.getFirst().getSubSchemaByName(l[i]);
            path.addFirst(n);
        }

        boolean stop = false;
        while (!path.isEmpty() && !stop) {
            // take the last schema in the path (top of the pile)
            final Schema n = path.pollFirst();
            n.removeTable(l[l.length - 1]);
            if (n.getTableCount() != 0 || n.getSubSchemaNames().length != 0) {
                // the schema is still needed, we must not remove it
                stop = true;
            } else {
                Schema p = n.getParentSchema();
                if (p != null) {
                    p.removeSubSchema(n.getName());
                } else {
                    // we have reached the root, it stays where it is...
                    stop = true;
                }
            }
        }
    }
}
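
Here the Deque serves as a stack of schemas (addFirst to push, getFirst to peek, pollFirst to pop); the Deque javadoc itself recommends this idiom over the legacy java.util.Stack. A tiny illustrative sketch of the stack usage in isolation:

import java.util.ArrayDeque;
import java.util.Deque;

public class StackIdiom {
    public static void main(String[] args) {
        Deque<String> stack = new ArrayDeque<>();
        stack.addFirst("root");                    // push
        stack.addFirst("child");                   // push
        System.out.println(stack.getFirst());      // peek -> "child"
        while (!stack.isEmpty()) {
            System.out.println(stack.pollFirst()); // pop -> "child", then "root"
        }
    }
}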

From source file:org.ciasaboark.tacere.service.EventSilencerService.java

private void checkForActiveEventsAndSilence() {
    Log.d(TAG, "checking for events to silence");
    Deque<EventInstance> events = databaseInterface.getAllActiveEvents();
    boolean foundEvent = false;
    for (EventInstance event : events) {
        if (shouldEventSilence(event)) {
            Log.d(TAG, "found event to silence for: " + event.toString());
            foundEvent = true;
            silenceForEventAndShowNotification(event);
            break;
        }
    }

    if (!foundEvent) {
        Log.d(TAG, "did not find an event to silence for");
        //if we couldn't find any events, then do some cleanup and schedule the service to be
        //restarted when the last active checked event ends. If there were no active events,
        //then we need to sleep until the next event in the database starts.  If there are no
        //events in the database either, then just sleep for 24 hours.
        if (stateManager.isEventActive()) {
            Log.d(TAG, "event previously active, but non active now, restoring volumes");
            vibrate();
            stateManager.resetServiceState();
            notificationManager.cancelAllNotifications();
            volumesManager.restoreVolumes();
            ringerStateManager.restorePhoneRinger();
            notifyCursorAdapterDataChanged();
        }

        long wakeAt;
        if (!events.isEmpty()) {
            EventInstance lastActiveEvent = events.getLast();
            wakeAt = lastActiveEvent.getOriginalEnd();
            Log.d(TAG, "sleeping until last known event ends at " + wakeAt);
        } else {
            EventInstance nextInactiveEvent = databaseInterface.nextEvent();
            if (nextInactiveEvent != null) {
                wakeAt = nextInactiveEvent.getBegin()
                        - (EventInstance.MILLISECONDS_IN_MINUTE * prefs.getBufferMinutes());
                Log.d(TAG, "sleeping until next known event starts at " + wakeAt);
            } else {
                Log.d(TAG, "sleeping for 24 hrs");
                wakeAt = System.currentTimeMillis() + EventInstance.MILLISECONDS_IN_DAY;
            }
        }

        alarmManager.scheduleNormalWakeAt(wakeAt);
    }
}

From source file:cc.kave.commons.pointsto.analysis.unification.UnificationAnalysisVisitorContext.java

/**
 * Reruns the unification until all lazily added locations have propagated
 * and no more changes are detected.
 * 
 * {@link LocationIdentifier}s are added lazily to
 * {@link ExtendedReferenceLocation} instances. If a location is added to an
 * already unified {@link ExtendedReferenceLocation}, the unification has to
 * be applied again to ensure correctness of the result.
 */
private void finalizePendingUnifications() {
    Deque<Pair<ReferenceLocation, ReferenceLocation>> worklist = new ArrayDeque<>();
    for (Map.Entry<ReferenceLocation, ReferenceLocation> locations : pendingUnifications.entries()) {
        ReferenceLocation refLoc1 = locations.getKey();
        ReferenceLocation refLoc2 = locations.getValue();

        int loc1Identifiers = refLoc1.getIdentifiers().size();
        int loc2Identifiers = refLoc2.getIdentifiers().size();

        if (loc1Identifiers != loc2Identifiers) {
            worklist.addFirst(ImmutablePair.of(refLoc1, refLoc2));
        }
    }

    while (!worklist.isEmpty()) {
        Pair<ReferenceLocation, ReferenceLocation> locations = worklist.removeFirst();
        ReferenceLocation loc1 = locations.getLeft();
        ReferenceLocation loc2 = locations.getRight();

        int previousIdentifiersLoc1 = loc1.getIdentifiers().size();
        int previousIdentifiersLoc2 = loc2.getIdentifiers().size();
        unify(loc1, loc2);

        updateUnificationWorklist(worklist, previousIdentifiersLoc1, loc1, loc2);
        updateUnificationWorklist(worklist, previousIdentifiersLoc2, loc2, loc1);
    }
}
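
This method is an instance of the classic worklist idiom: seed a Deque with items whose state may still change, then drain it with isEmpty() as the termination test, re-adding items whenever processing uncovers new work. A generic sketch of the idiom (the types are placeholders, not the KaVE API):

import java.util.ArrayDeque;
import java.util.Collection;
import java.util.Deque;
import java.util.function.Function;

final class WorklistSolver {

    /** Drains the worklist to a fixpoint; 'step' returns any items to revisit. */
    static <T> void solve(Deque<T> worklist, Function<T, Collection<T>> step) {
        while (!worklist.isEmpty()) {
            T item = worklist.removeFirst();
            worklist.addAll(step.apply(item));
        }
    }
}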

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

private Map<LoadQueueItem, ByteBuffer> performBulkLoad(Admin admin, Table table, RegionLocator regionLocator,
        Deque<LoadQueueItem> queue, ExecutorService pool, SecureBulkLoadClient secureClient, boolean copyFile)
        throws IOException {
    int count = 0;

    fsDelegationToken.acquireDelegationToken(queue.peek().getFilePath().getFileSystem(getConf()));
    bulkToken = secureClient.prepareBulkLoad(admin.getConnection());
    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = null;

    Map<LoadQueueItem, ByteBuffer> item2RegionMap = new HashMap<>();
    // Assumes that region splits can happen while this occurs.
    while (!queue.isEmpty()) {
        // need to reload split keys each iteration.
        final Pair<byte[][], byte[][]> startEndKeys = regionLocator.getStartEndKeys();
        if (count != 0) {
            LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " + queue.size()
                    + " files remaining to group or split");
        }

        int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
        maxRetries = Math.max(maxRetries, startEndKeys.getFirst().length + 1);
        if (maxRetries != 0 && count >= maxRetries) {
            throw new IOException("Retry attempted " + count + " times without completing, bailing out");
        }
        count++;

        // Using ByteBuffer for byte[] equality semantics
        pair = groupOrSplitPhase(table, pool, queue, startEndKeys);
        Multimap<ByteBuffer, LoadQueueItem> regionGroups = pair.getFirst();

        if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
            // Error is logged inside checkHFilesCountPerRegionPerFamily.
            throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
                    + " hfiles to one family of one region");
        }

        bulkLoadPhase(table, admin.getConnection(), pool, queue, regionGroups, copyFile, item2RegionMap);

        // NOTE: The next iteration's split / group could happen in parallel to
        // atomic bulkloads assuming that there are splits and no merges, and
        // that we can atomically pull out the groups we want to retry.
    }

    if (!queue.isEmpty()) {
        throw new RuntimeException(
                "Bulk load aborted with some files not yet loaded." + "Please check log for more details.");
    }
    return item2RegionMap;
}

From source file:com.spotify.helios.client.DefaultRequestDispatcher.java

/**
 * Sets up a connection, retrying on connect failure.
 */
private HttpURLConnection connect(final URI uri, final String method, final byte[] entity,
        final Map<String, List<String>> headers)
        throws URISyntaxException, IOException, TimeoutException, InterruptedException, HeliosException {
    final long deadline = currentTimeMillis() + RETRY_TIMEOUT_MILLIS;
    final int offset = ThreadLocalRandom.current().nextInt();

    while (currentTimeMillis() < deadline) {
        final List<URI> endpoints = endpointSupplier.get();
        if (endpoints.isEmpty()) {
            throw new RuntimeException("failed to resolve master");
        }
        log.debug("endpoint uris are {}", endpoints);

        // Resolve hostname into IPs so client will round-robin and retry for multiple A records.
        // Keep a mapping of IPs to hostnames for TLS verification.
        final List<URI> ipEndpoints = Lists.newArrayList();
        final Map<URI, URI> ipToHostnameUris = Maps.newHashMap();

        for (final URI hnUri : endpoints) {
            try {
                final InetAddress[] ips = InetAddress.getAllByName(hnUri.getHost());
                for (final InetAddress ip : ips) {
                    final URI ipUri = new URI(hnUri.getScheme(), hnUri.getUserInfo(), ip.getHostAddress(),
                            hnUri.getPort(), hnUri.getPath(), hnUri.getQuery(), hnUri.getFragment());
                    ipEndpoints.add(ipUri);
                    ipToHostnameUris.put(ipUri, hnUri);
                }
            } catch (UnknownHostException e) {
                log.warn("Unable to resolve hostname {} into IP address: {}", hnUri.getHost(), e);
            }
        }

        for (int i = 0; i < ipEndpoints.size() && currentTimeMillis() < deadline; i++) {
            final URI ipEndpoint = ipEndpoints.get(positive(offset + i) % ipEndpoints.size());
            final String fullpath = ipEndpoint.getPath() + uri.getPath();

            final String scheme = ipEndpoint.getScheme();
            final String host = ipEndpoint.getHost();
            final int port = ipEndpoint.getPort();
            if (!VALID_PROTOCOLS.contains(scheme) || host == null || port == -1) {
                throw new HeliosException(String.format(
                        "Master endpoints must be of the form \"%s://heliosmaster.domain.net:<port>\"",
                        VALID_PROTOCOLS_STR));
            }

            final URI realUri = new URI(scheme, host + ":" + port, fullpath, uri.getQuery(), null);

            AgentProxy agentProxy = null;
            Deque<Identity> identities = Queues.newArrayDeque();
            try {
                if (scheme.equals("https")) {
                    agentProxy = AgentProxies.newInstance();
                    for (final Identity identity : agentProxy.list()) {
                        if (identity.getPublicKey().getAlgorithm().equals("RSA")) {
                            // only RSA keys will work with our TLS implementation
                            identities.offerLast(identity);
                        }
                    }
                }
            } catch (Exception e) {
                log.warn("Couldn't get identities from ssh-agent", e);
            }

            try {
                do {
                    final Identity identity = identities.poll();

                    try {
                        log.debug("connecting to {}", realUri);

                        final HttpURLConnection connection = connect0(realUri, method, entity, headers,
                                ipToHostnameUris.get(ipEndpoint).getHost(), agentProxy, identity);

                        final int responseCode = connection.getResponseCode();
                        if (((responseCode == HTTP_FORBIDDEN) || (responseCode == HTTP_UNAUTHORIZED))
                                && !identities.isEmpty()) {
                            // there was some sort of security error. if we have any more SSH identities to try,
                            // retry with the next available identity
                            log.debug("retrying with next SSH identity since {} failed", identity.getComment());
                            continue;
                        }

                        return connection;
                    } catch (ConnectException | SocketTimeoutException | UnknownHostException e) {
                        // UnknownHostException happens if we can't resolve hostname into IP address.
                        // UnknownHostException's getMessage method returns just the hostname which is a
                        // useless message, so log the exception class name to provide more info.
                        log.debug(e.toString());
                        // Connecting failed, sleep a bit to avoid hammering and then try another endpoint
                        Thread.sleep(200);
                    }
                } while (false);
            } finally {
                if (agentProxy != null) {
                    agentProxy.close();
                }
            }
        }
        log.warn("Failed to connect, retrying in 5 seconds.");
        Thread.sleep(5000);
    }
    throw new TimeoutException("Timed out connecting to master");
}

From source file:org.shaman.terrain.polygonal.PolygonalMapGenerator.java

/**
 * Step 4: assign elevation
 */
private void assignElevation() {
    if (graph == null) {
        return;
    }
    Random rand = new Random(seed * 2);
    //initialize border corners with zero elevation
    Deque<Graph.Corner> q = new ArrayDeque<>();
    for (Graph.Corner c : graph.corners) {
        if (c.border) {
            c.elevation = 0;
            q.add(c);
        } else {
            c.elevation = Float.POSITIVE_INFINITY;
        }
    }
    // Traverse the graph and assign elevations to each point. As we
    // move away from the map border, increase the elevations. This
    // guarantees that rivers always have a way down to the coast by
    // going downhill (no local minima).
    while (!q.isEmpty()) {
        Graph.Corner c = q.poll();
        for (Graph.Corner a : c.adjacent) {
            if (c.ocean && a.ocean && a.elevation > 0) {
                a.elevation = 0;
                q.addFirst(a);
                continue;
            }
            float elevation = c.elevation + (a.ocean ? 0 : 0.01f);
            if (!c.water && !a.water) {
                elevation += 1;
            }
            //add some more randomness
            //elevation += rand.nextDouble()/4;
            if (elevation < a.elevation) {
                a.elevation = elevation;
                q.add(a);
            }
        }
    }

    //redistribute elevation
    float SCALE_FACTOR = 1.1f;
    ArrayList<Graph.Corner> corners = new ArrayList<>();
    for (Graph.Corner c : graph.corners) {
        if (!c.ocean) {
            corners.add(c);
        }
    }
    Collections.sort(corners, new Comparator<Graph.Corner>() {
        @Override
        public int compare(Graph.Corner o1, Graph.Corner o2) {
            return Float.compare(o1.elevation, o2.elevation);
        }
    });
    for (int i = 0; i < corners.size(); i++) {
        // Let y(x) be the total area that we want at elevation <= x.
        // We want the higher elevations to occur less than lower
        // ones, and set the area to be y(x) = 1 - (1-x)^2.
        float y = (float) i / (float) (corners.size() - 1);
        float x = (float) (Math.sqrt(SCALE_FACTOR) - Math.sqrt(SCALE_FACTOR * (1 - y)));
        if (x > 1.0)
            x = 1; // TODO: does this break downslopes?
        corners.get(i).elevation = x;
    }

    assignCenterElevations();

    //update mesh
    updateElevationGeometry();
}
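
Note the asymmetry in the traversal above: ocean corners re-enter the queue via addFirst() while everything else uses add(). This is essentially the 0-1 BFS technique: zero-cost steps go to the front of the deque and costly steps to the back, so the deque approximates a priority queue without the log-factor overhead. A minimal sketch of 0-1 BFS on an adjacency list (illustrative, not taken from the terrain generator):

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;

final class ZeroOneBfs {

    /**
     * Shortest distances from 'source' when every edge weight is 0 or 1.
     * adj[v][i] is the i-th neighbor of v; weight[v][i] is that edge's weight.
     */
    static int[] distances(int[][] adj, int[][] weight, int source) {
        int[] dist = new int[adj.length];
        Arrays.fill(dist, Integer.MAX_VALUE);
        dist[source] = 0;
        Deque<Integer> deque = new ArrayDeque<>();
        deque.add(source);
        while (!deque.isEmpty()) {
            int v = deque.pollFirst();
            for (int i = 0; i < adj[v].length; i++) {
                int u = adj[v][i];
                int w = weight[v][i];
                if (dist[v] + w < dist[u]) {
                    dist[u] = dist[v] + w;
                    if (w == 0) {
                        deque.addFirst(u); // zero-cost step: front of the deque
                    } else {
                        deque.addLast(u);  // unit-cost step: back of the deque
                    }
                }
            }
        }
        return dist;
    }
}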