Example usage for java.util Queue add

List of usage examples for java.util Queue add

Introduction

On this page you can find example usage for java.util Queue.add.

Prototype

boolean add(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
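
For a quick feel of that contract, here is a minimal, self-contained sketch (not taken from the examples below) contrasting add with offer on a capacity-restricted queue:

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddDemo {
    public static void main(String[] args) {
        // a bounded queue with room for exactly one element
        Queue<String> queue = new ArrayBlockingQueue<>(1);

        System.out.println(queue.add("first"));    // true: the element fits
        System.out.println(queue.offer("second")); // false: offer reports a full queue

        try {
            queue.add("third"); // add throws instead of returning false
        } catch (IllegalStateException e) {
            System.out.println("add on a full queue: " + e);
        }
    }
}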

Usage

From source file:com.bluepowermod.part.tube.TubeLogic.java

/**
 * This method gets the end target and heading for a TubeStack. When the TubeStack's target variable is null, this is an exporting item, meaning
 * the returned target will be the TileEntity the item is going to transport to. When the TubeStack's target variable is not null, the item is
 * being retrieved to this inventory. The returned target is the inventory the item came from or should come from.
 *
 * @param simulate
 *            The only difference between simulating and not simulating is that the round robin handling will only be updated when not
 *            simulating.
 * @param from
 *            The direction this item came from; this direction will never be a valid heading. It is null in normal item routing, as the from
 *            direction IS a valid output.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
private Pair<ForgeDirection, TileEntity> getHeadingForItem(TubeStack stack, boolean simulate) {

    Map<TubeNode, Integer> distances = new HashMap<TubeNode, Integer>();
    Queue<TubeNode> traversingNodes = new LinkedBlockingQueue<TubeNode>();
    Queue<ForgeDirection> trackingExportDirection = new LinkedBlockingQueue<ForgeDirection>();
    Map<TubeEdge, ForgeDirection> validDestinations = new LinkedHashMap<TubeEdge, ForgeDirection>();// using a LinkedHashMap so the order doesn't change; used for round robin.

    if (getNode() != null) {
        distances.put(getNode(), 0);// make this the origin.
        traversingNodes.add(getNode());
    }

    boolean firstRun = true;
    int closestDest = 0;
    while (!traversingNodes.isEmpty()) {
        TubeNode node = traversingNodes.poll();
        if (node.edges == null)
            node.init();
        ForgeDirection heading = firstRun ? null : trackingExportDirection.poll();
        for (int i = 0; i < 6; i++) {
            if (firstRun)
                heading = ForgeDirection.getOrientation(i);
            if (node.edges != null) {
                TubeEdge edge = node.edges[i];
                if (edge != null && canPassThroughMask(stack.color, edge.colorMask)) {// if this item can travel through this color mask proceed.
                    Integer distance = distances.get(edge.target);
                    if (distance == null || distances.get(node) + edge.distance < distance) {
                        distances.put(edge.target, distances.get(node) + edge.distance);
                        if (edge.target.target instanceof PneumaticTube) {
                            traversingNodes.add(edge.target);
                            trackingExportDirection.add(heading);
                        } else if (stack.getTarget(tube.getWorld()) == null
                                && edge.isValidForExportItem(stack.stack)
                                || stack.heading == null && edge.isValidForImportItem(stack)
                                || stack.heading != null
                                        && stack.getTarget(tube.getWorld()) == edge.target.target
                                        && edge.targetConnectionSide.getOpposite() == stack
                                                .getTargetEntryDir()) {
                            validDestinations.put(edge,
                                    stack.heading == null ? edge.targetConnectionSide : heading);
                        }
                    }
                }
            }
        }

        // Check the distances of the current breadth-first search layer. If no points are closer than the currently valid destination(s), we're
        // done searching.
        boolean isDoneSearching = true;
        closestDest = getClosestDestination(validDestinations.keySet(), distances);
        for (TubeNode checkingNode : traversingNodes) {
            if (distances.get(checkingNode) <= closestDest) {
                isDoneSearching = false;
                break;
            }
        }
        if (isDoneSearching)
            break;
        firstRun = false;
    }

    if (validDestinations.size() == 0) {
        if (stack.getTarget(tube.getWorld()) != null && stack.heading != null && !simulate) {
            stack.setTarget(null, ForgeDirection.UNKNOWN);// if we can't reach the retrieving target anymore, reroute as normal.
            return getHeadingForItem(stack, simulate);
        } else {
            return null;
        }
    }

    List<Pair<ForgeDirection, TileEntity>> validDirections = new ArrayList<Pair<ForgeDirection, TileEntity>>();
    for (Map.Entry<TubeEdge, ForgeDirection> entry : validDestinations.entrySet()) {
        if (distances.get(entry.getKey().target) == closestDest) {
            validDirections.add(new ImmutablePair(entry.getValue(), entry.getKey().target.target));
        }
    }

    // handle round robin
    if (!simulate)
        roundRobinCounter++;
    if (roundRobinCounter >= validDirections.size())
        roundRobinCounter = 0;
    return validDirections.get(roundRobinCounter);
}
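
A notable detail above is that traversingNodes and trackingExportDirection are advanced in lockstep: every add to the node queue is mirrored by an add to the direction queue, so each polled node arrives paired with the first hop that reached it. Below is a stripped-down, self-contained sketch of that paired-queue pattern, with plain String ids standing in for TubeNode and ForgeDirection:

import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;

public class PairedQueueBfs {

    /** Maps every reachable node to the first hop taken from the origin to reach it. */
    public static Map<String, String> firstHops(Map<String, List<String>> graph, String origin) {
        Queue<String> nodes = new LinkedList<String>(); // nodes awaiting a visit
        Queue<String> hops = new LinkedList<String>();  // first hop that reached each queued node
        Map<String, String> firstHopByNode = new HashMap<String, String>();

        // seed with the origin's direct neighbors (the "firstRun" case in the code above)
        for (String next : graph.getOrDefault(origin, Collections.<String>emptyList())) {
            firstHopByNode.put(next, next); // a direct neighbor is its own first hop
            nodes.add(next);
            hops.add(next);
        }

        while (!nodes.isEmpty()) {
            String node = nodes.poll();
            String hop = hops.poll(); // one poll per poll keeps the queues in lockstep
            for (String next : graph.getOrDefault(node, Collections.<String>emptyList())) {
                if (!firstHopByNode.containsKey(next) && !next.equals(origin)) {
                    firstHopByNode.put(next, hop); // deeper nodes inherit the first hop
                    nodes.add(next); // every add to the node queue...
                    hops.add(hop);   // ...is mirrored on the hop queue
                }
            }
        }
        return firstHopByNode;
    }
}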

From source file:candr.yoclip.ParserTest.java

@Test
public void getParsedArgument() {
    final ParserOptions<ParserTest> mockParserOptions = createMockParserParameters();

    final Queue<String> parameters = new LinkedList<String>();

    final Parser<ParserTest> testCase = new Parser<ParserTest>(mockParserOptions,
            createMockParserHelpFactory());
    assertThat("empty queue", testCase.getParsedArgument(parameters), nullValue());

    final String expectedParameter = "argument parameter";
    parameters.add(expectedParameter);
    ParsedOption<ParserTest> parsedOption = testCase.getParsedArgument(parameters);
    assertThat("parsed parameter error", parsedOption.isError(), is(true));
    assertThat("queue size after parsed parameter error", parameters.size(), is(0));

    final ParserOption<ParserTest> mockArguments = createMockArguments();
    when(mockParserOptions.getArguments()).thenReturn(mockArguments);

    final String remainingParameter = "remaining parameter";
    parameters.add(expectedParameter);
    parameters.add(remainingParameter);
    parsedOption = testCase.getParsedArgument(parameters);
    assertThat("parsed parameter not error", parsedOption.isError(), is(false));
    assertThat("parser parameter", parsedOption.getParserOption(), is(mockArguments));
    assertThat("value", parsedOption.getValue(), is(expectedParameter));
    assertThat("queue size", parameters.size(), is(1));
    assertThat("remaining parameter", parameters.remove(), is(remainingParameter));
}
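
The test drives Parser.getParsedArgument by treating the Queue as a FIFO stream of command-line parameters. Here is a minimal sketch, independent of the yoclip API, of the FIFO discipline it relies on, using the same LinkedList-backed queue:

import java.util.LinkedList;
import java.util.Queue;

public class FifoParameterDemo {
    public static void main(String[] args) {
        Queue<String> parameters = new LinkedList<String>();
        parameters.add("--verbose"); // LinkedList has no capacity limit,
        parameters.add("input.txt"); // so add here always returns true

        System.out.println(parameters.peek());   // "--verbose" (inspects the head, does not remove)
        System.out.println(parameters.remove()); // "--verbose"
        System.out.println(parameters.poll());   // "input.txt"
        System.out.println(parameters.poll());   // null: poll on an empty queue returns null
        // parameters.remove() here would throw NoSuchElementException instead
    }
}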

From source file:voldemort.tools.KeyVersionFetcherCLI.java

public boolean sampleStore(StoreDefinition storeDefinition) {
    String storeName = storeDefinition.getName();

    String keysFileName = inDir + System.getProperty("file.separator") + storeName + ".keys";
    File keysFile = new File(keysFileName);
    if (!keysFile.exists()) {
        logger.error("Keys file " + keysFileName + " does not exist!");
        return false;
    }

    String kvFileName = outDir + System.getProperty("file.separator") + storeName + ".kvs";
    File kvFile = new File(kvFileName);
    if (kvFile.exists()) {
        logger.info("Key-Version file " + kvFileName + " exists, so will not sample keys from file "
                + keysFileName + ".");
        return true;
    }

    BaseStoreRoutingPlan storeRoutingPlan = new BaseStoreRoutingPlan(cluster, storeDefinition);
    BufferedReader keyReader = null;
    BufferedWriter kvWriter = null;
    try {
        keyReader = new BufferedReader(new FileReader(keysFileName));
        kvWriter = new BufferedWriter(new FileWriter(kvFileName));

        boolean readAllKeys = false;
        while (!readAllKeys) {
            Queue<Future<String>> futureKVs = new LinkedList<Future<String>>();
            for (int numFetchTasks = 0; numFetchTasks < this.outputBatchSize; numFetchTasks++) {
                String keyLine = keyReader.readLine();
                if (keyLine == null) {
                    readAllKeys = true;
                    break;
                }
                byte[] keyInBytes = ByteUtils.fromHexString(keyLine.trim());
                FetchKeyVersionsTask kvFetcher = new FetchKeyVersionsTask(storeRoutingPlan, keyInBytes);
                Future<String> future = kvFetcherService.submit(kvFetcher);
                futureKVs.add(future);
            }

            // drain the queued futures, collecting results in submission order
            while (!futureKVs.isEmpty()) {
                Future<String> future = futureKVs.poll();
                String keyVersions = future.get();
                kvWriter.append(keyVersions);
            }
        }
        return true;
    } catch (DecoderException de) {
        logger.error("Could not decode key to sample for store " + storeName, de);
        return false;
    } catch (IOException ioe) {
        logger.error("IOException caught while sampling store " + storeName, ioe);
        return false;
    } catch (InterruptedException ie) {
        logger.error("InterruptedException caught while sampling store " + storeName, ie);
        return false;
    } catch (ExecutionException ee) {
        logger.error("Encountered an execution exception while sampling " + storeName, ee);
        ee.printStackTrace();
        return false;
    } finally {
        if (keyReader != null) {
            try {
                keyReader.close();
            } catch (IOException e) {
                logger.error("IOException caught while trying to close keyReader for store " + storeName, e);
                e.printStackTrace();
            }
        }
        if (kvWriter != null) {
            try {
                kvWriter.close();
            } catch (IOException e) {
                logger.error("IOException caught while trying to close kvWriter for store " + storeName, e);
                e.printStackTrace();
            }
        }
    }
}
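
The sampler batches its work by queueing the Future of every submitted fetch task and then draining the queue, which collects results in submission order. Here is a minimal sketch of that batch-and-drain pattern, with a trivial placeholder task standing in for FetchKeyVersionsTask:

import java.util.LinkedList;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FutureBatchDemo {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        Queue<Future<String>> futures = new LinkedList<Future<String>>();

        for (int i = 0; i < 8; i++) {
            final int task = i;
            futures.add(pool.submit(() -> "result-" + task)); // the queue preserves submission order
        }

        while (!futures.isEmpty()) {
            System.out.println(futures.poll().get()); // get() blocks until that task completes
        }
        pool.shutdown();
    }
}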

From source file:it.geosolutions.geobatch.migrationmonitor.statuschecker.CheckerAction.java

@Override
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> arg0) throws ActionException {

    // return object
    final Queue<FileSystemEvent> outputEvents = new LinkedList<FileSystemEvent>();

    try {
        //gather the input file in order to read the database table name
        File flowTempDirectory = new File(getTempDir().getParent());
        File[] files = flowTempDirectory.listFiles();
        File inputEventFile = null;
        if (files != null && files.length > 0) {
            for (File f : files) {
                if (f.isFile() && f.getName().endsWith(".xml")) {
                    inputEventFile = f;
                }
            }
        } else {
            throw new Exception("One file, type xml,  is expected in the root of the temp directory");
        }

        // set as the action output event the flow input event
        FileSystemEvent fse = new FileSystemEvent(inputEventFile, FileSystemEventType.FILE_ADDED);
        outputEvents.add(fse);

        //parse the xml file and get the table name
        String tableName = "";
        String host = "";
        String db = "";
        String schema = "";
        try {
            DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
            DocumentBuilder dBuilder;
            dBuilder = dbFactory.newDocumentBuilder();
            Document doc = dBuilder.parse(inputEventFile);
            doc.getDocumentElement().normalize();
            NodeList nodes = doc.getElementsByTagName("typeName");
            if (nodes == null || nodes.getLength() != 1) {
                throw new Exception("exactly one typeName element is expected in the input event");
            }
            Node n = nodes.item(0);
            tableName = n.getTextContent();
            NodeList entries = doc.getElementsByTagName("entry");
            host = extractEntry("server", entries);
            if (host == null) {
                host = extractEntry("host", entries);
            }
            db = extractEntry("instance", entries);
            if (db == null) {
                db = extractEntry("database", entries);
            }
            schema = extractEntry("schema", entries);
            LOGGER.info("Changing state to MIGRATED for records with: server_ip:'" + host + "' db:'" + db
                    + "' schema_nome:'" + schema + "' tabella:'" + tableName + "'");
        } catch (Exception e) {
            LOGGER.error(e.getMessage(), e);
            throw new Exception("Error while parsing input file... exception message: " + e.getMessage());
        }
        LOGGER.info("The table name is: " + tableName);

        //change the status in the strati_rif table
        MigrationMonitor mm = migrationMonitorDAO.findByTablename(host, db, schema, tableName);
        mm.setMigrationStatus(MigrationStatus.MIGRATED.toString().toUpperCase());
        migrationMonitorDAO.merge(mm);

    } catch (Exception t) {
        final String message = "CheckerAction::execute(): " + t.getLocalizedMessage();
        if (LOGGER.isErrorEnabled())
            LOGGER.error(message, t);
        final ActionException exc = new ActionException(this, message, t);
        listenerForwarder.failed(exc);
        throw exc;
    }

    return outputEvents;
}

From source file:com.streamsets.pipeline.lib.jdbc.multithread.TestMultithreadedTableProvider.java

@Test
public void tableAndSchemasFinished() throws InterruptedException, StageException {
    String schema1 = "schema1";
    String table1Name = "table1";
    String table2Name = "table2";
    String schema2 = "schema2";
    String table3Name = "table3";

    String offsetCol = null;
    final String partitionSize = null;
    int maxActivePartitions = 0;
    int threadNumber = 0;
    int numThreads = 1;

    TableContext tableContext1 = createTableContext(schema1, table1Name, offsetCol, partitionSize,
            maxActivePartitions, false);
    TableContext tableContext2 = createTableContext(schema1, table2Name, offsetCol, partitionSize,
            maxActivePartitions, false);
    TableContext tableContext3 = createTableContext(schema2, table3Name, offsetCol, partitionSize,
            maxActivePartitions, false);

    Map<String, TableContext> tableContextMap = new HashMap<>();

    tableContextMap.put(tableContext1.getQualifiedName(), tableContext1);
    tableContextMap.put(tableContext2.getQualifiedName(), tableContext2);
    tableContextMap.put(tableContext3.getQualifiedName(), tableContext3);
    Queue<String> sortedTableOrder = new LinkedList<>();

    sortedTableOrder.add(tableContext1.getQualifiedName());
    sortedTableOrder.add(tableContext2.getQualifiedName());
    sortedTableOrder.add(tableContext3.getQualifiedName());

    Map<Integer, Integer> threadNumToMaxTableSlots = new HashMap<>();

    BatchTableStrategy batchTableStrategy = BatchTableStrategy.PROCESS_ALL_AVAILABLE_ROWS_FROM_TABLE;
    MultithreadedTableProvider provider = new MultithreadedTableProvider(tableContextMap, sortedTableOrder,
            threadNumToMaxTableSlots, numThreads, batchTableStrategy);

    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(3));

    TableRuntimeContext table1 = provider.nextTable(threadNumber);
    Assert.assertEquals(table1Name, table1.getSourceTableContext().getTableName());

    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(3));
    // there should be two tables remaining in schema1 (table1 and table2)
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema1).size(), equalTo(2));
    // and one remaining in schema2 (table3)
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    final AtomicBoolean tableFinished = new AtomicBoolean(false);
    final AtomicBoolean schemaFinished = new AtomicBoolean(false);
    final List<String> schemaFinishedTables = new LinkedList<>();

    // finish table1
    provider.reportDataOrNoMoreData(table1, 10, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // table should be finished
    assertTrue(tableFinished.get());

    // schema should not
    assertFalse(schemaFinished.get());
    assertThat(schemaFinishedTables, empty());
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(1));

    // there should be a total of two remaining entries in the map
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(2));
    // one of which is in schema1
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema1).size(), equalTo(1));
    // and one of which is in schema2
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    provider.releaseOwnedTable(table1, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    TableRuntimeContext table2 = provider.nextTable(threadNumber);
    Assert.assertEquals(table2Name, table2.getSourceTableContext().getTableName());

    // finish table2
    provider.reportDataOrNoMoreData(table2, 10, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // table should be finished
    assertTrue(tableFinished.get());
    // as should the schema this time
    assertTrue(schemaFinished.get());
    assertThat(schemaFinishedTables, hasSize(2));
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(2));
    // there should only be one entry left now
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(1));
    assertTrue(provider.getRemainingSchemasToTableContexts().get(schema1).isEmpty());
    // which is for schema2
    assertThat(provider.getRemainingSchemasToTableContexts().get(schema2).size(), equalTo(1));

    provider.releaseOwnedTable(table2, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    TableRuntimeContext table3 = provider.nextTable(threadNumber);
    Assert.assertEquals(table3Name, table3.getSourceTableContext().getTableName());

    // suppose we did NOT actually reach the end of table3, in which case the conditions should be the same as above
    provider.reportDataOrNoMoreData(table3, 10, 10, false, tableFinished, schemaFinished, schemaFinishedTables);

    // now neither the table
    assertFalse(tableFinished.get());
    // nor schema should be finished
    assertFalse(schemaFinished.get());
    assertThat(schemaFinishedTables, empty());
    // and entries in the map should be the same as above
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(2));
    assertThat(provider.getRemainingSchemasToTableContexts().size(), equalTo(1));
    assertTrue(provider.getRemainingSchemasToTableContexts().get(schema1).isEmpty());

    provider.releaseOwnedTable(table3, 1);
    tableFinished.set(false);
    schemaFinished.set(false);
    schemaFinishedTables.clear();

    // cycle through table1 and table2 again
    table1 = provider.nextTable(threadNumber);
    provider.releaseOwnedTable(table1, 1);
    table2 = provider.nextTable(threadNumber);
    provider.releaseOwnedTable(table2, 1);

    // and get back to table3
    table3 = provider.nextTable(threadNumber);
    Assert.assertEquals(table3Name, table3.getSourceTableContext().getTableName());

    // now suppose we have finally finished table3
    provider.reportDataOrNoMoreData(table3, 3, 10, true, tableFinished, schemaFinished, schemaFinishedTables);

    // both table
    assertTrue(tableFinished.get());
    // and schema should be finished
    assertTrue(schemaFinished.get());
    assertThat(schemaFinishedTables, hasSize(1));
    assertThat(provider.getTablesWithNoMoreData().size(), equalTo(3));
    // there should now be no more entries in this map
    assertTrue(provider.getRemainingSchemasToTableContexts().isEmpty());

    provider.releaseOwnedTable(table3, 1);

    assertTrue(provider.shouldGenerateNoMoreDataEvent());

}

From source file:it.geosolutions.geobatch.unredd.script.publish.PublishingAction.java

/**
 * Main loop over the input files. Single-file processing is delegated to
 * executeInternal(File xmlFile).
 */
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {

    // ****************************************
    // initialize PostGISUtils, Geostore and paths
    //
    // ****************************************

    try {
        initialize();
    } catch (Exception e) {
        LOGGER.error("Exception during component initialization", e);
        throw new ActionException(this, "Exception during initialization");
    }

    final Queue<FileSystemEvent> ret = new LinkedList<FileSystemEvent>();
    while (!events.isEmpty()) {
        final FileSystemEvent ev = events.remove();

        try {
            if (ev != null) {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("PublishingAction.execute(): working on incoming event: " + ev.getSource());
                }

                File xmlFile = ev.getSource(); // this is the input xml file
                executeInternal(xmlFile);
                ret.add(new FileSystemEvent(xmlFile, FileSystemEventType.FILE_ADDED));

            } else {
                LOGGER.error("PublishingAction.execute(): Encountered a NULL event: SKIPPING...");
                continue;
            }

        } catch (ActionException ex) { // ActionEx have already been processed
            LOGGER.error(ex.getMessage(), ex);
            throw ex;

        } catch (Exception ex) {
            final String message = "PublishingAction.execute(): Unable to produce the output: "
                    + ex.getLocalizedMessage();
            LOGGER.error(message, ex);
            throw new ActionException(this, message);
        }
    }

    return ret;
}
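
Both GeoBatch actions above share the same shape: drain the incoming event queue with remove, skip null events, and accumulate output events with add. Here is a minimal, framework-free sketch of that drain-and-collect loop, with String standing in for FileSystemEvent:

import java.util.LinkedList;
import java.util.Queue;

public class EventPipelineSketch {

    /** Consumes every input event and returns one output event per non-null input. */
    public static Queue<String> execute(Queue<String> events) {
        Queue<String> out = new LinkedList<String>();
        while (!events.isEmpty()) {
            String ev = events.remove(); // consume the input queue head-first
            if (ev == null) {
                continue; // skip null events, as the actions above do
            }
            out.add("processed:" + ev); // each input yields one output event
        }
        return out;
    }
}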

From source file:org.jboss.errai.ioc.rebind.ioc.graph.impl.DependencyGraphBuilderImpl.java

private void removeUnreachableConcreteInjectables() {
    final Set<String> reachableNames = new HashSet<String>();
    final Queue<Injectable> processingQueue = new LinkedList<Injectable>();
    for (final Injectable injectable : concretesByName.values()) {
        if (!injectable.getWiringElementTypes().contains(WiringElementType.Simpleton)
                && !reachableNames.contains(injectable.getFactoryName())) {
            processingQueue.add(injectable);
            do {
                final Injectable processedInjectable = processingQueue.poll();
                reachableNames.add(processedInjectable.getFactoryName());
                for (final Dependency dep : processedInjectable.getDependencies()) {
                    final Injectable resolvedDep = getResolvedDependency(dep, processedInjectable);
                    if (!reachableNames.contains(resolvedDep.getFactoryName())) {
                        processingQueue.add(resolvedDep);
                    }
                }
            } while (!processingQueue.isEmpty());
        }
    }

    concretesByName.keySet().retainAll(reachableNames);
}

From source file:com.baifendian.swordfish.common.utils.graph.Graph.java

/**
 * Performs a topological sort of the graph.
 *
 * @return an entry whose key reports whether the sort succeeded (true when the graph is acyclic,
 *         false when a cycle was detected) and whose value is the vertices in topological order
 */
private Map.Entry<Boolean, List<VK>> topologicalSortImpl() {
    List<VK> sort = new ArrayList<>();
    Queue<VK> zeroVertex = new LinkedList<>();
    Map<VK, Integer> indegrees = new HashMap<>();

    synchronized (this) {
        // compute the in-degree of every vertex; vertices with in-degree 0 enter the result immediately
        for (Map.Entry<VK, VD> id2Vertex : vertices.entrySet()) {
            VK key = id2Vertex.getKey();
            int inDegree = getIndegree(key);

            if (inDegree == 0) {
                sort.add(key);
                zeroVertex.add(key);
            } else {
                indegrees.put(key, inDegree);
            }
        }

        // standard topological pass: repeatedly take a vertex with in-degree 0 and decrement the in-degree of its successors
        while (!zeroVertex.isEmpty()) {
            VK key = zeroVertex.poll();
            Collection<VK> postNodes = getPostNode(key);

            for (VK postKey : postNodes) {
                int d = indegrees.getOrDefault(postKey, 0);

                if (d <= 1) {
                    sort.add(postKey);
                    indegrees.remove(postKey);
                    zeroVertex.add(postKey);
                } else {
                    indegrees.put(postKey, d - 1);
                }
            }
        }
    }

    // if indegrees is not empty, some vertices were never released: the graph contains a cycle
    return new AbstractMap.SimpleEntry<>(indegrees.isEmpty(), sort);
}
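
A hypothetical caller of topologicalSortImpl would branch on the entry's key to detect a cycle. This self-contained sketch shows the same (success, order) return convention with a stubbed sort:

import java.util.AbstractMap;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

public class SortResultDemo {

    // stand-in for topologicalSortImpl: key = acyclic?, value = the order produced
    static Map.Entry<Boolean, List<String>> fakeSort(boolean acyclic) {
        return new AbstractMap.SimpleEntry<>(acyclic, Arrays.asList("a", "b", "c"));
    }

    public static void main(String[] args) {
        Map.Entry<Boolean, List<String>> result = fakeSort(true);
        if (result.getKey()) {
            for (String vertex : result.getValue()) {
                System.out.println(vertex); // vertices arrive in dependency order
            }
        } else {
            System.out.println("graph contains a cycle; order is incomplete");
        }
    }
}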

From source file:eu.netide.mms.MMSManager.java

private List<MMSStoreEntry> searchRuleGraph(MMSStoreEntry entry) {

    Queue<MMSStoreEntry> queue = new LinkedList<MMSStoreEntry>();
    List<MMSStoreEntry> listToDelete = Lists.newArrayList();
    MMSStoreEntry copyEntry = new DefaultMMSEntry(entry);

    copyEntry.setVisited(true);
    queue.add(copyEntry);

    while (!queue.isEmpty()) {
        MMSStoreEntry v = queue.poll();
        for (MMSStoreEntry w : v.getRuleParents()) {
            MMSStoreEntry copyInside = new DefaultMMSEntry(w);
            if (!copyInside.getVisited()) {
                copyInside.setVisited(true);
                queue.add(copyInside);
                listToDelete.add(w);
            }
        }
    }

    return listToDelete;
}