Example usage for java.util Queue poll

List of usage examples for java.util Queue poll

Introduction

On this page you can find example usages of java.util Queue.poll().

Prototype

E poll();

Document

Retrieves and removes the head of this queue, or returns null if this queue is empty.
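
Unlike remove(), poll() signals an empty queue by returning null instead of throwing NoSuchElementException. A minimal standalone sketch of that contract:

import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;

public class QueuePollDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.add("first");
        queue.add("second");

        System.out.println(queue.poll()); // "first" -- head is retrieved and removed
        System.out.println(queue.poll()); // "second"
        System.out.println(queue.poll()); // null -- queue is now empty

        try {
            queue.remove(); // remove() throws on an empty queue instead of returning null
        } catch (NoSuchElementException e) {
            System.out.println("remove() on an empty queue throws: " + e);
        }
    }
}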

Usage

From source file:ubic.gemma.core.search.SearchServiceImpl.java

private Collection<SearchResult> characteristicExpressionExperimentSearch(final SearchSettings settings) {

    // This is a collection because of the characteristicService API; a
    // findByUri(Class<?>...) overload could be added instead.
    Collection<Class<?>> classToSearch = new ArrayList<>(1);

    // order matters if we hit the limits
    Queue<Class<?>> orderedClassesToSearch = new LinkedList<>();
    orderedClassesToSearch.add(ExpressionExperiment.class);
    orderedClassesToSearch.add(FactorValue.class);
    orderedClassesToSearch.add(BioMaterial.class);

    Collection<SearchResult> results = new HashSet<>();

    StopWatch watch = new StopWatch();
    watch.start();

    while (results.size() < SearchServiceImpl.SUFFICIENT_EXPERIMENT_RESULTS_FROM_CHARACTERISTICS
            && !orderedClassesToSearch.isEmpty()) {
        classToSearch.clear();
        classToSearch.add(orderedClassesToSearch.poll());
        // We handle the OR clauses here.
        String[] subclauses = settings.getQuery().split(" OR ");
        for (String subclause : subclauses) {
            /*
             * Note that the AND is applied only within one entity type. The fix would be to apply AND at this
             * level.
             */
            Collection<SearchResult> classResults = this.characteristicSearchWithChildren(classToSearch,
                    subclause);
            if (!classResults.isEmpty()) {
                String msg = "Found " + classResults.size() + " "
                        + classToSearch.iterator().next().getSimpleName()
                        + " results from characteristic search.";
                if (results.size() >= SearchServiceImpl.SUFFICIENT_EXPERIMENT_RESULTS_FROM_CHARACTERISTICS) {
                    msg += " Total found > "
                            + SearchServiceImpl.SUFFICIENT_EXPERIMENT_RESULTS_FROM_CHARACTERISTICS
                            + ", will not search for more entities.";
                }
                SearchServiceImpl.log.info(msg);
            }
            results.addAll(classResults);
        }

    }

    SearchServiceImpl.log.debug("ExpressionExperiment search: " + settings + " -> " + results.size()
            + " characteristic hits " + watch.getTime() + " ms");

    // Note that if we did this earlier (within each query), the limit SUFFICIENT_EXPERIMENT_RESULTS_FROM_CHARACTERISTICS
    // would have more meaning, but we would have to unroll the loop above.
    return filterExperimentHitsByTaxon(results, settings.getTaxon());
}
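
The loop above polls entity classes in priority order and stops early once enough results have accumulated. A minimal sketch of the same budgeted-drain idiom (the source names and the search helper are hypothetical):

import java.util.LinkedList;
import java.util.Queue;

class BudgetedDrain {
    static final int SUFFICIENT_RESULTS = 5;

    public static void main(String[] args) {
        Queue<String> sources = new LinkedList<>();
        sources.add("high-priority");
        sources.add("medium-priority");
        sources.add("low-priority");

        int results = 0;
        // Stop polling as soon as the budget is met, even if sources remain.
        while (results < SUFFICIENT_RESULTS && !sources.isEmpty()) {
            String source = sources.poll();
            results += search(source);
        }
        System.out.println("collected " + results + " results");
    }

    // Hypothetical stand-in for a per-source search returning a hit count.
    static int search(String source) {
        System.out.println("searching " + source);
        return 2;
    }
}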

From source file:org.gvnix.flex.entity.ActionScriptEntityMetadataProvider.java

private void createActionScriptMirrorClass(String asEntityId, ActionScriptType asType, JavaType javaType) {
    Queue<TypeMapping> relatedTypes = new LinkedList<TypeMapping>();

    List<MetaTagAttributeValue<?>> attributes = new ArrayList<MetaTagAttributeValue<?>>();
    attributes.add(new StringAttributeValue(new ActionScriptSymbolName(ALIAS_ATTR),
            javaType.getFullyQualifiedTypeName()));
    ASMetaTagMetadata remoteClassTag = new DefaultASMetaTagMetadata(REMOTE_CLASS_TAG, attributes);
    List<ASMetaTagMetadata> typeMetaTags = new ArrayList<ASMetaTagMetadata>();
    typeMetaTags.add(remoteClassTag);

    // TODO - for now we will only handle classes...interfaces could come
    // later but would add complexity (i.e., need
    // to find all implementations and mirror those as well)

    List<ASFieldMetadata> declaredFields = new ArrayList<ASFieldMetadata>();
    MemberDetails memberDetails = getMemberDetails(javaType);
    for (MethodMetadata method : MemberFindingUtils.getMethods(memberDetails)) {
        if (BeanInfoUtils.isAccessorMethod(method)) {
            JavaSymbolName propertyName = BeanInfoUtils.getPropertyNameForJavaBeanMethod(method);
            FieldMetadata javaField = BeanInfoUtils.getFieldForPropertyName(memberDetails, propertyName);

            // TODO - We don't add any meta-tags and we set the field to
            // public - any other choice?
            ASFieldMetadata asField = ActionScriptMappingUtils.toASFieldMetadata(asEntityId, javaField, true);
            relatedTypes.addAll(findRequiredMappings(javaField, asField));
            declaredFields.add(asField);
        }
    }

    ASClassOrInterfaceTypeDetails asDetails = new DefaultASClassOrInterfaceTypeDetails(asEntityId, asType,
            ASPhysicalTypeCategory.CLASS, declaredFields, null, null, null, null, null, typeMetaTags);
    // new DefaultASClassOrInterfaceTypeDetails(declaredByMetadataId, name,
    // physicalTypeCategory, declaredFields,
    // declaredConstructor, declaredMethods, superClass, extendsTypes,
    // implementsTypes, typeMetaTags);
    ASPhysicalTypeMetadata asMetadata = new DefaultASPhysicalTypeMetadata(asEntityId,
            getPhysicalLocationCanonicalPath(asEntityId), asDetails);
    getAsPhysicalTypeProvider().createPhysicalType(asMetadata);

    // Now trigger the creation of any related types
    while (!relatedTypes.isEmpty()) {
        TypeMapping mapping = relatedTypes.poll();
        createActionScriptMirrorClass(mapping.getMetadataId(), mapping.getAsType(), mapping.getJavaType());
    }
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

private void offloadLoop(CompletableFuture<PositionImpl> promise, Queue<LedgerInfo> ledgersToOffload,
        PositionImpl firstUnoffloaded, Optional<Throwable> firstError) {
    LedgerInfo info = ledgersToOffload.poll();
    if (info == null) {
        if (firstError.isPresent()) {
            promise.completeExceptionally(firstError.get());
        } else {
            promise.complete(firstUnoffloaded);
        }
    } else {
        long ledgerId = info.getLedgerId();
        UUID uuid = UUID.randomUUID();
        Map<String, String> extraMetadata = ImmutableMap.of("ManagedLedgerName", name);

        String driverName = config.getLedgerOffloader().getOffloadDriverName();
        Map<String, String> driverMetadata = config.getLedgerOffloader().getOffloadDriverMetadata();

        prepareLedgerInfoForOffloaded(ledgerId, uuid, driverName, driverMetadata)
                .thenCompose((ignore) -> getLedgerHandle(ledgerId))
                .thenCompose(readHandle -> config.getLedgerOffloader().offload(readHandle, uuid, extraMetadata))
                .thenCompose((ignore) -> {
                    return Retries
                            .run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1),
                                    TimeUnit.SECONDS.toHours(1)).limit(10), FAIL_ON_CONFLICT,
                                    () -> completeLedgerInfoForOffloaded(ledgerId, uuid), scheduledExecutor,
                                    name)
                            .whenComplete((ignore2, exception) -> {
                                if (exception != null) {
                                    cleanupOffloaded(ledgerId, uuid, driverName, driverMetadata,
                                            "Metastore failure");
                                }
                            });
                }).whenComplete((ignore, exception) -> {
                    if (exception != null) {
                        log.info("[{}] Exception occurred during offload", name, exception);

                        PositionImpl newFirstUnoffloaded = PositionImpl.get(ledgerId, 0);
                        if (newFirstUnoffloaded.compareTo(firstUnoffloaded) > 0) {
                            newFirstUnoffloaded = firstUnoffloaded;
                        }
                        Optional<Throwable> errorToReport = firstError;
                        synchronized (ManagedLedgerImpl.this) {
                            // if the ledger doesn't exist anymore, ignore the error
                            if (ledgers.containsKey(ledgerId)) {
                                errorToReport = Optional.of(firstError.orElse(exception));
                            }
                        }

                        offloadLoop(promise, ledgersToOffload, newFirstUnoffloaded, errorToReport);
                    } else {
                        ledgerCache.remove(ledgerId);
                        offloadLoop(promise, ledgersToOffload, firstUnoffloaded, firstError);
                    }
                });
    }
}
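
offloadLoop drives an asynchronous recursion and uses poll()'s null return as the signal that the queue has been drained. The same drain-until-null idiom in a minimal synchronous sketch (process is a hypothetical placeholder):

import java.util.LinkedList;
import java.util.Queue;

class DrainUntilNull {
    static void drain(Queue<String> tasks) {
        String task;
        // poll() returns null once the queue is empty, ending the loop
        // without a separate isEmpty() check.
        while ((task = tasks.poll()) != null) {
            process(task);
        }
    }

    // Hypothetical placeholder for per-item work.
    static void process(String task) {
        System.out.println("processing " + task);
    }
}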

From source file:ubic.gemma.search.SearchServiceImpl.java

/**
 * @param settings
 */
private Collection<SearchResult> characteristicExpressionExperimentSearch(final SearchSettings settings) {

    Collection<SearchResult> results = new HashSet<SearchResult>();

    Collection<Class<?>> classToSearch = new ArrayList<Class<?>>(1);
    Queue<Class<?>> orderedClassesToSearch = new LinkedList<Class<?>>();
    orderedClassesToSearch.add(ExpressionExperiment.class);
    orderedClassesToSearch.add(FactorValue.class);
    orderedClassesToSearch.add(BioMaterial.class);
    orderedClassesToSearch.add(Treatment.class);

    Collection<SearchResult> characterSearchResults = new HashSet<SearchResult>();

    while (characterSearchResults.size() < SUFFICIENT_EXPERIMENT_RESULTS_FROM_CHARACTERISTICS
            && !orderedClassesToSearch.isEmpty()) {
        classToSearch.clear();
        classToSearch.add(orderedClassesToSearch.poll());
        Collection<SearchResult> classResults = ontologySearchAnnotatedObject(classToSearch, settings);
        characterSearchResults.addAll(classResults);

        String msg = "Found " + classResults.size() + " " + classToSearch.iterator().next().getSimpleName()
                + " results from characteristic search.";
        if (characterSearchResults.size() >= SUFFICIENT_EXPERIMENT_RESULTS_FROM_CHARACTERISTICS) {
            msg += " Total found > " + SUFFICIENT_EXPERIMENT_RESULTS_FROM_CHARACTERISTICS
                    + ", will not search for more entities.";
        }
        log.info(msg);
    }

    StopWatch watch = new StopWatch();
    watch.start();

    // filter and get parents...
    int numEEs = 0;
    Collection<BioMaterial> biomaterials = new HashSet<BioMaterial>();
    Collection<FactorValue> factorValues = new HashSet<FactorValue>();
    Collection<Treatment> treatments = new HashSet<Treatment>();

    for (SearchResult sr : characterSearchResults) {
        Class<?> resultClass = sr.getResultClass();
        if (ExpressionExperiment.class.isAssignableFrom(resultClass)) {
            sr.setHighlightedText(sr.getHighlightedText() + " (characteristic)");
            results.add(sr);
            numEEs++;
        } else if (BioMaterial.class.isAssignableFrom(resultClass)) {
            biomaterials.add((BioMaterial) sr.getResultObject());
        } else if (FactorValue.class.isAssignableFrom(resultClass)) {
            factorValues.add((FactorValue) sr.getResultObject());
        } else if (Treatment.class.isAssignableFrom(resultClass)) {
            treatments.add((Treatment) sr.getResultObject());
        }
    }

    /*
     * Much faster to batch it...
     */
    if (biomaterials.size() > 0) {
        Collection<ExpressionExperiment> ees = expressionExperimentService.findByBioMaterials(biomaterials);
        for (ExpressionExperiment ee : ees) {
            results.add(new SearchResult(ee, INDIRECT_DB_HIT_PENALTY, "BioMaterial characteristic"));
        }
    }

    if (factorValues.size() > 0) {
        Collection<ExpressionExperiment> ees = expressionExperimentService.findByFactorValues(factorValues);
        for (ExpressionExperiment ee : ees) {
            if (log.isDebugEnabled())
                log.debug(ee);
            results.add(new SearchResult(ee, INDIRECT_DB_HIT_PENALTY, "Factor characteristic"));
        }
    }

    if (treatments.size() > 0) {
        log.info("Not processing treatments, but hits were found");
        // Collection<ExpressionExperiment> ees = expressionExperimentService.findByTreatments( treatments );
        // for ( ExpressionExperiment ee : ees ) {
        // if ( !results.contains( ee ) ) {
        // results.add( new SearchResult( ee, INDIRECT_DB_HIT_PENALTY, "Treatment" ) );
        // }
        // }
    }

    if (log.isDebugEnabled()) {
        log.debug(
                "ExpressionExperiment search: " + settings + " -> " + results.size() + " characteristic hits");
    }

    if (watch.getTime() > 1000) {
        log.info("Retrieving " + results.size() + " experiments from " + characterSearchResults.size()
                + " retrieved characteristics took " + watch.getTime() + " ms");
        log.info("Breakdown: " + numEEs + " via direct association with EE; " + biomaterials.size()
                + " via association with Biomaterial; " + factorValues.size() + " via experimental design");
    }

    return results;
}

From source file:org.kuali.rice.krad.service.impl.DictionaryValidationServiceImpl.java

/**
 * process constraints for the provided value using the provided constraint processors
 *
 * @param result - used to store the validation results
 * @param value - the object on which constraints are to be processed - a collection or the value of an attribute
 * @param definition - a Data Dictionary definition e.g. {@code ComplexAttributeDefinition} or {@code
 * CollectionDefinition}
 * @param attributeValueReader - a class that encapsulate access to both dictionary metadata and object field
 * values
 * @param doOptionalProcessing - true if the validation should do optional validation, false otherwise
 */
@SuppressWarnings("unchecked")
private void processConstraints(DictionaryValidationResult result,
        List<? extends ConstraintProcessor> constraintProcessors, Object value, Constrainable definition,
        AttributeValueReader attributeValueReader, boolean doOptionalProcessing, String validationState,
        StateMapping stateMapping) {
    //TODO: Implement custom validators

    if (constraintProcessors != null) {
        Constrainable selectedDefinition = definition;
        AttributeValueReader selectedAttributeValueReader = attributeValueReader;

        // First - take the constrainable definition and get its constraints

        Queue<Constraint> constraintQueue = new LinkedList<Constraint>();

        // Using a for loop to iterate through constraint processors because ordering is important
        for (ConstraintProcessor<Object, Constraint> processor : constraintProcessors) {

            // Let the calling method opt out of any optional processing
            if (!doOptionalProcessing && processor.isOptional()) {
                result.addSkipped(attributeValueReader, processor.getName());
                continue;
            }

            Class<? extends Constraint> constraintType = processor.getConstraintType();

            // Add all of the constraints for this constraint type for all providers to the queue
            for (ConstraintProvider constraintProvider : constraintProviders) {
                if (constraintProvider.isSupported(selectedDefinition)) {
                    Collection<Constraint> constraintList = constraintProvider
                            .getConstraints(selectedDefinition, constraintType);
                    if (constraintList != null) {
                        constraintQueue.addAll(constraintList);
                    }
                }
            }

            // If there are no constraints provided for this definition, then just skip it
            if (constraintQueue.isEmpty()) {
                result.addSkipped(attributeValueReader, processor.getName());
                continue;
            }

            Collection<Constraint> additionalConstraints = new LinkedList<Constraint>();

            // This loop is functionally identical to a for loop, but it has the advantage of letting us keep the queue around
            // and populate it with any new constraints contributed by the processor
            while (!constraintQueue.isEmpty()) {

                Constraint constraint = constraintQueue.poll();

                // If this constraint is not one that this processor handles, skip it and add it to the queue for the
                // next processor; obviously this would be redundant (we're only looking at constraints that this
                // processor can process) except that the previous processor might have stuck a new constraint (or
                // constraints) on the queue
                if (!constraintType.isInstance(constraint)) {
                    result.addSkipped(attributeValueReader, processor.getName());
                    additionalConstraints.add(constraint);
                    continue;
                }

                constraint = ConstraintStateUtils.getApplicableConstraint(constraint, validationState,
                        stateMapping);

                if (constraint != null) {
                    ProcessorResult processorResult = processor.process(result, value, constraint,
                            selectedAttributeValueReader);

                    Collection<Constraint> processorResultConstraints = processorResult.getConstraints();
                    if (processorResultConstraints != null && processorResultConstraints.size() > 0) {
                        constraintQueue.addAll(processorResultConstraints);
                    }
                    }

                    // Change the selected definition to whatever was returned from the processor
                    if (processorResult.isDefinitionProvided()) {
                        selectedDefinition = processorResult.getDefinition();
                    }
                    // Change the selected attribute value reader to whatever was returned from the processor
                    if (processorResult.isAttributeValueReaderProvided()) {
                        selectedAttributeValueReader = processorResult.getAttributeValueReader();
                    }
                }
            }

            // After iterating through all the constraints for this processor, add the ones that weren't consumed by this processor to the queue
            constraintQueue.addAll(additionalConstraints);
        }
    }
}
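
The constraintQueue above is a worklist: while the loop drains it with poll(), a processor may append new constraints, which later iterations pick up. A minimal sketch of that worklist idiom, assuming integer tasks that can spawn follow-up tasks:

import java.util.LinkedList;
import java.util.Queue;

class Worklist {
    public static void main(String[] args) {
        Queue<Integer> work = new LinkedList<>();
        work.add(3);

        while (!work.isEmpty()) {
            int task = work.poll();
            System.out.println("handling task " + task);
            // Handling a task may enqueue follow-up work; the loop keeps
            // running until no task produces anything new.
            if (task > 0) {
                work.add(task - 1);
            }
        }
    }
}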

From source file:de.uni_koblenz.jgralab.utilities.rsa2tg.Rsa2Tg.java

private void checkAttributes() {
    GraphClass graphClass = sg.getFirstGraphClass();
    Map<String, AttributedElementClass> definedAttributes = new HashMap<>();
    for (Attribute a : graphClass.get_attributes()) {
        if (definedAttributes.containsKey(a.get_name())) {
            throw new RuntimeException(
                    "Attribute " + a.get_name() + " at " + graphClass.get_qualifiedName() + " is duplicate.");
        }
        definedAttributes.put(a.get_name(), graphClass);
    }

    for (GraphElementClass gec : sg.getGraphElementClassVertices()) {
        boolean isVertexClass = gec.isInstanceOf(VertexClass.VC);
        definedAttributes = new HashMap<>();
        BooleanGraphMarker alreadyChecked = new BooleanGraphMarker(sg);
        Queue<GraphElementClass> queue = new LinkedList<>();
        queue.add(gec);
        while (!queue.isEmpty()) {
            GraphElementClass current = queue.poll();
            if (alreadyChecked.isMarked(current)) {
                continue;
            }
            for (Attribute att : current.get_attributes()) {
                if (definedAttributes.containsKey(att.get_name())) {
                    AttributedElementClass childClass = definedAttributes.get(att.get_name());
                    throw new RuntimeException("The name of the "
                            + ((childClass == gec) && (current != gec) ? "" : "inherited ") + "attribute "
                            + att.get_name() + " of " + (isVertexClass ? "VertexClass" : "EdgeClass") + " "
                            + childClass.get_qualifiedName()
                            + (current == gec ? " is duplicate"
                                    : (" is the same name as the inherited attribute of "
                                            + (isVertexClass ? "VertexClass" : "EdgeClass") + " "
                                            + current.get_qualifiedName()))
                            + ".");
                } else {
                    definedAttributes.put(att.get_name(), current);
                }
            }
            alreadyChecked.mark(current);
            for (Edge toSuperClass : current.incidences(
                    isVertexClass ? SpecializesVertexClass.EC : SpecializesEdgeClass.EC, EdgeDirection.OUT)) {
                GraphElementClass superClass = (GraphElementClass) toSuperClass.getThat();
                if (!alreadyChecked.isMarked(superClass)) {
                    queue.add(superClass);
                }
            }
        }
    }
}
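
checkAttributes() uses poll() to drive a breadth-first walk up the specialization hierarchy, marking each class so it is visited only once. The same pattern in a self-contained sketch, with an adjacency map standing in for the graph API:

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;

class BreadthFirst {
    static List<String> bfs(Map<String, List<String>> graph, String start) {
        List<String> order = new ArrayList<>();
        Set<String> visited = new HashSet<>();
        Queue<String> queue = new LinkedList<>();
        queue.add(start);

        while (!queue.isEmpty()) {
            String current = queue.poll();
            if (!visited.add(current)) {
                continue; // already processed via another path
            }
            order.add(current);
            for (String neighbor : graph.getOrDefault(current, Collections.emptyList())) {
                if (!visited.contains(neighbor)) {
                    queue.add(neighbor);
                }
            }
        }
        return order;
    }
}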

From source file:com.clxcommunications.xms.ApiConnectionIT.java

/**
 * Verifies that the default HTTP client actually can handle multiple
 * simultaneous requests.
 * 
 * @throws Exception
 *             shouldn't happen
 */
@Test
public void canCancelBatchConcurrently() throws Exception {
    String spid = TestUtils.freshServicePlanId();

    // Set up the first request (the one that will be delayed).
    MtBatchSmsResult expected1 = MtBatchTextSmsResult.builder().sender("12345")
            .addRecipient("123456789", "987654321").body("Hello, world!").canceled(true)
            .id(TestUtils.freshBatchId()).createdAt(OffsetDateTime.now()).modifiedAt(OffsetDateTime.now())
            .build();

    String path1 = "/v1/" + spid + "/batches/" + expected1.id();
    byte[] response1 = json.writeValueAsBytes(expected1);

    wm.stubFor(delete(urlEqualTo(path1)).willReturn(aResponse().withFixedDelay(500) // Delay for a while.
            .withStatus(200).withHeader("Content-Type", "application/json; charset=UTF-8")
            .withBody(response1)));

    // Set up the second request.
    MtBatchSmsResult expected2 = MtBatchBinarySmsResult.builder().sender("12345")
            .addRecipient("123456789", "987654321").body("Hello, world!".getBytes()).udh((byte) 1)
            .canceled(true).id(TestUtils.freshBatchId()).createdAt(OffsetDateTime.now())
            .modifiedAt(OffsetDateTime.now()).build();

    String path2 = "/v1/" + spid + "/batches/" + expected2.id();

    stubDeleteResponse(expected2, path2);

    ApiConnection conn = ApiConnection.builder().servicePlanId(spid).token("tok")
            .endpoint("http://localhost:" + wm.port()).start();

    try {
        final Queue<MtBatchSmsResult> results = new ConcurrentArrayQueue<MtBatchSmsResult>();
        final CountDownLatch latch = new CountDownLatch(2);

        FutureCallback<MtBatchSmsResult> callback = new TestCallback<MtBatchSmsResult>() {

            @Override
            public void completed(MtBatchSmsResult result) {
                results.add(result);
                latch.countDown();
            }

        };

        conn.cancelBatchAsync(expected1.id(), callback);
        Thread.sleep(100);
        conn.cancelBatchAsync(expected2.id(), callback);

        // Wait for callback to be called.
        latch.await();

        // We expect the second message to be handled first.
        assertThat(results.size(), is(2));
        assertThat(results.poll(), is(expected2));
        assertThat(results.poll(), is(expected1));
    } finally {
        conn.close();
    }

    verifyDeleteRequest(path1);
    verifyDeleteRequest(path2);
}

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Save the vertices using the user-defined VertexOutputFormat from our
 * vertexArray based on the split.
 *
 * @param numLocalVertices Number of local vertices
 * @throws InterruptedException
 */
private void saveVertices(long numLocalVertices) throws IOException, InterruptedException {
    ImmutableClassesGiraphConfiguration<I, V, E> conf = getConfiguration();

    if (conf.getVertexOutputFormatClass() == null) {
        LOG.warn("saveVertices: " + GiraphConstants.VERTEX_OUTPUT_FORMAT_CLASS
                + " not specified -- there will be no saved output");
        return;
    }
    if (conf.doOutputDuringComputation()) {
        if (LOG.isInfoEnabled()) {
            LOG.info("saveVertices: The option for doing output during "
                    + "computation is selected, so there will be no saving of the "
                    + "output in the end of application");
        }
        return;
    }

    final int numPartitions = getPartitionStore().getNumPartitions();
    int numThreads = Math.min(getConfiguration().getNumOutputThreads(), numPartitions);
    LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO, "saveVertices: Starting to save "
            + numLocalVertices + " vertices " + "using " + numThreads + " threads");
    final VertexOutputFormat<I, V, E> vertexOutputFormat = getConfiguration().createWrappedVertexOutputFormat();

    final Queue<Integer> partitionIdQueue = (numPartitions == 0) ? new LinkedList<Integer>()
            : new ArrayBlockingQueue<Integer>(numPartitions);
    Iterables.addAll(partitionIdQueue, getPartitionStore().getPartitionIds());

    long verticesToStore = 0;
    PartitionStore<I, V, E> partitionStore = getPartitionStore();
    for (int partitionId : partitionStore.getPartitionIds()) {
        Partition<I, V, E> partition = partitionStore.getOrCreatePartition(partitionId);
        verticesToStore += partition.getVertexCount();
        partitionStore.putPartition(partition);
    }
    WorkerProgress.get().startStoring(verticesToStore, getPartitionStore().getNumPartitions());

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {
                /** How often to update WorkerProgress */
                private static final long VERTICES_TO_UPDATE_PROGRESS = 100000;

                @Override
                public Void call() throws Exception {
                    VertexWriter<I, V, E> vertexWriter = vertexOutputFormat.createVertexWriter(getContext());
                    vertexWriter.setConf(getConfiguration());
                    vertexWriter.initialize(getContext());
                    long nextPrintVertices = 0;
                    long nextUpdateProgressVertices = VERTICES_TO_UPDATE_PROGRESS;
                    long nextPrintMsecs = System.currentTimeMillis() + 15000;
                    int partitionIndex = 0;
                    int numPartitions = getPartitionStore().getNumPartitions();
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }

                        Partition<I, V, E> partition = getPartitionStore().getOrCreatePartition(partitionId);
                        long verticesWritten = 0;
                        for (Vertex<I, V, E> vertex : partition) {
                            vertexWriter.writeVertex(vertex);
                            ++verticesWritten;

                            // Update status at most every 250k vertices or 15 seconds
                            if (verticesWritten > nextPrintVertices
                                    && System.currentTimeMillis() > nextPrintMsecs) {
                                LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
                                        "saveVertices: Saved " + verticesWritten + " out of "
                                                + partition.getVertexCount() + " partition vertices, "
                                                + "on partition " + partitionIndex + " out of "
                                                + numPartitions);
                                nextPrintMsecs = System.currentTimeMillis() + 15000;
                                nextPrintVertices = verticesWritten + 250000;
                            }

                            if (verticesWritten >= nextUpdateProgressVertices) {
                                WorkerProgress.get().addVerticesStored(VERTICES_TO_UPDATE_PROGRESS);
                                nextUpdateProgressVertices += VERTICES_TO_UPDATE_PROGRESS;
                            }
                        }
                        getPartitionStore().putPartition(partition);
                        ++partitionIndex;
                        WorkerProgress.get().addVerticesStored(verticesWritten % VERTICES_TO_UPDATE_PROGRESS);
                        WorkerProgress.get().incrementPartitionsStored();
                    }
                    vertexWriter.close(getContext()); // the temp results are saved now
                    return null;
                }
            };
        }
    };
    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "save-vertices-%d", getContext());

    LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO, "saveVertices: Done saving vertices.");
    // YARN: must complete the commit of the "task" output; Hadoop isn't there.
    if (getConfiguration().isPureYarnJob() && getConfiguration().getVertexOutputFormatClass() != null) {
        try {
            OutputCommitter outputCommitter = vertexOutputFormat.getOutputCommitter(getContext());
            if (outputCommitter.needsTaskCommit(getContext())) {
                LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
                        "OutputCommitter: committing task output.");
                // transfer from temp dirs to "task commit" dirs to prep for
                // the master's OutputCommitter#commitJob(context) call to finish.
                outputCommitter.commitTask(getContext());
            }
        } catch (InterruptedException ie) {
            LOG.error("Interrupted while attempting to obtain " + "OutputCommitter.", ie);
        } catch (IOException ioe) {
            LOG.error("Master task's attempt to commit output has " + "FAILED.", ioe);
        }
    }
}
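
Note the null check after poll() inside the callable above: between the isEmpty() test and the poll() call, another thread may take the last element, so the null return is the reliable signal. A minimal sketch of several threads draining one shared queue (ConcurrentLinkedQueue chosen here for thread safety):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

class ConcurrentDrain {
    public static void main(String[] args) throws InterruptedException {
        Queue<Integer> queue = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < 100; i++) {
            queue.add(i);
        }

        Runnable worker = () -> {
            Integer item;
            // poll() is atomic: each element is handed to exactly one
            // thread, and null means the queue has been drained.
            while ((item = queue.poll()) != null) {
                System.out.println(Thread.currentThread().getName() + " took " + item);
            }
        };

        Thread t1 = new Thread(worker, "worker-1");
        Thread t2 = new Thread(worker, "worker-2");
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}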

From source file:org.gvnix.flex.entity.ActionScriptEntityMetadataProvider.java

private void processJavaTypeChanged(String javaEntityId) {
    Queue<TypeMapping> relatedTypes = new LinkedList<TypeMapping>();
    List<ASFieldMetadata> processedProperties = new ArrayList<ASFieldMetadata>();

    JavaType javaType = PhysicalTypeIdentifier.getJavaType(javaEntityId);

    ActionScriptType asType = ActionScriptMappingUtils.toActionScriptType(javaType);
    String asEntityId = ASPhysicalTypeIdentifier.createIdentifier(asType, "src/main/flex");

    ASMutableClassOrInterfaceTypeDetails asTypeDetails = getASClassDetails(asEntityId);

    if (asTypeDetails == null) {
        return;
    }

    // Verify that the ActionScript class is enabled for remoting
    if (!isRemotingClass(javaType, asTypeDetails)) {
        return;
    }

    List<ASFieldMetadata> declaredFields = asTypeDetails.getDeclaredFields();

    MemberDetails memberDetails = getMemberDetails(javaType);

    if (memberDetails == null) {
        return;
    }

    for (MethodMetadata method : MemberFindingUtils.getMethods(memberDetails)) {
        if (BeanInfoUtils.isMutatorMethod(method)) {
            JavaSymbolName propertyName = BeanInfoUtils.getPropertyNameForJavaBeanMethod(method);
            FieldMetadata javaField = BeanInfoUtils.getFieldForPropertyName(memberDetails, propertyName);

            // TODO - We don't add any meta-tags and we set the field to
            // public - any other choice? Probably not until
            // we potentially add some sort of support for AS getters and
            // setters
            ASFieldMetadata asField = ActionScriptMappingUtils.toASFieldMetadata(asEntityId, javaField, true);

            int existingIndex = declaredFields.indexOf(asField);
            if (existingIndex > -1) {
                // Field already exists...does it need to be updated? Should
                // we even do this, or just assume if the
                // type is different that the user changed it intentionally.
                ASFieldMetadata existingField = declaredFields.get(existingIndex);
                if (!existingField.getFieldType().equals(asField.getFieldType())) {
                    asTypeDetails.updateField(asField, false);
                }
            } else {
                asTypeDetails.addField(asField, false);
            }

            relatedTypes.addAll(findRequiredMappings(javaField, asField));

            processedProperties.add(asField);
        }
    }

    // TODO - how should we handle fields that don't exist in the Java
    // object? For now we will just remove...should
    // add some way to turn this off later.
    for (ASFieldMetadata asField : asTypeDetails.getDeclaredFields()) {
        if (!processedProperties.contains(asField)) {
            asTypeDetails.removeField(asField.getFieldName());
        }
    }

    asTypeDetails.commit();

    // Now trigger the creation of any newly added related types
    while (!relatedTypes.isEmpty()) {
        TypeMapping mapping = relatedTypes.poll();
        createActionScriptMirrorClass(mapping.getMetadataId(), mapping.getAsType(), mapping.getJavaType());
    }
}