Example usage for java.util Queue remove

List of usage examples for java.util Queue remove

Introduction

On this page you can find usage examples for java.util.Queue.remove().

Prototype

E remove();

Document

Retrieves and removes the head of this queue.
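
A minimal, self-contained sketch of this contract (the QueueRemoveDemo class below is purely illustrative, not taken from any of the source files that follow): remove() returns and removes the head of the queue and, unlike poll(), throws a NoSuchElementException when the queue is empty.

import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;

// Illustrative example class, not part of the source files listed below
public class QueueRemoveDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<String>();
        queue.add("first");
        queue.add("second");

        // remove() retrieves and removes the head of the queue (FIFO order for LinkedList)
        System.out.println(queue.remove()); // prints "first"
        System.out.println(queue.remove()); // prints "second"

        // Unlike poll(), remove() throws NoSuchElementException on an empty queue
        try {
            queue.remove();
        } catch (NoSuchElementException e) {
            System.out.println("Queue is empty");
        }
    }
}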

Usage

From source file:edu.brown.hstore.dtxn.LocalTransaction.java

/**
 * Store a VoltTable result that this transaction is waiting for.
 * @param partition The partition id that generated the result
 * @param dependency_id The dependency id that this result corresponds to
 * @param key The hackish partition+dependency key
 * @param force If false, then we will check to make sure the result isn't a duplicate
 * @param result The actual data for the result
 */
private void addResult(final int partition, final int dependency_id, final int key, final boolean force,
        VoltTable result) {
    final int base_offset = hstore_site.getLocalPartitionOffset(this.base_partition);
    assert (result != null);
    assert (this.round_state[base_offset] == RoundState.INITIALIZED
            || this.round_state[base_offset] == RoundState.STARTED) : String.format(
                    "Invalid round state %s for %s at partition %d", this.round_state[base_offset], this,
                    this.base_partition);

    if (d)
        LOG.debug(String.format("%s - Attemping to add new result for %s [numRows=%d]", this,
                debugPartDep(partition, dependency_id), result.getRowCount()));

    // If the txn is still in the INITIALIZED state, then we just want to queue up the results
    // for now. They will get released when we switch to STARTED 
    // This is the only part that we need to synchronize on
    if (force == false) {
        if (this.predict_singlePartition == false)
            this.state.lock.lock();
        try {
            if (this.round_state[base_offset] == RoundState.INITIALIZED) {
                assert (this.state.queued_results.containsKey(key) == false) : String.format(
                        "%s - Duplicate result %s [key=%d]", this, debugPartDep(partition, dependency_id), key);
                this.state.queued_results.put(key, result);
                if (d)
                    LOG.debug(String.format("%s - Queued result %s until the round is started [key=%s]", this,
                            debugPartDep(partition, dependency_id), key));
                return;
            }
            if (d) {
                LOG.debug(String.format("%s - Storing new result for key %d", this, key));
                if (t)
                    LOG.trace("Result stmt_ctr(key=" + key + "): "
                            + this.state.results_dependency_stmt_ctr.get(key));
            }
        } finally {
            if (this.predict_singlePartition == false)
                this.state.lock.unlock();
        } // SYNCH
    }

    // Each partition+dependency_id should be unique within the Statement batch.
    // So as the results come back to us, we have to figure out which Statement it belongs to
    DependencyInfo dinfo = null;
    Queue<Integer> queue = this.state.results_dependency_stmt_ctr.get(key);
    assert (queue != null) : String.format("Unexpected %s in %s", debugPartDep(partition, dependency_id), this);
    assert (queue.isEmpty() == false) : String.format(
            "No more statements for %s in %s [key=%d]\nresults_dependency_stmt_ctr = %s",
            debugPartDep(partition, dependency_id), this, key, this.state.results_dependency_stmt_ctr);

    int stmt_index = queue.remove().intValue();
    dinfo = this.getDependencyInfo(dependency_id);
    assert (dinfo != null) : String.format("Unexpected %s for %s [stmt_index=%d]\n%s",
            debugPartDep(partition, dependency_id), this, stmt_index, result);
    dinfo.addResult(partition, result);

    if (this.predict_singlePartition == false)
        this.state.lock.lock();
    try {
        this.state.received_ctr++;

        // Check whether we need to start running stuff now
        // 2011-12-31: This needs to be synchronized because they might check
        //             whether there are no more blocked tasks before we 
        //             can add to_unblock to the unblocked_tasks queue
        if (this.state.blocked_tasks.isEmpty() == false && dinfo.hasTasksReady()) {
            Collection<WorkFragment> to_unblock = dinfo.getAndReleaseBlockedWorkFragments();
            assert (to_unblock != null);
            assert (to_unblock.isEmpty() == false);
            if (d)
                LOG.debug(String.format(
                        "%s - Got %d WorkFragments to unblock that were waiting for DependencyId %d", this,
                        to_unblock.size(), dinfo.getDependencyId()));
            this.state.blocked_tasks.removeAll(to_unblock);
            this.state.unblocked_tasks.addLast(to_unblock);
        } else if (d) {
            LOG.debug(String.format(
                    "%s - No WorkFragments to unblock after storing %s [blockedTasks=%d, hasTasksReady=%s]",
                    this, debugPartDep(partition, dependency_id), this.state.blocked_tasks.size(),
                    dinfo.hasTasksReady()));
        }

        if (this.state.dependency_latch != null) {
            this.state.dependency_latch.countDown();

            // HACK: If the latch is now zero, then push an EMPTY set into the unblocked queue
            // This will cause the blocked PartitionExecutor thread to wake up and realize that he's done
            if (this.state.dependency_latch.getCount() == 0) {
                if (d)
                    LOG.debug(String.format(
                            "%s - Pushing EMPTY_SET to PartitionExecutor because all the dependencies have arrived!",
                            this));
                this.state.unblocked_tasks.addLast(EMPTY_FRAGMENT_SET);
            }
            if (d)
                LOG.debug(String.format("%s - Setting CountDownLatch to %d", this,
                        this.state.dependency_latch.getCount()));
        }

        this.state.still_has_tasks = this.state.blocked_tasks.isEmpty() == false
                || this.state.unblocked_tasks.isEmpty() == false;
    } finally {
        if (this.predict_singlePartition == false)
            this.state.lock.unlock();
    } // SYNCH

    if (d) {
        Map<String, Object> m = new ListOrderedMap<String, Object>();
        m.put("Blocked Tasks", (this.state != null ? this.state.blocked_tasks.size() : null));
        m.put("DependencyInfo", dinfo.toString());
        m.put("hasTasksReady", dinfo.hasTasksReady());
        LOG.debug(this + " - Status Information\n" + StringUtil.formatMaps(m));
        if (t)
            LOG.trace(this.debug());
    }
}

From source file:ome.services.graphs.GraphPathBean.java

/**
 * Process the Hibernate domain object model to initialize this class' instance fields.
 * No other method should write to them.
 * @param sessionFactory the Hibernate session factory
 */
private void initialize(SessionFactoryImplementor sessionFactory) {
    /* note all the direct superclasses */
    final Map<String, String> superclasses = new HashMap<String, String>();
    final Map<String, ClassMetadata> classesMetadata = sessionFactory.getAllClassMetadata();
    for (final String className : classesMetadata.keySet()) {
        try {
            final Class<?> actualClass = Class.forName(className);
            if (IObject.class.isAssignableFrom(actualClass)) {
                classesBySimpleName.put(actualClass.getSimpleName(), actualClass.asSubclass(IObject.class));
                final Set<String> subclassNames = sessionFactory.getEntityPersister(className)
                        .getEntityMetamodel().getSubclassEntityNames();
                for (final String subclassName : subclassNames) {
                    if (!subclassName.equals(className)) {
                        final Class<?> actualSubclass = Class.forName(subclassName);
                        if (actualSubclass.getSuperclass() == actualClass) {
                            superclasses.put(subclassName, className);
                        }
                    }
                }
            } else {
                log.warn("mapped class " + className + " is not a " + IObject.class.getName());
            }
        } catch (ClassNotFoundException e) {
            log.error("could not instantiate class", e);
        }
    }
    /* note the indirect superclasses and subclasses */
    for (final Entry<String, String> superclassRelationship : superclasses.entrySet()) {
        final String startClass = superclassRelationship.getKey();
        String superclass = superclassRelationship.getValue();
        while (superclass != null) {
            allSuperclasses.put(startClass, superclass);
            allSubclasses.put(superclass, startClass);
            superclass = superclasses.get(superclass);
        }
    }
    /* queue for processing all the properties of all the mapped entities: name, type, nullability */
    final Queue<PropertyDetails> propertyQueue = new LinkedList<PropertyDetails>();
    final Map<String, Set<String>> allPropertyNames = new HashMap<String, Set<String>>();
    for (final Entry<String, ClassMetadata> classMetadata : classesMetadata.entrySet()) {
        final String className = classMetadata.getKey();
        final ClassMetadata metadata = classMetadata.getValue();
        /* note name of identifier property */
        classIdProperties.put(metadata.getEntityName(), metadata.getIdentifierPropertyName());
        /* queue other properties */
        final String[] propertyNames = metadata.getPropertyNames();
        final Type[] propertyTypes = metadata.getPropertyTypes();
        final boolean[] propertyNullabilities = metadata.getPropertyNullability();
        for (int i = 0; i < propertyNames.length; i++) {
            final List<String> propertyPath = Collections.singletonList(propertyNames[i]);
            propertyQueue.add(
                    new PropertyDetails(className, propertyPath, propertyTypes[i], propertyNullabilities[i]));
        }
        final Set<String> propertyNamesSet = new HashSet<String>(propertyNames.length);
        propertyNamesSet.addAll(Arrays.asList(propertyNames));
        allPropertyNames.put(className, propertyNamesSet);
    }
    /* process each property to note entity linkages */
    while (!propertyQueue.isEmpty()) {
        final PropertyDetails property = propertyQueue.remove();
        if (ignoreProperty(property.path.get(property.path.size() - 1))) {
            continue;
        }
        /* if the property has a component type, queue the parts for processing */
        if (property.type instanceof ComponentType) {
            final ComponentType componentType = (ComponentType) property.type;
            final String[] componentPropertyNames = componentType.getPropertyNames();
            final Type[] componentPropertyTypes = componentType.getSubtypes();
            final boolean[] componentPropertyNullabilities = componentType.getPropertyNullability();
            for (int i = 0; i < componentPropertyNames.length; i++) {
                final List<String> componentPropertyPath = new ArrayList<String>(property.path.size() + 1);
                componentPropertyPath.addAll(property.path);
                componentPropertyPath.add(componentPropertyNames[i]);
                propertyQueue.add(new PropertyDetails(property.holder, componentPropertyPath,
                        componentPropertyTypes[i], componentPropertyNullabilities[i]));
            }
        } else {
            /* determine if another mapped entity class is linked by this property */
            final boolean isAssociatedEntity;
            if (property.type instanceof CollectionType) {
                final CollectionType ct = (CollectionType) property.type;
                isAssociatedEntity = sessionFactory.getCollectionPersister(ct.getRole()).getElementType()
                        .isEntityType();
            } else {
                isAssociatedEntity = property.type instanceof AssociationType;
            }
            /* the property can link to entities, so process it further */
            String propertyPath = Joiner.on('.').join(property.path);
            /* find if the property is accessible (e.g., not protected) */
            boolean propertyIsAccessible = false;
            String classToInstantiateName = property.holder;
            Class<?> classToInstantiate = null;
            try {
                classToInstantiate = Class.forName(classToInstantiateName);
                while (Modifier.isAbstract(classToInstantiate.getModifiers())) {
                    classToInstantiateName = allSubclasses.get(classToInstantiateName).iterator().next();
                    classToInstantiate = Class.forName(classToInstantiateName);
                }
                try {
                    PropertyUtils.getNestedProperty(classToInstantiate.newInstance(), propertyPath);
                    propertyIsAccessible = true;
                } catch (NoSuchMethodException e) {
                    /* expected for collection properties */
                } catch (NestedNullException e) {
                    log.warn("guessing " + propertyPath + " of " + property.holder + " to be accessible");
                    propertyIsAccessible = true;
                }
            } catch (ReflectiveOperationException e) {
                log.error("could not probe property " + propertyPath + " of " + property.holder, e);
                continue;
            }
            /* build property report line for log */
            final char arrowShaft = property.isNullable ? '-' : '=';
            final StringBuffer sb = new StringBuffer();
            sb.append(property.holder);
            sb.append(' ');
            for (final String propertyName : property.path) {
                sb.append(arrowShaft);
                sb.append(arrowShaft);
                sb.append(propertyName);
            }
            sb.append(arrowShaft);
            sb.append(arrowShaft);
            sb.append("> ");
            final String valueClassName;
            if (isAssociatedEntity) {
                valueClassName = ((AssociationType) property.type).getAssociatedEntityName(sessionFactory);
                sb.append(valueClassName);
            } else {
                valueClassName = null;
                sb.append("value");
            }
            if (property.type.isCollectionType()) {
                sb.append("[]");
            }
            if (!propertyIsAccessible) {
                sb.append(" (inaccessible)");
            }
            /* determine from which class the property is inherited, if at all */
            String superclassWithProperty = null;
            String currentClass = property.holder;
            while (true) {
                currentClass = superclasses.get(currentClass);
                if (currentClass == null) {
                    break;
                } else if (allPropertyNames.get(currentClass).contains(property.path.get(0))) {
                    superclassWithProperty = currentClass;
                }
            }
            /* check if the property actually comes from an interface */
            final String declaringClassName = superclassWithProperty == null ? property.holder
                    : superclassWithProperty;
            final Class<? extends IObject> interfaceForProperty = getInterfaceForProperty(declaringClassName,
                    property.path.get(0));
            /* report where the property is declared */
            if (superclassWithProperty != null) {
                sb.append(" from ");
                sb.append(superclassWithProperty);
            } else {
                if (interfaceForProperty != null) {
                    sb.append(" see ");
                    sb.append(interfaceForProperty.getName());
                    /* It would be nice to set PropertyDetails to have the interface as the holder,
                     * but then properties would not be unique by declarer class and instance ID. */
                }
                /* entity linkages by non-inherited properties are recorded */
                if (valueClassName == null && property.path.size() > 1) {
                    /* assume that the top-level property suffices for describing simple properties */
                    log.debug("recording " + propertyPath + " as " + property.path.get(0));
                    propertyPath = property.path.get(0);
                }
                final Entry<String, String> classPropertyName = Maps.immutableEntry(property.holder,
                        propertyPath);
                if (valueClassName == null) {
                    simpleProperties.put(property.holder, propertyPath);
                } else {
                    linkedTo.put(property.holder, Maps.immutableEntry(valueClassName, propertyPath));
                    linkedBy.put(valueClassName, classPropertyName);
                }
                final PropertyKind propertyKind;
                if (property.type.isCollectionType()) {
                    propertyKind = PropertyKind.COLLECTION;
                } else if (property.isNullable) {
                    propertyKind = PropertyKind.OPTIONAL;
                } else {
                    propertyKind = PropertyKind.REQUIRED;
                }
                propertyKinds.put(classPropertyName, propertyKind);
                if (propertyIsAccessible) {
                    accessibleProperties.add(classPropertyName);
                }
            }
            if (log.isDebugEnabled()) {
                log.debug(sb.toString());
            }
        }
    }
    log.info("initialized graph path bean with " + propertyKinds.size() + " properties");
}

From source file:it.geosolutions.geobatch.actions.freemarker.FreeMarkerAction.java

/**
 * Removes TemplateModelEvents from the incoming queue, processes them with the configured
 * FreeMarker template, and returns the generated output files as FileSystemEvents.
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    listenerForwarder.started();

    listenerForwarder.setTask("initializing the FreeMarker engine");
    if (!initialized) {
        try {
            initialize();
        } catch (IllegalArgumentException e) {
            throw new ActionException(this, e.getLocalizedMessage(), e.getCause());
        } catch (IOException e) {
            throw new ActionException(this, e.getLocalizedMessage(), e.getCause());
        }
    }

    listenerForwarder.setTask("build the output absolute file name");

    // build the output absolute file name
    File outputDir = computeOutputDir(); // may throw ActionEx

    // return
    final Queue<EventObject> ret = new LinkedList<EventObject>();

    listenerForwarder.setTask("Building/getting the root data structure");
    /*
     * Building/getting the root data structure
     */
    final Map<String, Object> root = conf.getRoot() != null ? conf.getRoot() : new HashMap<String, Object>();

    // list of incoming event to inject into the root datamodel
    final List<TemplateModel> list;
    if (conf.isNtoN()) {
        list = new ArrayList<TemplateModel>(events.size());
    } else {
        list = new ArrayList<TemplateModel>(1);
    }
    // append the list of adapted event objects
    root.put(TemplateModelEvent.EVENT_KEY, list);

    while (!events.isEmpty()) {
        // the adapted event
        final TemplateModelEvent ev;
        final TemplateModel dataModel;
        try {
            if ((ev = adapter(events.remove())) != null) {
                listenerForwarder.setTask("Try to get a Template DataModel from the Adapted event");
                // try to get a Template DataModel from the Adapted event
                dataModel = ev.getModel(processor);

            } else {
                final String message = "Unable to append the event: unrecognized format. SKIPPING...";
                if (LOGGER.isErrorEnabled()) {
                    LOGGER.error(message);
                }
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    final ActionException e = new ActionException(this, message);
                    listenerForwarder.failed(e);
                    throw e;
                }
            }
        } catch (TemplateModelException tme) {
            final String message = "Unable to wrap the passed object: " + tme.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            if (conf.isFailIgnored()) {
                continue;
            } else {
                listenerForwarder.failed(tme);
                throw new ActionException(this, tme.getLocalizedMessage());
            }
        } catch (Exception ioe) {
            final String message = "Unable to produce the output: " + ioe.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            if (conf.isFailIgnored()) {
                continue;
            } else {
                listenerForwarder.failed(ioe);
                throw new ActionException(this, ioe.getLocalizedMessage(), ioe);
            }
        }

        listenerForwarder.setTask("Generating the output");
        /*
         * If getNtoN: For each data incoming event (Template DataModel)
         * build a file. Otherwise the entire queue of incoming object will
         * be transformed in a list of datamodel. In this case only one file
         * is generated.
         */
        if (conf.isNtoN()) {

            if (list.size() > 0) {
                list.remove(0);
            }
            list.add(dataModel);

            final File outputFile;
            // append the incoming data structure
            try {
                outputFile = buildOutput(outputDir, root);
            } catch (ActionException e) {
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(e.getLocalizedMessage(), e);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    listenerForwarder.failed(e);
                    throw e;
                }
            }
            // add the file to the return
            ret.add(new FileSystemEvent(outputFile.getAbsoluteFile(), FileSystemEventType.FILE_ADDED));
        } else {
            list.add(dataModel);
        }
    }

    if (!conf.isNtoN()) {
        final File outputFile;
        // append the incoming data structure
        try {
            outputFile = buildOutput(outputDir, root);
        } catch (ActionException e) {
            if (LOGGER.isErrorEnabled())
                LOGGER.error(e.getLocalizedMessage(), e);
            listenerForwarder.failed(e);
            throw e;
        }
        // add the file to the return
        ret.add(new FileSystemEvent(outputFile.getAbsoluteFile(), FileSystemEventType.FILE_ADDED));
    }

    listenerForwarder.completed();
    return ret;
}

From source file:com.dell.asm.asmcore.asmmanager.util.template.adjuster.ClusterAdjuster.java

/**
 * If the cluster has no server, create a predefined VDS and port group:
 * PXE VDS - [User selects from VDS available in the datacenter]
 * PXE Port Group - [ User selects from available port groups on the PXE VDS]
 * Workload VDS - [ User selects from VDS available in the datacenter]
 *
 * @param cluster
 * @param allNetworks
 * @param hasServer
 */
private void refineClusterByServerNetworks(ServiceTemplateComponent cluster,
        List<PartitionNetworks> allNetworks, boolean hasServer) {
    // check if it is vCenter cluster
    ServiceTemplateCategory vdsCategory = cluster
            .getTemplateResource(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID);
    if (vdsCategory == null)
        return;

    int v = 1;
    ServiceTemplateSetting vdsNameZero = cluster.getParameter(
            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_NAME_ID);

    // newly added VDS members
    List<ServiceTemplateSetting> vdsAdded = new ArrayList<>();

    ServiceTemplateSetting vdsNew = null;

    if (hasServer) {

        //Restore option Enable VMWare vSAN if server is associated with the cluster          
        ServiceTemplateSetting enableVmwareVsan = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_ID,
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_CLUSTER_VSAN_ID);
        if (enableVmwareVsan != null) {
            enableVmwareVsan.setHideFromTemplate(false);
        }

        // first need to count some networks
        List<Network> iscsiNets = new ArrayList<>();
        List<Network> vsanNets = new ArrayList<>();

        for (PartitionNetworks pn : allNetworks) {
            for (Network nConfig : pn.getNetworks()) {
                if (NetworkType.STORAGE_ISCSI_SAN.equals(nConfig.getType())) {
                    // replace "iscsi" in the network ID by combination of sorted ISCSI net IDs
                    List<String> sortedNetIDs = pn.sortISCSINetworks();
                    nConfig.setId(StringUtils.join(sortedNetIDs, "-"));

                    // will need to count later
                    if (!iscsiNets.contains(nConfig)) {
                        iscsiNets.add(nConfig);
                    }
                }
            }
        }

        for (PartitionNetworks pn : allNetworks) {
            pn.sortById();

            ServiceTemplateSetting vdsName = cluster.getParameter(
                    ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                    ServiceTemplateClientUtil.createVDSID(pn.getId()));

            String uiGroupName = "VDS " + v;
            if (vdsName == null) {
                vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                        ServiceTemplateClientUtil.createVDSID(pn.getId()), "VDS Name", uiGroupName,
                        ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
            } else {
                // upgrade options only
                vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
            }

            // hard reset for UI group
            vdsName.setGroup(uiGroupName);

            vdsName.setHideFromTemplate(false);
            vdsAdded.add(vdsName);

            // $new$
            vdsNew = vdsCategory.getParameter(
                    ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
            if (vdsNew != null) {
                vdsNew.setGroup(uiGroupName);
                vdsAdded.add(vdsNew);
            }

            if (pn.hasManagementNetwork()) {
                vdsName.setRequired(true);
                if (vdsNew != null)
                    vdsNew.setRequired(true);
            }

            // for each network find or create PG
            Queue<NetworkObject> iscsiNetworkFIFO = new LinkedList<>();
            iscsiNetworkFIFO.addAll(pn.getIscsiNetworks());

            for (Network nConfig : pn.getNetworks()) {
                Queue<NetworkObject> currentQueue = null;
                String portGroupName = nConfig.getName() + " Port Group";
                int cnt = 1;
                if (NetworkType.STORAGE_ISCSI_SAN.equals(nConfig.getType())) {
                    currentQueue = iscsiNetworkFIFO;
                    if (iscsiNets.size() == 1) {
                        cnt = 2; // 2 PG but only if we have one ISCSI network.
                    }
                }

                boolean incrementPortGroup = (cnt > 1 && currentQueue.size() == 1);
                // multiple PGs for certain networks
                for (int j = 1; j <= cnt; j++) {
                    String currGroupName = portGroupName;
                    String portGroupSufix = "";
                    if (incrementPortGroup) {
                        portGroupSufix = " " + j;
                    }

                    String pgNetworkID = nConfig.getId();
                    // can be only 1 or 2 ISCSI.
                    // But we always need 2 port groups for such networks.
                    // Names and IDs have to be picked from dedicated list
                    if (pgNetworkID.contains("-") && currentQueue != null) {
                        NetworkObject networkObject = currentQueue.remove();
                        if (networkObject != null) {
                            pgNetworkID = networkObject.getId();
                            currGroupName = networkObject.getName() + " Port Group";
                        }
                    }

                    currGroupName += portGroupSufix;

                    ServiceTemplateSetting vdsPG = ServiceTemplateClientUtil.getPortGroup(cluster, pn.getId(),
                            currGroupName, pgNetworkID, j, true);
                    if (vdsPG == null) {
                        // unexpected...
                        LOGGER.error("getPortGroup returned null for VDS ID=" + pn.getId() + ", PG="
                                + currGroupName);
                        throw new LocalizedWebApplicationException(Response.Status.INTERNAL_SERVER_ERROR,
                                AsmManagerMessages.internalError());
                    }
                    vdsPG.setDisplayName(currGroupName);
                    vdsPG.setHideFromTemplate(false);
                    vdsPG.setGroup(uiGroupName);

                    vdsAdded.add(vdsPG);
                    // $new$
                    vdsNew = vdsCategory.getParameter(
                            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsPG.getId());
                    if (vdsNew != null) {
                        vdsNew.setGroup(uiGroupName);
                        vdsAdded.add(vdsNew);
                    }

                    if (NetworkType.PXE.equals(nConfig.getType())
                            || NetworkType.HYPERVISOR_MANAGEMENT.equals(nConfig.getType())) {
                        vdsPG.setRequired(true);
                        if (vdsNew != null)
                            vdsNew.setRequired(true);
                    }

                }

            }

            v++;
        }

    } else {

        //Remove option Enable VMWare vSAN if server is not associated with the cluster          
        ServiceTemplateSetting enableVmwareVsan = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_ID,
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_CLUSTER_VSAN_ID);
        if (enableVmwareVsan != null) {
            enableVmwareVsan.setHideFromTemplate(true);
        }

        ServiceTemplateSetting vdsName = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                ServiceTemplateClientUtil.createVDSID("pxe"));

        if (vdsName == null) {
            vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                    ServiceTemplateClientUtil.createVDSID("pxe"), "VDS Name", "PXE VDS",
                    ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));

            vdsName.setHideFromTemplate(false);
        } else {
            vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
        }

        vdsAdded.add(vdsName);
        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
        if (vdsNew != null) {
            vdsAdded.add(vdsNew);
        }

        // PXE Port Group
        ServiceTemplateSetting vdsPG = ServiceTemplateClientUtil.getPortGroup(cluster, "pxe", "PXE Port Group",
                "pxe", 1, true);
        if (vdsPG == null) {
            // unexpected...
            LOGGER.error("getPortGroup returned null for VDS ID=pxe" + ", PG=PXE Port Group");
            throw new LocalizedWebApplicationException(Response.Status.INTERNAL_SERVER_ERROR,
                    AsmManagerMessages.internalError());
        }
        vdsPG.setDisplayName("PXE Port Group");
        vdsPG.setHideFromTemplate(false);

        vdsAdded.add(vdsPG);
        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsPG.getId());
        if (vdsNew != null) {
            vdsAdded.add(vdsNew);
        }

        vdsName = cluster.getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                ServiceTemplateClientUtil.createVDSID("workload"));

        if (vdsName == null) {
            vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                    ServiceTemplateClientUtil.createVDSID("workload"), "VDS Name", "Workload VDS",
                    ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));

            vdsName.setHideFromTemplate(false);
            vdsName.setRequired(true);
        } else {
            vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
        }

        vdsAdded.add(vdsName);
        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
        if (vdsNew != null) {
            vdsNew.setRequired(true);
            vdsAdded.add(vdsNew);
        }

    }

    // remove old VDS names / PGs
    List<ServiceTemplateSetting> toRemove = new ArrayList<>();
    for (ServiceTemplateSetting vdsName : vdsCategory.getParameters()) {
        if (!vdsName.getId().contains(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_NAME_ID + "::")
                && !vdsName.getId()
                        .contains(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_PG_ID + "::"))
            continue;

        toRemove.add(vdsName);
    }
    vdsCategory.getParameters().removeAll(toRemove);

    // re-add new parameters
    vdsCategory.getParameters().addAll(vdsAdded);

}

From source file:org.geoserver.wms.legendgraphic.ColorMapLegendCreator.java

private BufferedImage mergeRows(Queue<BufferedImage> legendsQueue) {
    // I am doing a straight cast since I know that I built this
    // dimension object by using the widths and heights of the various
    // bufferedimages for the various bkgColor map entries.
    final Dimension finalDimension = new Dimension();
    final int numRows = legendsQueue.size();
    finalDimension.setSize(Math.max(footerW, colorW + ruleW + labelW) + 2 * dx + 2 * margin,
            rowH * numRows + 2 * margin + (numRows - 1) * dy);

    final int totalWidth = (int) finalDimension.getWidth();
    final int totalHeight = (int) finalDimension.getHeight();
    BufferedImage finalLegend = ImageUtils.createImage(totalWidth, totalHeight, (IndexColorModel) null,
            transparent);

    /*
     * For RAMP type, only HORIZONTAL or VERTICAL condition is valid
     */
    if (colorMapType == ColorMapType.RAMP) {

        final Map<Key, Object> hintsMap = new HashMap<Key, Object>();
        Graphics2D finalGraphics = ImageUtils.prepareTransparency(transparent, backgroundColor, finalLegend,
                hintsMap);
        hintsMap.put(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
        finalGraphics.setRenderingHints(hintsMap);

        int topOfRow = (int) (margin + 0.5);
        for (int i = 0; i < numRows; i++) {
            final BufferedImage img = legendsQueue.remove();

            // draw the image
            finalGraphics.drawImage(img, (int) (margin + 0.5), topOfRow, null);
            topOfRow += img.getHeight() + dy;

        }

        if (this.layout == LegendLayout.HORIZONTAL) {
            BufferedImage newImage = new BufferedImage(totalHeight, totalWidth, finalLegend.getType());
            Graphics2D g2 = newImage.createGraphics();
            g2.rotate(-Math.PI / 2, 0, 0);
            g2.drawImage(finalLegend, null, -totalWidth, 0);
            finalLegend = newImage;
            g2.dispose();
            finalGraphics.dispose();
        }
    } else {
        List<RenderedImage> imgs = new ArrayList<RenderedImage>(legendsQueue);

        LegendMerger.MergeOptions options = new LegendMerger.MergeOptions(imgs, (int) dx, (int) dy,
                (int) margin, 0, backgroundColor, transparent, true, layout, rowWidth, rows, columnHeight,
                columns, null, false, false);
        finalLegend = LegendMerger.mergeRasterLegends(options);
    }

    return finalLegend;

}

From source file:org.apache.synapse.transport.nhttp.HttpCoreNIOListener.java

private void startEndpoints() throws AxisFault {
    Queue<ListenerEndpoint> endpoints = new LinkedList<ListenerEndpoint>();

    Set<InetSocketAddress> addressSet = new HashSet<InetSocketAddress>();
    addressSet.addAll(connFactory.getBindAddresses());
    if (NHttpConfiguration.getInstance().getMaxActiveConnections() != -1) {
        addMaxConnectionCountController(NHttpConfiguration.getInstance().getMaxActiveConnections());
    }
    if (listenerContext.getBindAddress() != null) {
        addressSet.add(new InetSocketAddress(listenerContext.getBindAddress(), listenerContext.getPort()));
    }
    if (addressSet.isEmpty()) {
        addressSet.add(new InetSocketAddress(listenerContext.getPort()));
    }

    // Ensure simple but stable order
    List<InetSocketAddress> addressList = new ArrayList<InetSocketAddress>(addressSet);
    Collections.sort(addressList, new Comparator<InetSocketAddress>() {

        public int compare(InetSocketAddress a1, InetSocketAddress a2) {
            String s1 = a1.toString();
            String s2 = a2.toString();
            return s1.compareTo(s2);
        }

    });
    for (InetSocketAddress address : addressList) {
        endpoints.add(ioReactor.listen(address));
    }

    // Wait for the endpoint to become ready, i.e. for the listener to start accepting
    // requests.
    while (!endpoints.isEmpty()) {
        ListenerEndpoint endpoint = endpoints.remove();
        try {
            endpoint.waitFor();
            if (log.isInfoEnabled()) {
                InetSocketAddress address = (InetSocketAddress) endpoint.getAddress();
                if (!address.isUnresolved()) {
                    log.info(name + " started on " + address.getHostName() + ":" + address.getPort());
                } else {
                    log.info(name + " started on " + address);
                }
            }
        } catch (InterruptedException e) {
            log.warn("Listener startup was interrupted");
            break;
        }
    }
}

From source file:org.codice.ddf.opensearch.source.OpenSearchSource.java

/**
 * Method to combine spatial searches into either a geometry collection or a bounding box.
 * OpenSearch endpoints and the query framework allow for multiple spatial query parameters. This
 * method has been refactored out and is protected so that downstream projects may try to
 * implement another algorithm (e.g. best-effort) to combine searches.
 *
 * @return null if there is no search specified, or a {@link SpatialSearch} with one search that is
 *     the combination of all of the spatial criteria
 */
@Nullable
protected SpatialSearch createCombinedSpatialSearch(final Queue<PointRadius> pointRadiusSearches,
        final Queue<Geometry> geometrySearches, final int numMultiPointRadiusVertices,
        final int distanceTolerance) {
    Geometry geometrySearch = null;
    BoundingBox boundingBox = null;
    PointRadius pointRadius = null;
    SpatialSearch spatialSearch = null;

    Set<Geometry> combinedGeometrySearches = new HashSet<>(geometrySearches);

    if (CollectionUtils.isNotEmpty(pointRadiusSearches)) {
        if (shouldConvertToBBox) {
            for (PointRadius search : pointRadiusSearches) {
                BoundingBox bbox = BoundingBoxUtils.createBoundingBox(search);
                List bboxCoordinate = BoundingBoxUtils.getBoundingBoxCoordinatesList(bbox);
                List<List> coordinates = new ArrayList<>();
                coordinates.add(bboxCoordinate);
                combinedGeometrySearches.add(ddf.geo.formatter.Polygon.buildPolygon(coordinates));
                LOGGER.trace(
                        "Point radius searches are converted to a (rough approximation) square using Vincenty's formula (direct)");
            }
        } else {
            if (pointRadiusSearches.size() == 1) {
                pointRadius = pointRadiusSearches.remove();
            } else {
                for (PointRadius search : pointRadiusSearches) {
                    Geometry circle = GeospatialUtil.createCirclePolygon(search.getLat(), search.getLon(),
                            search.getRadius(), numMultiPointRadiusVertices, distanceTolerance);
                    combinedGeometrySearches.add(circle);
                    LOGGER.trace("Point radius searches are converted to a polygon with a max of {} vertices.",
                            numMultiPointRadiusVertices);
                }
            }
        }
    }

    if (CollectionUtils.isNotEmpty(combinedGeometrySearches)) {
        // if there is more than one geometry, create a geometry collection
        if (combinedGeometrySearches.size() > 1) {
            geometrySearch = GEOMETRY_FACTORY
                    .createGeometryCollection(combinedGeometrySearches.toArray(new Geometry[0]));
        } else {
            geometrySearch = combinedGeometrySearches.iterator().next();
        }

        /**
         * If convert to bounding box is enabled, extracts the approximate envelope. In the case of
         * multiple geometry, a large approximate envelope encompassing all of the geometry is
         * returned. Area between the geometries are also included in this spatial search. Hence widen
         * the search area.
         */
        if (shouldConvertToBBox) {
            if (combinedGeometrySearches.size() > 1) {
                LOGGER.trace(
                        "An approximate envelope encompassing all the geometry is returned. Area between the geometries are also included in this spatial search. Hence widen the search area.");
            }
            boundingBox = BoundingBoxUtils.createBoundingBox((Polygon) geometrySearch.getEnvelope());
            geometrySearch = null;
        }
    }

    if (geometrySearch != null || boundingBox != null || pointRadius != null) {
        // Geo Draft 2 default always geometry instead of polygon
        spatialSearch = new SpatialSearch(geometrySearch, boundingBox, null, pointRadius);
    }
    return spatialSearch;
}

From source file:org.apache.hadoop.hive.ql.MultiDriver_BAK.java

private CommandProcessorResponse runInternal(ArrayList<Pair<String, Configuration>> multiCmds)
        throws CommandNeedRetryException {
    errorMessage = null;
    SQLState = null;
    downstreamError = null;

    if (!validateConfVariables()) {
        return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    // Reset the perf logger
    PerfLogger perfLogger = PerfLogger.getPerfLogger(true);
    perfLogger.PerfLogBegin(LOG, PerfLogger.MULTIDRIVER_RUN);
    perfLogger.PerfLogBegin(LOG, PerfLogger.TIME_TO_SUBMIT);

    int ret;
    synchronized (compileMonitor) {
        ret = multiCompile(multiCmds);
    }

    if (ret != 0) {
        for (int key = 0; key < multiPctx.size(); key++) {
            Context ctx = multiPctx.get(key).getContext();
            releaseLocks(ctx.getHiveLocks());
        }
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    boolean requireLock = false;
    boolean ckLock = checkLockManager();

    if (ckLock) {
        boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
        if (lockOnlyMapred) {
            Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
            taskQueue.addAll(plan.getRootTasks());
            while (taskQueue.peek() != null) {
                Task<? extends Serializable> tsk = taskQueue.remove();
                requireLock = requireLock || tsk.requireLock();
                if (requireLock) {
                    break;
                }
                if (tsk instanceof ConditionalTask) {
                    taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
                }
                if (tsk.getChildTasks() != null) {
                    taskQueue.addAll(tsk.getChildTasks());
                }
                // does not add back up task here, because back up task should be the same
                // type of the original task.
            }
        } else {
            requireLock = true;
        }
    }

    if (requireLock) {
        ret = acquireReadWriteLocks();
        if (ret != 0) {
            releaseLocks(ctx.getHiveLocks());
            //  return new CommandProcessorResponse(ret, errorMessage, SQLState);
        }
    }
    boolean isexplain = ctx.getExplain();
    // if(isexplain){
    // multiOutputexplain();
    // }else{
    // reserved function
    ret = multiExecute();

    if (ret != 0) {
        //if needRequireLock is false, the release here will do nothing because there is no lock
        releaseLocks(ctx.getHiveLocks());
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }
    multiOutputResult();

    //if needRequireLock is false, the release here will do nothing because there is no lock
    releaseLocks(ctx.getHiveLocks());

    for (int key = 0; key < multiPctx.size(); key++) {
        Context ctx = multiPctx.get(key).getContext();
        releaseLocks(ctx.getHiveLocks());
    }

    multiPctx.clear();
    perfLogger.PerfLogEnd(LOG, PerfLogger.MULTIDRIVER_RUN);
    perfLogger.close(LOG, plan);

    return new CommandProcessorResponse(ret);
}

From source file:org.deeplearning4j.patent.TrainPatentClassifier.java

/**
 * JCommander entry point
 */
protected void entryPoint(String[] args) throws Exception {
    JCommanderUtils.parseArgs(this, args);

    //Azure storage account naming rules: https://blogs.msdn.microsoft.com/jmstall/2014/06/12/azure-storage-naming-rules/
    //The default exceptions aren't helpful, we'll validate this here
    if (!azureStorageAcct.matches("^[a-z0-9]+$") || azureStorageAcct.length() < 3
            || azureStorageAcct.length() > 24) {
        throw new IllegalStateException("Invalid storage account name: must be alphanumeric, lowercase, "
                + "3 to 24 characters. Got option azureStorageAcct=\"" + azureStorageAcct + "\"");
    }
    if (!azureContainerPreproc.matches("^[a-z0-9-]+$") || azureContainerPreproc.length() < 3
            || azureContainerPreproc.length() > 63) {
        throw new IllegalStateException(
                "Invalid Azure container name: must be alphanumeric or dash, lowercase, "
                        + "3 to 63 characters. Got option azureContainerPreproc=\"" + azureContainerPreproc
                        + "\"");
    }

    StringBuilder results = new StringBuilder(); //To store results/timing - will be written to disk on completion

    long startTime = System.currentTimeMillis();

    // Prepare neural net
    ComputationGraph net = new ComputationGraph(NetworkConfiguration.getConf());
    net.init();
    log.info("Parameters: {}", net.params().length());

    // Configure Spark
    SparkConf sparkConf = new SparkConf();
    sparkConf.setAppName(sparkAppName);
    JavaSparkContext sc = new JavaSparkContext();
    int numWorkers = this.numNodes * this.numWorkersPerNode;

    //Prepare dataset RDDs
    String dirName = "seqLength" + maxSequenceLength + "_mb" + minibatch;
    String containerRoot = "wasbs://" + azureContainerPreproc + "@" + azureStorageAcct
            + ".blob.core.windows.net/";
    String baseOutPath = containerRoot + dirName;
    String trainDataPathRootDir = baseOutPath + "/train/";
    String testDataPathRootDir = baseOutPath + "/test/";
    JavaRDD<String> trainDataPaths = SparkUtils.listPaths(sc, trainDataPathRootDir);
    JavaRDD<String> testDataPaths = totalExamplesTest <= 0 ? null
            : listPathsSubset(sc, testDataPathRootDir, totalExamplesTest, 12345);
    trainDataPaths.cache();
    if (testDataPaths != null)
        testDataPaths.cache();

    //If only doing evaluation: perform it here and exit
    if (evalOnly) {
        evaluateOnly(sc, net, testDataPaths);
        return;
    }

    //Write configuration to output directory. Also determine output base directory for results
    writeConfig(sc);

    //Set up TrainingMaster for gradient sharing training
    VoidConfiguration voidConfiguration = VoidConfiguration.builder().unicastPort(port) // Should be open for IN/OUT communications on all Spark nodes
            .networkMask(networkMask) // Local network mask
            .controllerAddress(masterIP).build();
    TrainingMaster tm = new SharedTrainingMaster.Builder(voidConfiguration, minibatch).rngSeed(12345)
            .collectTrainingStats(false).batchSizePerWorker(minibatch) // Minibatch size for each worker
            .workersPerNode(numWorkersPerNode) // Workers per node
            .thresholdAlgorithm(new AdaptiveThresholdAlgorithm(gradientThreshold)).build();
    tm.setCollectTrainingStats(false);

    //If continueTraining==true and checkpoints are available: Load checkpoint to continue training
    int firstSubsetIdx = 0;
    if (continueTraining) {
        Pair<Integer, ComputationGraph> p = loadCheckpoint();
        if (p != null) {
            firstSubsetIdx = p.getFirst();
            net = p.getSecond();
        }
    }

    //Setup saving of parameter snapshots. This is so we can calculate accuracy vs. time
    final AtomicBoolean isTraining = new AtomicBoolean(false);
    final File baseParamSaveDir = new File(outputPath, "paramSnapshots");
    if (!baseParamSaveDir.exists())
        baseParamSaveDir.mkdirs();

    //Prepare Spark version of neural net
    SparkComputationGraph sparkNet = new SparkComputationGraph(sc, net, tm);
    sparkNet.setCollectTrainingStats(tm.getIsCollectTrainingStats());

    // Add listeners
    sparkNet.setListeners(new PerformanceListener(listenerFrequency, true));

    // Time setup
    long endTimeMs = System.currentTimeMillis();
    double elapsedSec = (endTimeMs - startTime) / MILLISEC_PER_SEC;
    log.info("Setup timing: {} s", elapsedSec);
    results.append("Setup timing: ").append(elapsedSec).append(" sec\n");

    String resultsFile = FilenameUtils.concat(outputPath, "results.txt");
    if (new File(resultsFile).exists()) {
        String str = "\n\n\n============================================================================"
                + results.toString();
        FileUtils.writeStringToFile(new File(resultsFile), str, Charset.forName("UTF-8"), true);
    } else {
        FileUtils.writeStringToFile(new File(resultsFile), results.toString(), Charset.forName("UTF-8"));
    }

    //Random split into RDDs of exactly "convNumBatches" objects
    long countTrain = trainDataPaths.count();
    JavaRDD<String>[] trainSubsets;
    if (batchesBtwCheckpoints > 1) {
        trainSubsets = SparkUtils.balancedRandomSplit((int) countTrain, batchesBtwCheckpoints, trainDataPaths);
    } else {
        trainSubsets = (JavaRDD<String>[]) new JavaRDD[] { trainDataPaths };
    }

    DataSetLoader datasetLoader = new LoadDataSetsFunction(wordVectorsPath,
            PatentLabelGenerator.classLabelFilteredCounts().size(), 300);

    //Before training starts: start the thread to track convergence. This thread asynchronously saves params periodically for later evaluation
    AtomicInteger currentSubset = new AtomicInteger(0);
    Queue<ToEval> toEvalQueue = ConvergenceRunnable.startConvergenceThread(baseParamSaveDir, currentSubset,
            isTraining, saveFreqSec, sparkNet.getNetwork().params());
    log.info("Network saving thread started: saving copy every {} sec", saveFreqSec);

    boolean firstSave = true;
    long startTrain = System.currentTimeMillis();
    for (int epoch = 0; epoch < numEpochs; epoch++) {
        for (int i = firstSubsetIdx; i < trainSubsets.length; i++) {
            currentSubset.set(i);
            log.info("Starting training: epoch {} of {}, subset {} of {} ({} minibatches)", (epoch + 1),
                    numEpochs, (i + 1), trainSubsets.length, batchesBtwCheckpoints);
            long start = System.currentTimeMillis();
            isTraining.set(true);
            sparkNet.fitPaths(trainSubsets[i], datasetLoader);
            isTraining.set(false);
            long end = System.currentTimeMillis();
            log.info("Finished training: epoch {} of {}, subset {} of {} ({} minibatches) in {} sec",
                    (epoch + 1), numEpochs, (i + 1), trainSubsets.length, batchesBtwCheckpoints,
                    (end - start) / 1000);

            String fileName = "netCheckpoint_" + System.currentTimeMillis() + "_epoch" + epoch + "_subset" + i
                    + ".zip";
            String outpath = FilenameUtils.concat(outputPath, "nets/" + fileName);
            File f = new File(outpath);
            if (firstSave) {
                firstSave = false;
                f.getParentFile().mkdirs();
            }
            ModelSerializer.writeModel(sparkNet.getNetwork(), f, true);
            log.info("Saved network checkpoint to {}", outpath);

            //Now, evaluate the saved checkpoint files
            List<ToEval> toEval = new ArrayList<>();
            while (toEvalQueue.size() > 0) {
                toEval.add(toEvalQueue.remove());
            }

            if (totalExamplesTest > 0 && toEval.size() > 0) {
                log.info("Starting evaluation of {} checkpoint files", toEval.size());
                ComputationGraph cgForEval = sparkNet.getNetwork().clone();
                SparkComputationGraph scgForEval = new SparkComputationGraph(sc, cgForEval, null);
                for (ToEval te : toEval) {
                    INDArray params = Nd4j.readBinary(te.getFile());
                    cgForEval.params().assign(params);

                    long startEval = System.currentTimeMillis();
                    IEvaluation[] evals = scgForEval.doEvaluation(testDataPaths, 4, minibatch, datasetLoader,
                            new Evaluation());
                    long endEval = System.currentTimeMillis();

                    StringBuilder sb = new StringBuilder();
                    Evaluation e = (Evaluation) evals[0];
                    sb.append("network ").append(te.getCount()).append(" trainingMs ")
                            .append(te.getDurationSoFar()).append(" evalMS ").append(endEval - startEval)
                            .append(" accuracy ").append(e.accuracy()).append(" f1 ").append(e.f1())
                            .append("\n");

                    FileUtils.writeStringToFile(new File(resultsFile), sb.toString(), Charset.forName("UTF-8"),
                            true); //Append new output to file
                    saveEvaluation(false, evals, sc);
                    log.info("Evaluation: {}", sb.toString());

                }
            }

            if (maxRuntimeSec > 0
                    && (System.currentTimeMillis() - startTrain) / MILLISEC_PER_SEC > maxRuntimeSec) {
                log.info("Terminating due to exceeding max runtime");
                epoch = numEpochs;
                break;
            }
        }
        firstSubsetIdx = 0;
    }

    log.info("----- Example Complete -----");
    sc.stop();
    System.exit(0);
}

From source file:org.apache.hadoop.hive.ql.Driver.java

private boolean requiresLock() {
    if (!checkConcurrency()) {
        return false;
    }
    // Lock operations themselves don't require the lock.
    if (isExplicitLockOperation()) {
        return false;
    }
    if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_LOCK_MAPRED_ONLY)) {
        return true;
    }
    Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
    taskQueue.addAll(plan.getRootTasks());
    while (taskQueue.peek() != null) {
        Task<? extends Serializable> tsk = taskQueue.remove();
        if (tsk.requireLock()) {
            return true;
        }
        if (tsk instanceof ConditionalTask) {
            taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
        }
        if (tsk.getChildTasks() != null) {
            taskQueue.addAll(tsk.getChildTasks());
        }
        // does not add back up task here, because back up task should be the same
        // type of the original task.
    }
    return false;
}