Example usage for java.util SortedMap entrySet

List of usage examples for java.util SortedMap entrySet

Introduction

On this page you can find example usages of java.util SortedMap entrySet.

Prototype

Set<Map.Entry<K, V>> entrySet();

Document

Returns a Set view of the mappings contained in this map.
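
Before the project examples under Usage, here is a minimal, self-contained sketch (hypothetical class and variable names, not taken from any of the projects below) showing that entrySet() on a TreeMap iterates in key order and that the returned set is a live view of the map:

import java.util.Iterator;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public class SortedMapEntrySetDemo {
    public static void main(String[] args) {
        SortedMap<String, Integer> scores = new TreeMap<String, Integer>();
        scores.put("charlie", 3);
        scores.put("alice", 1);
        scores.put("bob", 2);

        // Iteration follows the map's sort order: alice, bob, charlie
        for (Map.Entry<String, Integer> entry : scores.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }

        // The entry set is a view: removing through its iterator also removes from the map
        Iterator<Map.Entry<String, Integer>> it = scores.entrySet().iterator();
        while (it.hasNext()) {
            if (it.next().getValue() < 2) {
                it.remove();
            }
        }
        System.out.println(scores); // prints {bob=2, charlie=3}
    }
}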

Usage

From source file:org.openmrs.module.ModuleFactory.java

/**
 * This method should not be called directly.<br>
 * <br>
 * The {@link #startModule(Module)} (and hence {@link Daemon#startModule(Module)}) calls this
 * method in a new Thread and is authenticated as the {@link Daemon} user<br>
 * <br>
 * Runs through extensionPoints and then calls {@link BaseModuleActivator#willStart()} on the
 * Module's activator. <br>
 * <br>
 * If a non null application context is passed in, it gets refreshed to make the module's
 * services available
 * 
 * @param module Module to start
 * @param isOpenmrsStartup Specifies whether this module is being started at application startup
 *            or not; this argument is ignored if a null application context is passed in
 * @param applicationContext the spring application context instance to refresh
 */
public static Module startModuleInternal(Module module, boolean isOpenmrsStartup,
        AbstractRefreshableApplicationContext applicationContext) throws ModuleException {

    if (module != null) {

        String moduleId = module.getModuleId();
        try {

            // check to be sure this module can run with our current version
            // of OpenMRS code
            String requireVersion = module.getRequireOpenmrsVersion();
            ModuleUtil.checkRequiredVersion(OpenmrsConstants.OPENMRS_VERSION_SHORT, requireVersion);

            // check for required modules
            if (!requiredModulesStarted(module)) {
                throw new ModuleException(getFailedToStartModuleMessage(module));
            }

            // fire up the classloader for this module
            ModuleClassLoader moduleClassLoader = new ModuleClassLoader(module,
                    ModuleFactory.class.getClassLoader());
            getModuleClassLoaderMap().put(module, moduleClassLoader);
            registerProvidedPackages(moduleClassLoader);

            // don't load the advice objects into the Context
            // At startup, the spring context isn't refreshed until all modules
            // have been loaded.  This causes errors if called here during a
            // module's startup if one of these advice points is on another
            // module because that other module's service won't have been loaded
            // into spring yet.  All advice for all modules must be reloaded
            // during a spring context refresh anyway, so skip the advice loading here
            // loadAdvice(module);

            // map extension point to a list of extensions for this module only
            Map<String, List<Extension>> moduleExtensionMap = new HashMap<String, List<Extension>>();
            for (Extension ext : module.getExtensions()) {

                String extId = ext.getExtensionId();
                List<Extension> tmpExtensions = moduleExtensionMap.get(extId);
                if (tmpExtensions == null) {
                    tmpExtensions = new Vector<Extension>();
                    moduleExtensionMap.put(extId, tmpExtensions);
                }

                tmpExtensions.add(ext);
            }

            // Sort this module's extensions, and merge them into the full extensions map
            Comparator<Extension> sortOrder = new Comparator<Extension>() {

                @Override
                public int compare(Extension e1, Extension e2) {
                    return Integer.valueOf(e1.getOrder()).compareTo(Integer.valueOf(e2.getOrder()));
                }
            };
            for (Map.Entry<String, List<Extension>> moduleExtensionEntry : moduleExtensionMap.entrySet()) {
                // Sort this module's extensions for current extension point
                List<Extension> sortedModuleExtensions = moduleExtensionEntry.getValue();
                Collections.sort(sortedModuleExtensions, sortOrder);

                // Get existing extensions, and append the ones from the new module
                List<Extension> extensions = getExtensionMap().get(moduleExtensionEntry.getKey());
                if (extensions == null) {
                    extensions = new Vector<Extension>();
                    getExtensionMap().put(moduleExtensionEntry.getKey(), extensions);
                }
                for (Extension ext : sortedModuleExtensions) {
                    log.debug(
                            "Adding to mapping ext: " + ext.getExtensionId() + " ext.class: " + ext.getClass());
                    extensions.add(ext);
                }
            }

            // run the module's sql update script
            // This and the property updates are the only things that can't
            // be undone at startup, so put these calls after any other
            // calls that might hinder startup
            SortedMap<String, String> diffs = SqlDiffFileParser.getSqlDiffs(module);

            try {
                // this method must check and run queries against the database.
                // to do this, it must be "authenticated".  Give the current
                // "user" the proxy privilege so this can be done. ("user" might
                // be nobody because this is being run at startup)
                Context.addProxyPrivilege("");

                for (Map.Entry<String, String> entry : diffs.entrySet()) {
                    String version = entry.getKey();
                    String sql = entry.getValue();
                    if (StringUtils.hasText(sql)) {
                        runDiff(module, version, sql);
                    }
                }
            } finally {
                // take the "authenticated" privilege away from the current "user"
                Context.removeProxyPrivilege("");
            }

            // run module's optional liquibase.xml immediately after sqldiff.xml
            runLiquibase(module);

            // effectively mark this module as started successfully
            getStartedModulesMap().put(moduleId, module);
            if (actualStartupOrder == null) {
                actualStartupOrder = new LinkedHashSet<String>();
            }
            actualStartupOrder.add(moduleId);

            try {
                // save the state of this module for future restarts
                saveGlobalProperty(moduleId + ".started", "true",
                        getGlobalPropertyStartedDescription(moduleId));

                // save the mandatory status
                saveGlobalProperty(moduleId + ".mandatory", String.valueOf(module.isMandatory()),
                        getGlobalPropertyMandatoryModuleDescription(moduleId));
            } catch (Exception e) {
                // pass over errors because this doesn't really concern startup
                // passing over this also allows for multiple of the same-named modules
                // to be loaded in junit tests that are run within one session
                log.debug("Got an error when trying to set the global property on module startup", e);
            }

            // (this must be done after putting the module in the started
            // list)
            // if this module defined any privileges or global properties,
            // make sure they are added to the database
            // (Unfortunately, placing the call here will duplicate work
            // done at initial app startup)
            if (module.getPrivileges().size() > 0 || module.getGlobalProperties().size() > 0) {
                log.debug("Updating core dataset");
                Context.checkCoreDataset();
                // checkCoreDataset() currently doesn't throw an error. If
                // it did, it needs to be
                // caught and the module needs to be stopped and given a
                // startup error
            }

            // should be near the bottom so the module has all of its stuff
            // set up for it already.
            try {
                if (module.getModuleActivator() != null) {
                    // if extends BaseModuleActivator
                    module.getModuleActivator().willStart();
                } else {
                    module.getActivator().startup();//implements old Activator interface
                }
            } catch (ModuleException e) {
                // just rethrow module exceptions. This should be used for a
                // module marking that it had trouble starting
                throw e;
            } catch (Exception e) {
                throw new ModuleException("Error while calling module's Activator.startup()/willStart() method",
                        e);
            }

            // erase any previous startup error
            module.clearStartupError();
        } catch (Exception e) {
            log.warn("Error while trying to start module: " + moduleId, e);
            module.setStartupErrorMessage("Error while trying to start module", e);
            notifySuperUsersAboutModuleFailure(module);

            // undo all of the actions in startup
            try {
                boolean skipOverStartedProperty = false;

                if (e instanceof ModuleMustStartException) {
                    skipOverStartedProperty = true;
                }

                stopModule(module, skipOverStartedProperty, true);
            } catch (Exception e2) {
                // this will probably occur about the same place as the
                // error in startup
                log.debug("Error while stopping module: " + moduleId, e2);
            }
        }

    }

    if (applicationContext != null) {
        ModuleUtil.refreshApplicationContext(applicationContext, isOpenmrsStartup, module);
    }

    return module;
}

From source file:org.opennms.features.newts.converter.NewtsConverter.java

private void injectSamplesToNewts(final ResourcePath resourcePath, final String group,
        final List<? extends AbstractDS> dataSources, final SortedMap<Long, List<Double>> samples) {
    final ResourcePath groupPath = ResourcePath.get(resourcePath, group);

    // Create a resource ID from the resource path
    final String groupId = NewtsUtils.toResourceId(groupPath);

    // Build indexing attributes
    final Map<String, String> attributes = Maps.newHashMap();
    NewtsUtils.addIndicesToAttributes(groupPath, attributes);

    // Create the NewTS resource to insert
    final Resource resource = new Resource(groupId, Optional.of(attributes));

    // Transform the RRD samples into NewTS samples
    List<Sample> batch = new ArrayList<>(this.batchSize);
    for (final Map.Entry<Long, List<Double>> s : samples.entrySet()) {
        for (int i = 0; i < dataSources.size(); i++) {
            final double value = s.getValue().get(i);
            if (Double.isNaN(value)) {
                continue;
            }
            final AbstractDS ds = dataSources.get(i);
            final Timestamp timestamp = Timestamp.fromEpochSeconds(s.getKey());

            try {
                batch.add(toSample(ds, resource, timestamp, value));
            } catch (IllegalArgumentException e) {
                // This can happen when the value is outside of the range for the expected
                // type i.e. negative for a counter, so we silently skip these
                continue;
            }

            if (batch.size() >= this.batchSize) {
                this.repository.insert(batch, true);
                this.processedSamples.getAndAdd(batch.size());

                batch = new ArrayList<>(this.batchSize);
            }
        }
    }

    if (!batch.isEmpty()) {
        this.repository.insert(batch, true);
        this.processedSamples.getAndAdd(batch.size());
    }

    this.processedMetrics.getAndAdd(dataSources.size());

    LOG.trace("Stats: {} / {}", this.processedMetrics, this.processedSamples);
}

From source file:org.opencastproject.workflow.handler.textanalyzer.TextAnalysisWorkflowOperationHandler.java

/**
 * Runs the text analysis service on each of the video segments found.
 *
 * @param mediaPackage
 *          the original mediapackage
 * @param operation
 *          the workflow operation
 * @throws ExecutionException
 * @throws InterruptedException
 * @throws NotFoundException
 * @throws WorkflowOperationException
 */
protected WorkflowOperationResult extractVideoText(final MediaPackage mediaPackage,
        WorkflowOperationInstance operation)
        throws EncoderException, InterruptedException, ExecutionException, IOException, NotFoundException,
        MediaPackageException, TextAnalyzerException, WorkflowOperationException, ServiceRegistryException {
    long totalTimeInQueue = 0;

    List<String> sourceTagSet = asList(operation.getConfiguration("source-tags"));
    List<String> targetTagSet = asList(operation.getConfiguration("target-tags"));

    // Select the catalogs according to the tags
    Map<Catalog, Mpeg7Catalog> catalogs = loadSegmentCatalogs(mediaPackage, operation);

    // Was there at least one matching catalog
    if (catalogs.size() == 0) {
        logger.debug("Mediapackage {} has no suitable mpeg-7 catalogs based on tags {} to to run text analysis",
                mediaPackage, sourceTagSet);
        return createResult(mediaPackage, Action.CONTINUE);
    }

    // Loop over all existing segment catalogs
    for (Entry<Catalog, Mpeg7Catalog> mapEntry : catalogs.entrySet()) {
        Map<VideoSegment, Job> jobs = new HashMap<VideoSegment, Job>();
        List<Attachment> images = new LinkedList<Attachment>();
        Catalog segmentCatalog = mapEntry.getKey();
        try {
            MediaPackageReference catalogRef = segmentCatalog.getReference();

            // Make sure we can figure out the source track
            if (catalogRef == null) {
                logger.info("Skipping catalog {} since we can't determine the source track", segmentCatalog);
            } else if (mediaPackage.getElementByReference(catalogRef) == null) {
                logger.info("Skipping catalog {} since we can't determine the source track", segmentCatalog);
            } else if (!(mediaPackage.getElementByReference(catalogRef) instanceof Track)) {
                logger.info("Skipping catalog {} since it's source was not a track", segmentCatalog);
            }

            logger.info("Analyzing mpeg-7 segments catalog {} for text", segmentCatalog);

            // Create a copy that will contain the segments enriched with the video text elements
            Mpeg7Catalog textCatalog = mapEntry.getValue().clone();
            Track sourceTrack = mediaPackage.getTrack(catalogRef.getIdentifier());

            // Load the temporal decomposition (segments)
            Video videoContent = textCatalog.videoContent().next();
            TemporalDecomposition<? extends Segment> decomposition = videoContent.getTemporalDecomposition();
            Iterator<? extends Segment> segmentIterator = decomposition.segments();

            // For every segment, try to find the still image and run text analysis on it
            List<VideoSegment> videoSegments = new LinkedList<VideoSegment>();
            while (segmentIterator.hasNext()) {
                Segment segment = segmentIterator.next();
                if ((segment instanceof VideoSegment))
                    videoSegments.add((VideoSegment) segment);
            }

            // argument array for image extraction
            long[] times = new long[videoSegments.size()];

            for (int i = 0; i < videoSegments.size(); i++) {
                VideoSegment videoSegment = videoSegments.get(i);
                MediaTimePoint segmentTimePoint = videoSegment.getMediaTime().getMediaTimePoint();
                MediaDuration segmentDuration = videoSegment.getMediaTime().getMediaDuration();

                // Choose a time
                MediaPackageReference reference = null;
                if (catalogRef == null)
                    reference = new MediaPackageReferenceImpl();
                else
                    reference = new MediaPackageReferenceImpl(catalogRef.getType(), catalogRef.getIdentifier());
                reference.setProperty("time", segmentTimePoint.toString());

                // Choose the time for the OCR image. To circumvent problems with slowly building slides, we take an image
                // almost at the end of the segment; it should contain the most content and be stable as well.
                long startTimeSeconds = segmentTimePoint.getTimeInMilliseconds() / 1000;
                long durationSeconds = segmentDuration.getDurationInMilliseconds() / 1000;
                times[i] = Math.max(startTimeSeconds + durationSeconds - stabilityThreshold + 1, 0);
            }

            // Have the ocr image(s) created.

            // TODO: Note that the way of having one image extracted after the other is suited for
            // the ffmpeg-based encoder. When switching to other encoding engines such as gstreamer, it might be preferable
            // to pass in all timepoints to the image extraction method at once.
            SortedMap<Long, Job> extractImageJobs = new TreeMap<Long, Job>();

            try {
                for (long time : times) {
                    extractImageJobs.put(time, composer.image(sourceTrack, IMAGE_EXTRACTION_PROFILE, time));
                }
                if (!waitForStatus(extractImageJobs.values().toArray(new Job[extractImageJobs.size()]))
                        .isSuccess())
                    throw new WorkflowOperationException(
                            "Extracting scene image from " + sourceTrack + " failed");
                for (Map.Entry<Long, Job> entry : extractImageJobs.entrySet()) {
                    Job job = serviceRegistry.getJob(entry.getValue().getId());
                    Attachment image = (Attachment) MediaPackageElementParser.getFromXml(job.getPayload());
                    images.add(image);
                    totalTimeInQueue += job.getQueueTime();
                }
            } catch (EncoderException e) {
                logger.error("Error creating still image(s) from {}", sourceTrack);
                throw e;
            }

            // Run text extraction on each of the images
            Iterator<VideoSegment> it = videoSegments.iterator();
            for (MediaPackageElement element : images) {
                Attachment image = (Attachment) element;
                VideoSegment videoSegment = it.next();
                jobs.put(videoSegment, analysisService.extract(image));
            }

            // Wait for all jobs to be finished
            if (!waitForStatus(jobs.values().toArray(new Job[jobs.size()])).isSuccess()) {
                throw new WorkflowOperationException("Text extraction failed on images from " + sourceTrack);
            }

            // Process the text extraction results
            for (Map.Entry<VideoSegment, Job> entry : jobs.entrySet()) {
                Job job = serviceRegistry.getJob(entry.getValue().getId());
                totalTimeInQueue += job.getQueueTime();

                VideoSegment videoSegment = entry.getKey();
                MediaDuration segmentDuration = videoSegment.getMediaTime().getMediaDuration();
                Catalog catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
                if (catalog == null) {
                    logger.warn("Text analysis did not return a valid mpeg7 for segment {}", videoSegment);
                    continue;
                }
                Mpeg7Catalog videoTextCatalog = loadMpeg7Catalog(catalog);
                if (videoTextCatalog == null)
                    throw new IllegalStateException("Text analysis service did not return a valid mpeg7");

                // Add the spatiotemporal decompositions from the new catalog to the existing video segments
                Iterator<Video> videoTextContents = videoTextCatalog.videoContent();
                if (videoTextContents == null || !videoTextContents.hasNext()) {
                    logger.debug("Text analysis was not able to extract any text from {}",
                            job.getArguments().get(0));
                    break;
                }

                try {
                    Video textVideoContent = videoTextContents.next();
                    VideoSegment textVideoSegment = (VideoSegment) textVideoContent.getTemporalDecomposition()
                            .segments().next();
                    VideoText[] videoTexts = textVideoSegment.getSpatioTemporalDecomposition().getVideoText();
                    SpatioTemporalDecomposition std = videoSegment.createSpatioTemporalDecomposition(true,
                            false);
                    for (VideoText videoText : videoTexts) {
                        MediaTime mediaTime = new MediaTimeImpl(new MediaRelTimePointImpl(0), segmentDuration);
                        SpatioTemporalLocator locator = new SpatioTemporalLocatorImpl(mediaTime);
                        videoText.setSpatioTemporalLocator(locator);
                        std.addVideoText(videoText);
                    }
                } catch (Exception e) {
                    logger.warn("The mpeg-7 structure returned by the text analyzer is not what is expected",
                            e);
                    continue;
                }
            }

            // Put the catalog into the workspace and add it to the media package
            MediaPackageElementBuilder builder = MediaPackageElementBuilderFactory.newInstance()
                    .newElementBuilder();
            Catalog catalog = (Catalog) builder.newElement(MediaPackageElement.Type.Catalog,
                    MediaPackageElements.TEXTS);
            catalog.setIdentifier(null);
            catalog.setReference(segmentCatalog.getReference());
            mediaPackage.add(catalog); // the catalog now has an ID, so we can store the file properly
            InputStream in = mpeg7CatalogService.serialize(textCatalog);
            String filename = "slidetext.xml";
            URI workspaceURI = workspace.put(mediaPackage.getIdentifier().toString(), catalog.getIdentifier(),
                    filename, in);
            catalog.setURI(workspaceURI);

            // Since we've enriched and stored the mpeg7 catalog, remove the original
            try {
                mediaPackage.remove(segmentCatalog);
                workspace.delete(segmentCatalog.getURI());
            } catch (Exception e) {
                logger.warn("Unable to delete segment catalog {}: {}", segmentCatalog.getURI(), e);
            }

            // Add flavor and target tags
            catalog.setFlavor(MediaPackageElements.TEXTS);
            for (String tag : targetTagSet) {
                catalog.addTag(tag);
            }
        } finally {
            // Remove images that were created for text extraction
            logger.debug("Removing temporary images");
            for (Attachment image : images) {
                try {
                    workspace.delete(image.getURI());
                } catch (Exception e) {
                    logger.warn("Unable to delete temporary image {}: {}", image.getURI(), e);
                }
            }
            // Remove the temporary text
            for (Job j : jobs.values()) {
                Catalog catalog = null;
                try {
                    Job job = serviceRegistry.getJob(j.getId());
                    if (!Job.Status.FINISHED.equals(job.getStatus()))
                        continue;
                    catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
                    if (catalog != null)
                        workspace.delete(catalog.getURI());
                } catch (Exception e) {
                    if (catalog != null) {
                        logger.warn("Unable to delete temporary text file {}: {}", catalog.getURI(), e);
                    } else {
                        logger.warn("Unable to parse textextraction payload of job {}", j.getId());
                    }
                }
            }
        }
    }

    logger.debug("Text analysis completed");
    return createResult(mediaPackage, Action.CONTINUE, totalTimeInQueue);
}

From source file:com.aurel.track.item.link.ItemLinkBL.java

/**
 * Gets the links for a workItem
 * @return
 */
static List<ItemLinkListEntry> getLinks(SortedMap<Integer, TWorkItemLinkBean> successorsForMeAsPredecessorMap,
        SortedMap<Integer, TWorkItemLinkBean> predecessorsForMeAsSuccessorMap, boolean editable, Locale locale,
        boolean newItem) {
    Map<Integer, TLinkTypeBean> linkTypeMap = GeneralUtils.createMapFromList(LinkTypeBL.loadAll());
    List<ItemLinkListEntry> itemLinkList = new ArrayList<ItemLinkListEntry>();
    //links from me as predecessor to successors
    if (successorsForMeAsPredecessorMap != null && !successorsForMeAsPredecessorMap.isEmpty()) {
        Set<Integer> successorItemIDs = new HashSet<Integer>();
        for (TWorkItemLinkBean workItemLinkBean : successorsForMeAsPredecessorMap.values()) {
            successorItemIDs.add(workItemLinkBean.getLinkSucc());
        }
        List<TWorkItemBean> successorItems = ItemBL
                .loadByWorkItemKeys(GeneralUtils.createIntArrFromSet(successorItemIDs));
        Map<Integer, TWorkItemBean> workItemsMap = GeneralUtils.createMapFromList(successorItems);
        for (Map.Entry<Integer, TWorkItemLinkBean> entry : successorsForMeAsPredecessorMap.entrySet()) {
            TWorkItemLinkBean workItemLinkBean = entry.getValue();
            Integer linkID = null;
            if (newItem) {
                //the sort order from the map
                linkID = entry.getKey();
            } else {
                //the saved linkID
                linkID = workItemLinkBean.getObjectID();
            }
            Integer linkType = workItemLinkBean.getLinkType();
            TLinkTypeBean linkTypeBean = linkTypeMap.get(linkType);
            // guard against a missing link type before dereferencing it
            Integer linkTypeDirection = (linkTypeBean == null) ? null : linkTypeBean.getLinkDirection();
            if (linkTypeBean == null || linkTypeDirection == null
                    || linkTypeDirection.intValue() == LINK_DIRECTION.RIGHT_TO_LEFT) {
                //remove the links of type "right to left". Bidirectional and "left to right" (pred to succ) relations remain
                //for right to left link types the links are visible only from successor item
                continue;
            }
            Integer succesorItemID = workItemLinkBean.getLinkSucc();
            TWorkItemBean workItemBean = workItemsMap.get(succesorItemID);
            ItemLinkListEntry itemLinkListEntry = new ItemLinkListEntry();
            itemLinkListEntry.setSortOrder(workItemLinkBean.getSortorder());
            itemLinkListEntry.setLinkType(linkType);
            itemLinkListEntry.setLinkDirection(linkTypeDirection);
            itemLinkListEntry.setLinkedWorkItemID(succesorItemID);
            if (workItemBean != null) {
                itemLinkListEntry.setLinkedWorkItemTitle(workItemBean.getSynopsis());
            }
            itemLinkListEntry.setDescription(workItemLinkBean.getDescription());
            itemLinkListEntry.setLinkID(linkID);
            itemLinkListEntry.setLinkTypeName(
                    LinkTypeBL.getLinkTypeName(linkTypeBean, workItemLinkBean.getLinkDirection(), locale));
            TStateBean stateBean = LookupContainer.getStatusBean(workItemBean.getStateID(), locale);
            if (stateBean != null) {
                itemLinkListEntry.setStateLabel(stateBean.getLabel());
            }
            ILabelBean responsiblePerson = LookupContainer.getPersonBean(workItemBean.getResponsibleID());
            if (responsiblePerson != null) {
                itemLinkListEntry.setResponsibleLabel(responsiblePerson.getLabel());
            }
            itemLinkListEntry.setLastEdit(workItemLinkBean.getLastEdit());
            boolean isInline = false;
            ILinkType linkTypeInstance = LinkTypeBL.getLinkTypePluginInstanceByLinkTypeKey(linkType);
            if (linkTypeInstance != null) {
                itemLinkListEntry.setParameters(linkTypeInstance.prepareParametersOnLinkTab(workItemLinkBean,
                        linkTypeDirection, locale));
                itemLinkListEntry.setParameterMap(linkTypeInstance.prepareParametersMap(workItemLinkBean));
                isInline = linkTypeInstance.isInline();
            }
            itemLinkListEntry.setEditable(editable && !isInline);
            itemLinkList.add(itemLinkListEntry);
        }
    }
    //links from me as successor to predecessors
    if (predecessorsForMeAsSuccessorMap != null && !predecessorsForMeAsSuccessorMap.isEmpty()) {
        Set<Integer> predecessorItemIDs = new HashSet<Integer>();
        for (TWorkItemLinkBean workItemLinkBean : predecessorsForMeAsSuccessorMap.values()) {
            predecessorItemIDs.add(workItemLinkBean.getLinkPred());
        }
        List<TWorkItemBean> predecessorItems = ItemBL
                .loadByWorkItemKeys(GeneralUtils.createIntArrFromSet(predecessorItemIDs));
        Map<Integer, TWorkItemBean> workItemsMap = GeneralUtils.createMapFromList(predecessorItems);
        for (Map.Entry<Integer, TWorkItemLinkBean> entry : predecessorsForMeAsSuccessorMap.entrySet()) {
            TWorkItemLinkBean workItemLinkBean = entry.getValue();
            Integer linkID = null;
            if (newItem) {
                //the sort order from the map
                linkID = entry.getKey();
            } else {
                //the saved linkID
                linkID = workItemLinkBean.getObjectID();
            }
            Integer linkType = workItemLinkBean.getLinkType();
            TLinkTypeBean linkTypeBean = linkTypeMap.get(linkType);
            // guard against a missing link type before dereferencing it
            Integer linkTypeDirection = (linkTypeBean == null) ? null : linkTypeBean.getLinkDirection();
            if (linkTypeBean == null || linkTypeDirection == null
                    || linkTypeDirection.intValue() == LINK_DIRECTION.LEFT_TO_RIGHT) {
                //remove the links of type "left to right". Bidirectional and "right to left" (pred to succ) relations remain
                //for left to right link types the links are visible only from predecessor item
                continue;
            }
            if (linkTypeDirection.intValue() == LINK_DIRECTION.BIDIRECTIONAL) {
                linkTypeDirection = LinkTypeBL.getReverseDirection(workItemLinkBean.getLinkDirection());
            }
            Integer predecessorItemID = workItemLinkBean.getLinkPred();
            TWorkItemBean workItemBean = workItemsMap.get(predecessorItemID);
            ItemLinkListEntry itemLinkListEntry = new ItemLinkListEntry();
            itemLinkListEntry.setSortOrder(workItemLinkBean.getSortorder());
            itemLinkListEntry.setLinkType(linkType);
            itemLinkListEntry.setLinkDirection(linkTypeDirection);
            itemLinkListEntry.setLinkedWorkItemID(predecessorItemID);
            if (workItemBean != null) {
                itemLinkListEntry.setLinkedWorkItemTitle(workItemBean.getSynopsis());
            }
            itemLinkListEntry.setDescription(workItemLinkBean.getDescription());
            itemLinkListEntry.setLinkID(linkID);
            itemLinkListEntry
                    .setLinkTypeName(LinkTypeBL.getLinkTypeName(linkTypeBean, linkTypeDirection, locale));
            TStateBean stateBean = LookupContainer.getStatusBean(workItemBean.getStateID(), locale);
            if (stateBean != null) {
                itemLinkListEntry.setStateLabel(stateBean.getLabel());
            }
            ILabelBean responsiblePerson = LookupContainer.getPersonBean(workItemBean.getResponsibleID());
            if (responsiblePerson != null) {
                itemLinkListEntry.setResponsibleLabel(responsiblePerson.getLabel());
            }
            itemLinkListEntry.setLastEdit(workItemLinkBean.getLastEdit());
            ILinkType linkTypeInstance = LinkTypeBL.getLinkTypePluginInstanceByLinkTypeKey(linkType);
            boolean isInline = false;
            if (linkTypeInstance != null) {
                itemLinkListEntry.setParameters(linkTypeInstance.prepareParametersOnLinkTab(workItemLinkBean,
                        linkTypeDirection, locale));
                itemLinkListEntry.setParameterMap(linkTypeInstance.prepareParametersMap(workItemLinkBean));
                isInline = linkTypeInstance.isInline();
            }
            itemLinkListEntry.setEditable(editable && !isInline);
            itemLinkList.add(itemLinkListEntry);
        }
    }
    Collections.sort(itemLinkList);
    return itemLinkList;
}

From source file:org.opencastproject.workflow.handler.TextAnalysisWorkflowOperationHandler.java

/**
 * Runs the text analysis service on each of the video segments found.
 *
 * @param mediaPackage
 *          the original mediapackage
 * @param operation
 *          the workflow operation
 * @throws ExecutionException
 * @throws InterruptedException
 * @throws NotFoundException
 * @throws WorkflowOperationException
 */
protected WorkflowOperationResult extractVideoText(final MediaPackage mediaPackage,
        WorkflowOperationInstance operation)
        throws EncoderException, InterruptedException, ExecutionException, IOException, NotFoundException,
        MediaPackageException, TextAnalyzerException, WorkflowOperationException, ServiceRegistryException {
    long totalTimeInQueue = 0;

    List<String> sourceTagSet = asList(operation.getConfiguration("source-tags"));
    List<String> targetTagSet = asList(operation.getConfiguration("target-tags"));

    // Select the catalogs according to the tags
    Map<Catalog, Mpeg7Catalog> catalogs = loadSegmentCatalogs(mediaPackage, operation);

    // Was there at least one matching catalog
    if (catalogs.size() == 0) {
        logger.debug("Mediapackage {} has no suitable mpeg-7 catalogs based on tags {} to to run text analysis",
                mediaPackage, sourceTagSet);
        return createResult(mediaPackage, Action.CONTINUE);
    }

    // We need the videosegmenter's stability threshold in order to do proper work. If we can't get it, the default is
    // most probably ok, but certainly suboptimal.
    int stabilityThreshold = getStabilityThreshold();
    logger.debug("Using stability threshold {}s for slide extraction", stabilityThreshold);

    // Loop over all existing segment catalogs
    for (Entry<Catalog, Mpeg7Catalog> mapEntry : catalogs.entrySet()) {
        Map<VideoSegment, Job> jobs = new HashMap<VideoSegment, Job>();
        List<Attachment> images = new LinkedList<Attachment>();
        Catalog segmentCatalog = mapEntry.getKey();
        try {
            MediaPackageReference catalogRef = segmentCatalog.getReference();

            // Make sure we can figure out the source track
            if (catalogRef == null) {
                logger.info("Skipping catalog {} since we can't determine the source track", segmentCatalog);
            } else if (mediaPackage.getElementByReference(catalogRef) == null) {
                logger.info("Skipping catalog {} since we can't determine the source track", segmentCatalog);
            } else if (!(mediaPackage.getElementByReference(catalogRef) instanceof Track)) {
                logger.info("Skipping catalog {} since it's source was not a track", segmentCatalog);
            }

            logger.info("Analyzing mpeg-7 segments catalog {} for text", segmentCatalog);

            // Create a copy that will contain the segments enriched with the video text elements
            Mpeg7Catalog textCatalog = mapEntry.getValue().clone();
            Track sourceTrack = (Track) mediaPackage.getTrack(catalogRef.getIdentifier());

            // Load the temporal decomposition (segments)
            Video videoContent = textCatalog.videoContent().next();
            TemporalDecomposition<? extends Segment> decomposition = videoContent.getTemporalDecomposition();
            Iterator<? extends Segment> segmentIterator = decomposition.segments();

            // For every segment, try to find the still image and run text analysis on it
            List<VideoSegment> videoSegments = new LinkedList<VideoSegment>();
            while (segmentIterator.hasNext()) {
                Segment segment = segmentIterator.next();
                if ((segment instanceof VideoSegment))
                    videoSegments.add((VideoSegment) segment);
            }

            // argument array for image extraction
            long[] times = new long[videoSegments.size()];

            for (int i = 0; i < videoSegments.size(); i++) {
                VideoSegment videoSegment = videoSegments.get(i);
                MediaTimePoint segmentTimePoint = videoSegment.getMediaTime().getMediaTimePoint();
                MediaDuration segmentDuration = videoSegment.getMediaTime().getMediaDuration();

                // Choose a time
                MediaPackageReference reference = null;
                if (catalogRef == null)
                    reference = new MediaPackageReferenceImpl();
                else
                    reference = new MediaPackageReferenceImpl(catalogRef.getType(), catalogRef.getIdentifier());
                reference.setProperty("time", segmentTimePoint.toString());

                // Choose the time for the OCR image. To circumvent problems with slowly building slides, we take an image
                // almost at the end of the segment; it should contain the most content and be stable as well.
                long startTimeSeconds = segmentTimePoint.getTimeInMilliseconds() / 1000;
                long durationSeconds = segmentDuration.getDurationInMilliseconds() / 1000;
                times[i] = Math.max(startTimeSeconds + durationSeconds - stabilityThreshold + 1, 0);
            }

            // Have the ocr image(s) created.

            // TODO: Note that the way of having one image extracted after the other is suited for
            // the ffmpeg-based encoder. When switching to other encoding engines such as gstreamer, it might be preferable
            // to pass in all timepoints to the image extraction method at once.
            SortedMap<Long, Job> extractImageJobs = new TreeMap<Long, Job>();

            try {
                for (long time : times) {
                    extractImageJobs.put(time, composer.image(sourceTrack, IMAGE_EXTRACTION_PROFILE, time));
                }
                if (!waitForStatus(extractImageJobs.values().toArray(new Job[extractImageJobs.size()]))
                        .isSuccess())
                    throw new WorkflowOperationException(
                            "Extracting scene image from " + sourceTrack + " failed");
                for (Map.Entry<Long, Job> entry : extractImageJobs.entrySet()) {
                    Job job = serviceRegistry.getJob(entry.getValue().getId());
                    Attachment image = (Attachment) MediaPackageElementParser.getFromXml(job.getPayload());
                    images.add(image);
                    totalTimeInQueue += job.getQueueTime();
                }
            } catch (EncoderException e) {
                logger.error("Error creating still image(s) from {}", sourceTrack);
                throw e;
            }

            // Run text extraction on each of the images
            Iterator<VideoSegment> it = videoSegments.iterator();
            for (MediaPackageElement element : images) {
                Attachment image = (Attachment) element;
                VideoSegment videoSegment = it.next();
                jobs.put(videoSegment, analysisService.extract(image));
            }

            // Wait for all jobs to be finished
            if (!waitForStatus(jobs.values().toArray(new Job[jobs.size()])).isSuccess()) {
                throw new WorkflowOperationException("Text extraction failed on images from " + sourceTrack);
            }

            // Process the text extraction results
            for (Map.Entry<VideoSegment, Job> entry : jobs.entrySet()) {
                Job job = serviceRegistry.getJob(entry.getValue().getId());
                totalTimeInQueue += job.getQueueTime();

                VideoSegment videoSegment = entry.getKey();
                MediaDuration segmentDuration = videoSegment.getMediaTime().getMediaDuration();
                Catalog catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
                if (catalog == null) {
                    logger.warn("Text analysis did not return a valid mpeg7 for segment {}", videoSegment);
                    continue;
                }
                Mpeg7Catalog videoTextCatalog = loadMpeg7Catalog(catalog);
                if (videoTextCatalog == null)
                    throw new IllegalStateException("Text analysis service did not return a valid mpeg7");

                // Add the spatiotemporal decompositions from the new catalog to the existing video segments
                Iterator<Video> videoTextContents = videoTextCatalog.videoContent();
                if (videoTextContents == null || !videoTextContents.hasNext()) {
                    logger.debug("Text analysis was not able to extract any text from {}",
                            job.getArguments().get(0));
                    break;
                }

                try {
                    Video textVideoContent = videoTextContents.next();
                    VideoSegment textVideoSegment = (VideoSegment) textVideoContent.getTemporalDecomposition()
                            .segments().next();
                    VideoText[] videoTexts = textVideoSegment.getSpatioTemporalDecomposition().getVideoText();
                    SpatioTemporalDecomposition std = videoSegment.createSpatioTemporalDecomposition(true,
                            false);
                    for (VideoText videoText : videoTexts) {
                        MediaTime mediaTime = new MediaTimeImpl(new MediaRelTimePointImpl(0), segmentDuration);
                        SpatioTemporalLocator locator = new SpatioTemporalLocatorImpl(mediaTime);
                        videoText.setSpatioTemporalLocator(locator);
                        std.addVideoText(videoText);
                    }
                } catch (Exception e) {
                    logger.warn("The mpeg-7 structure returned by the text analyzer is not what is expected",
                            e);
                    continue;
                }
            }

            // Put the catalog into the workspace and add it to the media package
            MediaPackageElementBuilder builder = MediaPackageElementBuilderFactory.newInstance()
                    .newElementBuilder();
            Catalog catalog = (Catalog) builder.newElement(MediaPackageElement.Type.Catalog,
                    MediaPackageElements.TEXTS);
            catalog.setIdentifier(null);
            catalog.setReference(segmentCatalog.getReference());
            mediaPackage.add(catalog); // the catalog now has an ID, so we can store the file properly
            InputStream in = mpeg7CatalogService.serialize(textCatalog);
            String filename = "slidetext.xml";
            URI workspaceURI = workspace.put(mediaPackage.getIdentifier().toString(), catalog.getIdentifier(),
                    filename, in);
            catalog.setURI(workspaceURI);

            // Since we've enriched and stored the mpeg7 catalog, remove the original
            mediaPackage.remove(segmentCatalog);

            // Add flavor and target tags
            catalog.setFlavor(MediaPackageElements.TEXTS);
            for (String tag : targetTagSet) {
                catalog.addTag(tag);
            }
        } finally {
            try {
                workspace.delete(segmentCatalog.getURI());
            } catch (Exception e) {
                logger.warn("Unable to delete segment catalog {}: {}", segmentCatalog.getURI(), e);
            }
            // Remove images that were created for text extraction
            logger.debug("Removing temporary images");
            for (Attachment image : images) {
                try {
                    workspace.delete(image.getURI());
                } catch (Exception e) {
                    logger.warn("Unable to delete temporary image {}: {}", image.getURI(), e);
                }
            }
            // Remove the temporary text
            for (Job j : jobs.values()) {
                Catalog catalog = null;
                try {
                    Job job = serviceRegistry.getJob(j.getId());
                    catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
                    workspace.delete(catalog.getURI());
                } catch (Exception e) {
                    logger.warn("Unable to delete temporary text file {}: {}", catalog.getURI(), e);
                }
            }
        }
    }

    logger.debug("Text analysis completed");
    return createResult(mediaPackage, Action.CONTINUE, totalTimeInQueue);
}

From source file:org.texai.torrent.PeerCoordinator.java

/** Reports the upload statistics. */
public void reportUploadStatistics() {
    final SortedMap<Long, TrackedPeerInfo> sortedUploadStatisticsDictionary = new TreeMap<>();
    synchronized (uploadStatisticsDictionary) {
        for (final Entry<TrackedPeerInfo, Long> entry : uploadStatisticsDictionary.entrySet()) {
            sortedUploadStatisticsDictionary.put(entry.getValue(), entry.getKey());
        }
    }
    LOGGER.info("number bytes uploaded to peers ...");
    if (sortedUploadStatisticsDictionary.isEmpty()) {
        LOGGER.info("  none");
    } else {
        for (final Entry<Long, TrackedPeerInfo> entry : sortedUploadStatisticsDictionary.entrySet()) {
            LOGGER.info("  " + entry.getValue() + "  " + entry.getKey());
        }
    }
}

From source file:org.texai.torrent.PeerCoordinator.java

/** Reports the download statistics. */
public void reportDownloadStatistics() {
    final SortedMap<Long, TrackedPeerInfo> sortedDownloadStatisticsDictionary = new TreeMap<>();
    synchronized (downloadStatisticsDictionary) {
        for (final Entry<TrackedPeerInfo, Long> entry : downloadStatisticsDictionary.entrySet()) {
            sortedDownloadStatisticsDictionary.put(entry.getValue(), entry.getKey());
        }
    }
    LOGGER.info("number bytes downloaded from peers ...");
    if (sortedDownloadStatisticsDictionary.isEmpty()) {
        LOGGER.info("  none");
    } else {
        for (final Entry<Long, TrackedPeerInfo> entry : sortedDownloadStatisticsDictionary.entrySet()) {
            LOGGER.info("  " + entry.getValue() + "  " + entry.getKey());
        }
    }
}

From source file:org.apache.carbondata.core.scan.filter.FilterUtil.java

private static void pruneStartAndEndKeys(SortedMap<Integer, byte[]> setOfStartKeyByteArray,
        List<byte[]> listOfStartKeyByteArray) {
    for (Map.Entry<Integer, byte[]> entry : setOfStartKeyByteArray.entrySet()) {
        listOfStartKeyByteArray.add(entry.getValue());
    }
}

From source file:org.apache.sling.commons.metrics.internal.MetricWebConsolePlugin.java

private void addMeterDetails(PrintWriter pw, SortedMap<String, Meter> meters) {
    if (meters.isEmpty()) {
        return;
    }
    pw.println("<br>");
    pw.println("<div class='table'>");
    pw.println("<div class='ui-widget-header ui-corner-top buttonGroup'>Meters</div>");
    pw.println("<table class='nicetable' id='data-meters'>");
    pw.println("<thead>");
    pw.println("<tr>");
    pw.println("<th class='header'>Name</th>");
    pw.println("<th class='header'>Count</th>");
    pw.println("<th class='header'>Mean Rate</th>");
    pw.println("<th class='header'>OneMinuteRate</th>");
    pw.println("<th class='header'>FiveMinuteRate</th>");
    pw.println("<th class='header'>FifteenMinuteRate</ th>");
    pw.println("<th>RateUnit</th>");
    pw.println("</tr>");
    pw.println("</thead>");
    pw.println("<tbody>");

    String rowClass = "odd";
    for (Map.Entry<String, Meter> e : meters.entrySet()) {
        Meter m = e.getValue();
        String name = e.getKey();

        double rateFactor = timeUnit.rateFor(name).toSeconds(1);
        String rateUnit = "events/" + calculateRateUnit(timeUnit.rateFor(name));
        pw.printf("<tr class='%s ui-state-default'>%n", rowClass);

        pw.printf("<td>%s</td>", name);
        pw.printf("<td>%d</td>", m.getCount());
        pw.printf("<td>%f</td>", m.getMeanRate() * rateFactor);
        pw.printf("<td>%f</td>", m.getOneMinuteRate() * rateFactor);
        pw.printf("<td>%f</td>", m.getFiveMinuteRate() * rateFactor);
        pw.printf("<td>%f</td>", m.getFifteenMinuteRate() * rateFactor);
        pw.printf("<td>%s</td>", rateUnit);

        pw.println("</tr>");
        rowClass = "odd".equals(rowClass) ? "even" : "odd";
    }

    pw.println("</tbody>");
    pw.println("</table>");
    pw.println("</div>");
}

From source file:org.eclipse.ebr.maven.EclipseIpLogUtil.java

private void logOrCreateMissingCqs(final SortedMap<Artifact, Model> dependencies,
        final Map<String, String> existingCqs, final Map<String, Xpp3Dom> existingLicenses)
        throws MojoExecutionException {
    CloseableHttpClient httpclient = null;
    try {

        if ((server != null) && (projectId != null)) {
            httpclient = HttpClients.custom().setRedirectStrategy(new LaxRedirectStrategy()).build();
            loginToPortal(httpclient, server);
        }

        for (final Entry<Artifact, Model> dependency : dependencies.entrySet()) {
            final Artifact artifact = dependency.getKey();
            final String artifactFileName = artifact.getFile().getName();
            final String existingCq = existingCqs.get(artifactFileName);
            if ((null == existingCq) || existingCq.trim().isEmpty()) {
                if (httpclient != null) {
                    getLog().info(format("Creating CQ for artifact %s:%s:%s.", artifact.getGroupId(),
                            artifact.getArtifactId(), artifact.getVersion()));
                    final String cqId = createCq(httpclient, artifact, dependency.getValue(), existingLicenses);
                    existingCqs.put(artifactFileName, cqId);
                    getLog().info(format("Created CQ %s for %s (artifact %s:%s:%s).", cqId, artifactFileName,
                            artifact.getGroupId(), artifact.getArtifactId(), artifact.getVersion()));
                } else {
                    getLog().warn(format(
                            "Missing CQ for %s (artifact %s:%s:%s). Please visit portal.eclipse.org and file a CQ with IPzilla!",
                            artifactFileName, artifact.getGroupId(), artifact.getArtifactId(),
                            artifact.getVersion()));
                }
            }
        }
    } catch (final IOException | URISyntaxException e) {
        getLog().debug(e);
        throw new MojoExecutionException(
                "An error occured communicating with the Eclipse Portal: " + e.getMessage());
    } finally {
        if (httpclient != null) {
            try {
                httpclient.close();
            } catch (final IOException e) {
                getLog().debug("Ignored exception during close.", e);
            }
        }
    }
}