Example usage for java.util TreeMap isEmpty

List of usage examples for java.util TreeMap isEmpty

Introduction

This page collects example usages of java.util.TreeMap#isEmpty from open-source projects.

Prototype

boolean isEmpty();

Document

Returns true if this map contains no key-value mappings.
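
A minimal, self-contained sketch of the call described above (the class and variable names are illustrative, not taken from the project examples below):

import java.util.TreeMap;

public class TreeMapIsEmptyExample {
    public static void main(String[] args) {
        TreeMap<String, Integer> counts = new TreeMap<>();

        // A freshly created map contains no mappings.
        System.out.println(counts.isEmpty()); // true

        counts.put("alpha", 1);
        System.out.println(counts.isEmpty()); // false

        counts.remove("alpha");
        System.out.println(counts.isEmpty()); // true again
    }
}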

Usage

From source file: com.datatorrent.stram.plan.logical.LogicalPlanConfiguration.java

/**
 * Get the configuration opProps for the given operator.
 * These can be operator specific settings or settings from matching templates.
 * @param pa
 * @param opConfs
 * @param appName
 */
private Map<String, String> getProperties(PropertyArgs pa, List<OperatorConf> opConfs, String appName) {
    Map<String, String> opProps = Maps.newHashMap();
    Map<String, TemplateConf> templates = stramConf.getChildren(StramElement.TEMPLATE);
    // list of all templates that match operator, ordered by priority
    if (!templates.isEmpty()) {
        TreeMap<Integer, TemplateConf> matchingTemplates = getMatchingTemplates(pa, appName, templates);
        if (matchingTemplates != null && !matchingTemplates.isEmpty()) {
            // combined map of prioritized template settings
            for (TemplateConf t : matchingTemplates.descendingMap().values()) {
                opProps.putAll(Maps.fromProperties(t.properties));
            }
        }

        List<TemplateConf> refTemplates = getDirectTemplates(opConfs, templates);
        for (TemplateConf t : refTemplates) {
            opProps.putAll(Maps.fromProperties(t.properties));
        }
    }
    // direct settings
    // Apply the configurations in reverse order since the higher priority ones are at the beginning
    for (int i = opConfs.size() - 1; i >= 0; i--) {
        Conf conf1 = opConfs.get(i);
        opProps.putAll(Maps.fromProperties(conf1.properties));
    }
    return opProps;
}

From source file: com.redhat.rhn.taskomatic.task.DailySummary.java

/**
 * DO NOT CALL FROM OUTSIDE THIS CLASS. Renders the actions email message
 * @param actions list of recent actions
 * @return the actions email message
 */
public String renderActionsMessage(List<ActionMessage> actions) {

    int longestActionLength = HEADER_SPACER;
    int longestStatusLength = 0;
    StringBuilder hdr = new StringBuilder();
    StringBuilder body = new StringBuilder();
    StringBuilder legend = new StringBuilder();
    StringBuilder msg = new StringBuilder();
    LinkedHashSet<String> statusSet = new LinkedHashSet<>();
    TreeMap<String, Map<String, Integer>> nonErrataActions = new TreeMap<>();
    TreeMap<String, Map<String, Integer>> errataActions = new TreeMap<>();
    TreeMap<String, String> errataSynopsis = new TreeMap<>();

    legend.append(LocalizationService.getInstance().getMessage("taskomatic.daily.errata"));
    legend.append("\n\n");

    for (ActionMessage am : actions) {

        if (!statusSet.contains(am.getStatus())) {
            statusSet.add(am.getStatus());
            if (am.getStatus().length() > longestStatusLength) {
                longestStatusLength = am.getStatus().length();
            }
        }

        if (am.getType().equals(ERRATA_UPDATE)) {
            String advisoryKey = ERRATA_INDENTION + am.getAdvisory();

            if (!errataActions.containsKey(advisoryKey)) {
                errataActions.put(advisoryKey, new HashMap<>());
                if (advisoryKey.length() + HEADER_SPACER > longestActionLength) {
                    longestActionLength = advisoryKey.length() + HEADER_SPACER;
                }
            }
            Map<String, Integer> counts = errataActions.get(advisoryKey);
            counts.put(am.getStatus(), am.getCount());

            if (am.getAdvisory() != null && !errataSynopsis.containsKey(am.getAdvisory())) {
                errataSynopsis.put(am.getAdvisory(), am.getSynopsis());
            }
        } else {
            if (!nonErrataActions.containsKey(am.getType())) {
                nonErrataActions.put(am.getType(), new HashMap<>());
                if (am.getType().length() + HEADER_SPACER > longestActionLength) {
                    longestActionLength = am.getType().length() + HEADER_SPACER;
                }
            }
            Map<String, Integer> counts = nonErrataActions.get(am.getType());
            counts.put(am.getStatus(), am.getCount());
        }

    }

    hdr.append(StringUtils.repeat(" ", longestActionLength));
    for (String status : statusSet) {
        hdr.append(status + StringUtils.repeat(" ", (longestStatusLength + ERRATA_SPACER) - status.length()));
    }

    if (!errataActions.isEmpty()) {
        body.append(ERRATA_UPDATE + ":" + "\n");
    }
    StringBuffer formattedErrataActions = renderActionTree(longestActionLength, longestStatusLength, statusSet,
            errataActions);
    body.append(formattedErrataActions);

    for (String advisory : errataSynopsis.keySet()) {
        legend.append(ERRATA_INDENTION + advisory + ERRATA_INDENTION + errataSynopsis.get(advisory) + "\n");
    }

    StringBuffer formattedNonErrataActions = renderActionTree(longestActionLength, longestStatusLength,
            statusSet, nonErrataActions);
    body.append(formattedNonErrataActions);

    // finally put all this together
    msg.append(hdr.toString());
    msg.append("\n");
    msg.append(body.toString());
    msg.append("\n\n");
    if (!errataSynopsis.isEmpty()) {
        msg.append(legend.toString());
    }
    return msg.toString();
}

From source file: org.apache.druid.indexing.kafka.supervisor.KafkaSupervisor.java

/**
 * This method does two things:
 * 1. Makes sure the checkpoint information in the taskGroup is consistent with that of the tasks; if not, kills the
 *    inconsistent tasks.
 * 2. Truncates the checkpoints in the taskGroup whose segments have already been published, so that any newly
 *    created tasks for the taskGroup start indexing from after the latest published offsets.
 */
private void verifyAndMergeCheckpoints(final TaskGroup taskGroup) {
    final int groupId = taskGroup.groupId;
    final List<Pair<String, TreeMap<Integer, Map<Integer, Long>>>> taskSequences = new ArrayList<>();
    final List<ListenableFuture<TreeMap<Integer, Map<Integer, Long>>>> futures = new ArrayList<>();
    final List<String> taskIds = new ArrayList<>();

    for (String taskId : taskGroup.taskIds()) {
        final ListenableFuture<TreeMap<Integer, Map<Integer, Long>>> checkpointsFuture = taskClient
                .getCheckpointsAsync(taskId, true);
        taskIds.add(taskId);
        futures.add(checkpointsFuture);
    }

    try {
        List<TreeMap<Integer, Map<Integer, Long>>> futuresResult = Futures.successfulAsList(futures)
                .get(futureTimeoutInSeconds, TimeUnit.SECONDS);

        for (int i = 0; i < futuresResult.size(); i++) {
            final TreeMap<Integer, Map<Integer, Long>> checkpoints = futuresResult.get(i);
            final String taskId = taskIds.get(i);
            if (checkpoints == null) {
                try {
                    // catch the exception in failed futures
                    futures.get(i).get();
                } catch (Exception e) {
                    log.error(e, "Problem while getting checkpoints for task [%s], killing the task", taskId);
                    killTask(taskId);
                    taskGroup.tasks.remove(taskId);
                }
            } else if (checkpoints.isEmpty()) {
                log.warn("Ignoring task [%s], as probably it is not started running yet", taskId);
            } else {
                taskSequences.add(new Pair<>(taskId, checkpoints));
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    final KafkaDataSourceMetadata latestDataSourceMetadata = (KafkaDataSourceMetadata) indexerMetadataStorageCoordinator
            .getDataSourceMetadata(dataSource);
    final boolean hasValidOffsetsFromDb = latestDataSourceMetadata != null
            && latestDataSourceMetadata.getKafkaPartitions() != null
            && ioConfig.getTopic().equals(latestDataSourceMetadata.getKafkaPartitions().getTopic());
    final Map<Integer, Long> latestOffsetsFromDb;
    if (hasValidOffsetsFromDb) {
        latestOffsetsFromDb = latestDataSourceMetadata.getKafkaPartitions().getPartitionOffsetMap();
    } else {
        latestOffsetsFromDb = null;
    }

    // order tasks of this taskGroup by the latest sequenceId
    taskSequences.sort((o1, o2) -> o2.rhs.firstKey().compareTo(o1.rhs.firstKey()));

    final Set<String> tasksToKill = new HashSet<>();
    final AtomicInteger earliestConsistentSequenceId = new AtomicInteger(-1);
    int taskIndex = 0;

    while (taskIndex < taskSequences.size()) {
        TreeMap<Integer, Map<Integer, Long>> taskCheckpoints = taskSequences.get(taskIndex).rhs;
        String taskId = taskSequences.get(taskIndex).lhs;
        if (earliestConsistentSequenceId.get() == -1) {
            // find the first replica task with earliest sequenceId consistent with datasource metadata in the metadata
            // store
            if (taskCheckpoints.entrySet().stream()
                    .anyMatch(sequenceCheckpoint -> sequenceCheckpoint.getValue().entrySet().stream()
                            .allMatch(partitionOffset -> Longs.compare(partitionOffset.getValue(),
                                    latestOffsetsFromDb == null ? partitionOffset.getValue()
                                            : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(),
                                                    partitionOffset.getValue())) == 0)
                            && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey()))
                    || (pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() > 0
                            && earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
                final SortedMap<Integer, Map<Integer, Long>> latestCheckpoints = new TreeMap<>(
                        taskCheckpoints.tailMap(earliestConsistentSequenceId.get()));
                log.info("Setting taskGroup sequences to [%s] for group [%d]", latestCheckpoints, groupId);
                taskGroup.sequenceOffsets.clear();
                taskGroup.sequenceOffsets.putAll(latestCheckpoints);
            } else {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], latestoffsets from DB [%s]", taskId,
                        taskCheckpoints, latestOffsetsFromDb);
                tasksToKill.add(taskId);
            }
        } else {
            // check consistency with taskGroup sequences
            if (taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey()) == null
                    || !(taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey())
                            .equals(taskGroup.sequenceOffsets.firstEntry().getValue()))
                    || taskCheckpoints.tailMap(taskGroup.sequenceOffsets.firstKey())
                            .size() != taskGroup.sequenceOffsets.size()) {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], taskgroup checkpoints [%s]", taskId,
                        taskCheckpoints, taskGroup.sequenceOffsets);
                tasksToKill.add(taskId);
            }
        }
        taskIndex++;
    }

    if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size()) || (taskGroup.tasks.size() == 0
            && pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() == 0)) {
        // Either we are killing all tasks, or no tasks are left in the group.
        // Clear the taskGroup state so that the latest offset information is fetched from the metadata store.
        log.warn("Clearing task group [%d] information as no valid tasks left the group", groupId);
        taskGroups.remove(groupId);
        partitionGroups.get(groupId).replaceAll((partition, offset) -> NOT_SET);
    }

    taskSequences.stream().filter(taskIdSequences -> tasksToKill.contains(taskIdSequences.lhs))
            .forEach(sequenceCheckpoint -> {
                log.warn(
                        "Killing task [%s], as its checkpoints [%s] are not consistent with group checkpoints[%s] or latest "
                                + "persisted offsets in metadata store [%s]",
                        sequenceCheckpoint.lhs, sequenceCheckpoint.rhs, taskGroup.sequenceOffsets,
                        latestOffsetsFromDb);
                killTask(sequenceCheckpoint.lhs);
                taskGroup.tasks.remove(sequenceCheckpoint.lhs);
            });
}

From source file: com.espertech.esper.rowregex.EventRowRegexNFAView.java

/**
 * Ctor.
 * @param compositeEventType final event type
 * @param rowEventType event type for input rows
 * @param matchRecognizeSpec specification
 * @param variableStreams variables and their assigned stream number
 * @param streamsVariables stream number and the assigned variable
 * @param variablesSingle single variables
 * @param callbacksPerIndex  for handling the 'prev' function
 * @param aggregationService handles aggregations
 * @param isUnbound true if unbound stream
 * @param isIterateOnly true for iterate-only
 * @param isSelectAsksMultimatches if asking for multimatches
 */
public EventRowRegexNFAView(EventType compositeEventType, EventType rowEventType,
        MatchRecognizeSpec matchRecognizeSpec, LinkedHashMap<String, Pair<Integer, Boolean>> variableStreams,
        Map<Integer, String> streamsVariables, Set<String> variablesSingle,
        AgentInstanceContext agentInstanceContext,
        TreeMap<Integer, List<ExprPreviousMatchRecognizeNode>> callbacksPerIndex,
        AggregationServiceMatchRecognize aggregationService, boolean isUnbound, boolean isIterateOnly,
        boolean isSelectAsksMultimatches) {
    this.matchRecognizeSpec = matchRecognizeSpec;
    this.compositeEventType = compositeEventType;
    this.rowEventType = rowEventType;
    this.variableStreams = variableStreams;
    this.variablesArray = variableStreams.keySet().toArray(new String[variableStreams.keySet().size()]);
    this.streamsVariables = streamsVariables;
    this.variablesSingle = variablesSingle;
    this.aggregationService = aggregationService;
    this.isUnbound = isUnbound;
    this.isIterateOnly = isIterateOnly;
    this.agentInstanceContext = agentInstanceContext;
    this.isSelectAsksMultimatches = isSelectAsksMultimatches;

    if (matchRecognizeSpec.getInterval() != null) {
        scheduleSlot = agentInstanceContext.getStatementContext().getScheduleBucket().allocateSlot();
        ScheduleHandleCallback callback = new ScheduleHandleCallback() {
            public void scheduledTrigger(ExtensionServicesContext extensionServicesContext) {
                EventRowRegexNFAView.this.triggered();
            }
        };
        handle = new EPStatementHandleCallback(agentInstanceContext.getEpStatementAgentInstanceHandle(),
                callback);
        schedule = new TreeMap<Long, Object>();

        agentInstanceContext.addTerminationCallback(this);
    } else {
        scheduleSlot = null;
        handle = null;
        schedule = null;
    }

    this.windowMatchedEventset = new LinkedHashSet<EventBean>();

    // handle "previous" function nodes (performance-optimized for direct index access)
    RegexPartitionStateRandomAccessGetter randomAccessByIndexGetter;
    if (!callbacksPerIndex.isEmpty()) {
        // Build an array of indexes
        int[] randomAccessIndexesRequested = new int[callbacksPerIndex.size()];
        int count = 0;
        for (Map.Entry<Integer, List<ExprPreviousMatchRecognizeNode>> entry : callbacksPerIndex.entrySet()) {
            randomAccessIndexesRequested[count] = entry.getKey();
            count++;
        }
        randomAccessByIndexGetter = new RegexPartitionStateRandomAccessGetter(randomAccessIndexesRequested,
                isUnbound);

        // Since an expression such as "prior(2, price), prior(8, price)" translates into {2, 8} the relative index is {0, 1}.
        // Map the expression-supplied index to a relative index
        count = 0;
        for (Map.Entry<Integer, List<ExprPreviousMatchRecognizeNode>> entry : callbacksPerIndex.entrySet()) {
            for (ExprPreviousMatchRecognizeNode callback : entry.getValue()) {
                callback.setGetter(randomAccessByIndexGetter);
                callback.setAssignedIndex(count);
            }
            count++;
        }
    } else {
        randomAccessByIndexGetter = null;
    }

    Map<String, ExprNode> variableDefinitions = new LinkedHashMap<String, ExprNode>();
    for (MatchRecognizeDefineItem defineItem : matchRecognizeSpec.getDefines()) {
        variableDefinitions.put(defineItem.getIdentifier(), defineItem.getExpression());
    }

    // build states
    RegexNFAStrandResult strand = EventRowRegexHelper.recursiveBuildStartStates(matchRecognizeSpec.getPattern(),
            variableDefinitions, variableStreams);
    startStates = strand.getStartStates().toArray(new RegexNFAState[strand.getStartStates().size()]);
    allStates = strand.getAllStates().toArray(new RegexNFAState[strand.getAllStates().size()]);

    if (log.isDebugEnabled() || IS_DEBUG) {
        log.info("NFA tree:\n" + print(startStates));
    }

    // create evaluators
    columnNames = new String[matchRecognizeSpec.getMeasures().size()];
    columnEvaluators = new ExprEvaluator[matchRecognizeSpec.getMeasures().size()];
    int count = 0;
    for (MatchRecognizeMeasureItem measureItem : matchRecognizeSpec.getMeasures()) {
        columnNames[count] = measureItem.getName();
        columnEvaluators[count] = measureItem.getExpr().getExprEvaluator();
        count++;
    }

    // create state repository
    if (this.matchRecognizeSpec.getPartitionByExpressions().isEmpty()) {
        regexPartitionStateRepo = new RegexPartitionStateRepoNoGroup(randomAccessByIndexGetter,
                matchRecognizeSpec.getInterval() != null);
    } else {
        regexPartitionStateRepo = new RegexPartitionStateRepoGroup(randomAccessByIndexGetter,
                ExprNodeUtility.getEvaluators(matchRecognizeSpec.getPartitionByExpressions()),
                matchRecognizeSpec.getInterval() != null, agentInstanceContext);
    }
}

From source file: com.nextgis.ngm_clink_monitoring.fragments.MapFragment.java

@Override
public void onSingleTapUp(MotionEvent event) {
    double dMinX = event.getX() - mTolerancePX;
    double dMaxX = event.getX() + mTolerancePX;
    double dMinY = event.getY() - mTolerancePX;
    double dMaxY = event.getY() + mTolerancePX;

    GeoEnvelope mapEnv = mMapView.screenToMap(new GeoEnvelope(dMinX, dMaxX, dMinY, dMaxY));
    if (null == mapEnv) {
        return;
    }

    //show actions dialog
    List<ILayer> layers = mMapView.getVectorLayersByType(GeoConstants.GTAnyCheck);

    TreeMap<Integer, Integer> priorityMap = new TreeMap<>();
    List<FoclVectorLayer> foclVectorLayers = new ArrayList<>();

    for (ILayer layer : layers) {
        if (!layer.isValid()) {
            continue;
        }
        ILayerView layerView = (ILayerView) layer;
        if (!layerView.isVisible()) {
            continue;
        }

        FoclVectorLayer foclVectorLayer = (FoclVectorLayer) layer;
        List<Long> items = foclVectorLayer.query(mapEnv);
        if (!items.isEmpty()) {
            foclVectorLayers.add(foclVectorLayer);

            int type = foclVectorLayer.getFoclLayerType();
            int priority;

            switch (type) {
            case FoclConstants.LAYERTYPE_FOCL_UNKNOWN:
            default:
                priority = 0;
                break;

            case FoclConstants.LAYERTYPE_FOCL_OPTICAL_CABLE:
                priority = 1;
                break;

            case FoclConstants.LAYERTYPE_FOCL_SPECIAL_TRANSITION:
                priority = 2;
                break;

            case FoclConstants.LAYERTYPE_FOCL_FOSC:
                priority = 3;
                break;

            case FoclConstants.LAYERTYPE_FOCL_OPTICAL_CROSS:
                priority = 4;
                break;

            case FoclConstants.LAYERTYPE_FOCL_ACCESS_POINT:
                priority = 5;
                break;

            case FoclConstants.LAYERTYPE_FOCL_REAL_OPTICAL_CABLE_POINT:
                priority = 6;
                break;

            case FoclConstants.LAYERTYPE_FOCL_REAL_FOSC:
                priority = 7;
                break;

            case FoclConstants.LAYERTYPE_FOCL_REAL_OPTICAL_CROSS:
                priority = 8;
                break;

            case FoclConstants.LAYERTYPE_FOCL_REAL_ACCESS_POINT:
                priority = 9;
                break;

            case FoclConstants.LAYERTYPE_FOCL_REAL_SPECIAL_TRANSITION_POINT:
                priority = 10;
                break;
            }

            if (!priorityMap.containsKey(priority)) {
                priorityMap.put(priority, type);
            }
        }
    }

    Integer type = null;
    if (!priorityMap.isEmpty()) {
        Integer key = priorityMap.lastKey();
        type = priorityMap.get(key);
    }

    if (null != type) {
        for (FoclVectorLayer layer : foclVectorLayers) {
            if (type == layer.getFoclLayerType()) {
                List<Long> items = layer.query(mapEnv);

                AttributesDialog attributesDialog = new AttributesDialog();
                attributesDialog.setKeepInstance(true);
                attributesDialog.setParams(layer, items.get(0));
                attributesDialog.show(getActivity().getSupportFragmentManager(),
                        FoclConstants.FRAGMENT_ATTRIBUTES);

                break;
            }
        }
    }
}

From source file: com.mfizz.observer.core.ServiceObserver.java

private void doSnapshotAll(SnapshotAllResult result) throws Exception {
    //
    // create list of snapshots that will be executed
    //
    ArrayList<SnapshotTask> snapshotTasks = new ArrayList<SnapshotTask>();
    for (Observer<D> observer : observers.values()) {
        snapshotTasks.add(new SnapshotTask(observer, result.beginTimestamp));
    }
    result.snapshotsAttempted = snapshotTasks.size();

    // this will run all the update tasks and wait for them all to finish
    executor.invokeAll(snapshotTasks);

    // create an aggregate for each group
    TreeMap<String, ObserveAggregateSnapshot<A>> aggs = new TreeMap<String, ObserveAggregateSnapshot<A>>();

    // process deltas from each observer
    for (Observer<D> observer : observers.values()) {

        // determine if last snapshot completed or failed
        if (observer.getConsecutiveSnapshotCompletedCount() > 0) {
            result.snapshotsCompleted++;
        } else {
            result.snapshotsFailed++;
        }

        // was this the first snapshot attempt for this observer?
        long snapshotAttempts = observer.getSnapshotAttemptCounter();

        // each group will aggregate the same delta snapshot from each observer
        ObserveDeltaSnapshot<D> ods = observer.getDeltaSnapshot();

        if (ods == null) {
            //logger.debug("delta snapshot for observer {} was null", observer.getName());
            SnapshotException e = observer.getException();
            if (e == null) {
                if (snapshotAttempts <= 1) {
                    // on the first run we don't expect any deltas
                } else {
                    logger.error(
                            "observer [{}] for service [{}] had null delta AND exception values (previous snapshot maybe failed?)",
                            observer.getName(), getServiceName());
                }
            } else {
                // this is now logged in SnapshotTask below
                //logger.warn("exception during snapshot for observer " + observer.getName(), e);
            }
        } else {
            // period should be the same across all deltas
            TimePeriod period = ods.getPeriod();
            // TODO: verify periods match each other as safety check?

            // create or get aggregate for each group this observer belongs to
            for (String group : observer.configuration.getGroups()) {
                ObserveAggregateSnapshot<A> oas = aggs.get(group);
                if (oas == null) {
                    oas = new ObserveAggregateSnapshot<A>(period, aggregateClass.newInstance());
                    aggs.put(group, oas);
                }
                oas.add(observer.getName(), ods.getData());
            }
        }
    }

    if (snapshotAllAttemptedCounter.get() > 1 && aggs.isEmpty()) {
        logger.warn("snapshotAll() for service [{}] generated no aggregated snapshots!", this.getServiceName());
    }

    // at this point, the new snapshots from each observer have generated
    // new aggregates for this point-in-time -- add this to our rolling time series
    for (String group : aggs.keySet()) {
        // last aggregate snapshot
        ObserveAggregateSnapshot<A> oas = aggs.get(group);

        // get or create new series of aggregate snapshots for each group
        TimeSeries<ObserveAggregateSnapshot<A>> aggseries = snapshots.get(group);

        if (aggseries == null) {
            // figure out capacity of time series (retentionTime / step + fudgeFactor)
            long retentionMillis = getRetentionMillis();
            int initialCapacity = (int) (retentionMillis / this.serviceConfig.getStepMillis()) + 2;
            logger.info(
                    "Creating new TimeSeries for service [{}] group [{}] with retentionMillis="
                            + retentionMillis + "; initialCapacity=" + initialCapacity,
                    getServiceName(), group);
            aggseries = new TimeSeries<ObserveAggregateSnapshot<A>>(retentionMillis, initialCapacity);
            snapshots.put(group, aggseries);
        }

        // add aggregate snapshot to the time series for each group
        // this will also prune old snapshots that are older than the retention period
        // the timestamp of the aggregate becomes the relative "now" timestamp for calculating retentions
        // this is how we'll always at least keep "current" times
        aggseries.add(oas, oas.getTimestamp());

        // create an updated summary for each interval for this group
        SummaryGroupFactory<S, A> sfg = new SummaryGroupFactory<S, A>(oas.getTimestamp(), this.summaryClass,
                this.serviceConfig.getPeriods());

        sfg.beginAll();

        Iterator<ObserveAggregateSnapshot<A>> it = aggseries.getSeries().iterator();
        while (it.hasNext()) {
            ObserveAggregateSnapshot<A> tempoas = it.next();
            sfg.summarize(tempoas.getPeriod(), tempoas.getAggregate());
        }

        sfg.completeAll();

        SummaryGroup<S> sg = sfg.createSummaryGroup();
        summary.put(group, sg);
    }
}

From source file: me.oriley.crate.CrateGenerator.java

private void listFiles(@NonNull TreeMap<String, Asset> allAssets, @NonNull TypeSpec.Builder parentBuilder,
        @NonNull String classPathString, @NonNull File directory, @NonNull String variantAssetDir,
        boolean root) {

    String rootName = root ? ASSETS : directory.getName();
    TypeSpec.Builder builder = TypeSpec.classBuilder(capitalise(rootName + CLASS)).addModifiers(PUBLIC, STATIC,
            FINAL);

    List<File> files = getFileList(directory);
    TreeMap<String, Asset> assetMap = new TreeMap<>();
    boolean isFontFolder = true;
    boolean isImageFolder = true;

    for (File file : files) {
        if (file.isDirectory()) {
            listFiles(allAssets, builder, classPathString + file.getName() + ".", file, variantAssetDir, false);
        } else {
            String fileName = file.getName();
            String fieldName = sanitiseFieldName(fileName).toUpperCase(US);

            if (assetMap.containsKey(fieldName)) {
                String baseFieldName = fieldName + "_";
                int counter = 0;
                while (assetMap.containsKey(fieldName)) {
                    fieldName = baseFieldName + counter;
                    counter++; // advance the suffix so repeated collisions cannot loop forever
                }
            }

            String filePath = file.getPath().replace(variantAssetDir + "/", "");

            String fileExtension = getFileExtension(fileName).toLowerCase(US);
            AssetHolder asset;
            if (FONT_EXTENSIONS.contains(fileExtension)) {
                isImageFolder = false;
                String fontName = getFontName(file.getPath());
                asset = new FontAssetHolder(fieldName, filePath, fileName,
                        fontName != null ? fontName : fileName);
                builder.addField(createFontAssetField((FontAssetHolder) asset));
            } else if (IMAGE_EXTENSIONS.contains(fileExtension)) {
                isFontFolder = false;

                int width = 0;
                int height = 0;
                try {
                    BufferedImage image = ImageIO.read(file);
                    if (image != null) {
                        width = image.getWidth();
                        height = image.getHeight();
                    }
                } catch (IOException e) {
                    logError("Error parsing image: " + file.getPath(), e, false);
                }

                asset = new ImageAssetHolder(fieldName, filePath, fileName, width, height);
                builder.addField(createImageAssetField((ImageAssetHolder) asset));
            } else {
                isFontFolder = false;
                isImageFolder = false;
                asset = new AssetHolder(fieldName, filePath, fileName);
                builder.addField(createAssetField(asset));
            }
            assetMap.put(fieldName, asset);
            allAssets.put(classPathString + fieldName, asset);
        }
    }

    if (!assetMap.isEmpty()) {
        TypeName elementType = TypeVariableName
                .get(isFontFolder ? FontAsset.class : isImageFolder ? ImageAsset.class : Asset.class);
        TypeName listType = ParameterizedTypeName.get(ClassName.get(List.class), elementType);
        builder.addField(createListField(listType, "LIST", assetMap));
    }

    if (root && !allAssets.isEmpty()) {
        TypeName listType = ParameterizedTypeName.get(ClassName.get(List.class),
                TypeVariableName.get(Asset.class));
        builder.addField(createListField(listType, "FULL_LIST", allAssets));
    }

    parentBuilder.addType(builder.build());
    parentBuilder.addField(createNonStaticClassField(rootName));
}

From source file: org.onebusaway.nyc.sms.actions.IndexAction.java

private String singleStopResponse(String message) throws Exception {

    if (message == null) {
        message = "";
    }
    message = message.trim();
    if (!message.isEmpty()) {
        message = "\n" + message + "\n";
    }

    StopResult stopResult = (StopResult) _searchResults.getMatches().get(0);

    String header = "Stop " + stopResult.getIdWithoutAgency() + "\n\n";

    String footer = "\nSend:\n";
    footer += "R for refresh\n";
    if (_searchResults.getRouteFilter().isEmpty() && stopResult.getStop().getRoutes().size() > 1) {
        footer += stopResult.getIdWithoutAgency() + "+ROUTE for bus info\n";
    }

    // worst case for footer length
    String alertsFooter = footer + "C+ROUTE for *svc alert\n";

    // body content for stops
    String body = "";
    if (stopResult.getRoutesAvailable().size() == 0) {
        // if we found a stop with no routes because of a stop+route filter, 
        // indicate that specifically
        if (_searchResults.getRouteFilter().size() > 0) {
            body += "No filter matches\n";
        } else {
            body += "No routes\n";
        }
    } else {
        // build map of vehicle observation strings for this stop, sorted closest to farthest
        TreeMap<Double, String> observationsByDistanceFromStopAcrossAllRoutes = new TreeMap<Double, String>();
        // Keep track of not scheduled and not en route so we can display that later
        Set<String> notScheduledRoutes = new HashSet<String>();
        Set<String> notEnRouteRoutes = new HashSet<String>();

        for (RouteAtStop routeHere : stopResult.getRoutesAvailable()) {

            if (_searchResults.getRouteFilter() != null && !_searchResults.getRouteFilter().isEmpty()
                    && !_searchResults.getRouteFilter().contains(routeHere.getRoute())) {
                continue;
            }

            for (RouteDirection direction : routeHere.getDirections()) {
                String prefix = "";
                if (!direction.getSerivceAlerts().isEmpty()) {
                    footer = alertsFooter;
                    prefix += "*";
                }
                prefix += routeHere.getShortName();

                if (!direction.hasUpcomingScheduledService() && direction.getDistanceAways().isEmpty()) {
                    notScheduledRoutes.add(prefix);
                } else {
                    if (!direction.getDistanceAways().isEmpty()) {
                        HashMap<Double, String> sortableDistanceAways = direction.getDistanceAwaysWithSortKey();
                        for (Double distanceAway : sortableDistanceAways.keySet()) {
                            String distanceAwayString = sortableDistanceAways.get(distanceAway);

                            observationsByDistanceFromStopAcrossAllRoutes.put(distanceAway,
                                    prefix + ": " + distanceAwayString);
                        }
                    } else {
                        notEnRouteRoutes.add(prefix);
                    }
                }
            }
        }

        // if there are no upcoming buses, provide info about the routes that are not en route or not scheduled
        if (observationsByDistanceFromStopAcrossAllRoutes.isEmpty()) {
            if (notEnRouteRoutes.size() > 0) {
                body += StringUtils.join(notEnRouteRoutes, ",") + ": no buses en-route\n";
            }
            if (notScheduledRoutes.size() > 0) {
                body += StringUtils.join(notScheduledRoutes, ",") + ": not scheduled\n";
            }
            // otherwise, as many observations as will fit, sorted by soonest to arrive
        } else {
            for (String observationString : observationsByDistanceFromStopAcrossAllRoutes.values()) {
                String textToAdd = observationString + "\n";

                if (message.length() + body.length() + header.length() + alertsFooter.length()
                        + textToAdd.length() < MAX_SMS_CHARACTER_COUNT) {
                    body += textToAdd;
                } else {
                    break;
                }
            }
        }
    }

    if (_googleAnalytics != null) {
        try {
            _googleAnalytics.trackEvent("SMS", "Stop Realtime Response for Single Stop", _query);
        } catch (Exception e) {
            //discard
        }
    }

    return header + body + message + footer;
}

From source file: org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.java

/**
 * Loads initial store files that were picked up from some physical location pertaining to
 * this store (presumably). Unlike adding files after compaction, assumes empty initial
 * sets, and is forgiving with regard to stripe constraints - at worst, many/all files will
 * go to level 0.
 * @param storeFiles Store files to add.
 */
private void loadUnclassifiedStoreFiles(List<StoreFile> storeFiles) {
    LOG.debug("Attempting to load " + storeFiles.size() + " store files.");
    TreeMap<byte[], ArrayList<StoreFile>> candidateStripes = new TreeMap<byte[], ArrayList<StoreFile>>(
            MAP_COMPARATOR);
    ArrayList<StoreFile> level0Files = new ArrayList<StoreFile>();
    // Separate the files into tentative stripes; then validate. Currently, we rely on metadata.
    // If needed, we could dynamically determine the stripes in future.
    for (StoreFile sf : storeFiles) {
        byte[] startRow = startOf(sf), endRow = endOf(sf);
        // Validate the range and put the files into place.
        if (isInvalid(startRow) || isInvalid(endRow)) {
            insertFileIntoStripe(level0Files, sf); // No metadata - goes to L0.
            ensureLevel0Metadata(sf);
        } else if (!isOpen(startRow) && !isOpen(endRow) && nonOpenRowCompare(startRow, endRow) >= 0) {
            LOG.error("Unexpected metadata - start row [" + Bytes.toString(startRow) + "], end row ["
                    + Bytes.toString(endRow) + "] in file [" + sf.getPath() + "], pushing to L0");
            insertFileIntoStripe(level0Files, sf); // Bad metadata - goes to L0 also.
            ensureLevel0Metadata(sf);
        } else {
            ArrayList<StoreFile> stripe = candidateStripes.get(endRow);
            if (stripe == null) {
                stripe = new ArrayList<StoreFile>();
                candidateStripes.put(endRow, stripe);
            }
            insertFileIntoStripe(stripe, sf);
        }
    }
    // Possible improvement - for variable-count stripes, if all the files are in L0, we can
    // instead create single, open-ended stripe with all files.

    boolean hasOverlaps = false;
    byte[] expectedStartRow = null; // first stripe can start wherever
    Iterator<Map.Entry<byte[], ArrayList<StoreFile>>> entryIter = candidateStripes.entrySet().iterator();
    while (entryIter.hasNext()) {
        Map.Entry<byte[], ArrayList<StoreFile>> entry = entryIter.next();
        ArrayList<StoreFile> files = entry.getValue();
        // Validate the file start rows, and remove the bad ones to level 0.
        for (int i = 0; i < files.size(); ++i) {
            StoreFile sf = files.get(i);
            byte[] startRow = startOf(sf);
            if (expectedStartRow == null) {
                expectedStartRow = startRow; // ensure that first stripe is still consistent
            } else if (!rowEquals(expectedStartRow, startRow)) {
                hasOverlaps = true;
                LOG.warn("Store file doesn't fit into the tentative stripes - expected to start at ["
                        + Bytes.toString(expectedStartRow) + "], but starts at [" + Bytes.toString(startRow)
                        + "], to L0 it goes");
                StoreFile badSf = files.remove(i);
                insertFileIntoStripe(level0Files, badSf);
                ensureLevel0Metadata(badSf);
                --i;
            }
        }
        // Check if any files from the candidate stripe are valid. If so, add a stripe.
        byte[] endRow = entry.getKey();
        if (!files.isEmpty()) {
            expectedStartRow = endRow; // Next stripe must start exactly at that key.
        } else {
            entryIter.remove();
        }
    }

    // In the end, there must be open ends on two sides. If not, and there were no errors i.e.
    // files are consistent, they might be coming from a split. We will treat the boundaries
    // as open keys anyway, and log the message.
    // If there were errors, we'll play it safe and dump everything into L0.
    if (!candidateStripes.isEmpty()) {
        StoreFile firstFile = candidateStripes.firstEntry().getValue().get(0);
        boolean isOpen = isOpen(startOf(firstFile)) && isOpen(candidateStripes.lastKey());
        if (!isOpen) {
            LOG.warn("The range of the loaded files does not cover full key space: from ["
                    + Bytes.toString(startOf(firstFile)) + "], to ["
                    + Bytes.toString(candidateStripes.lastKey()) + "]");
            if (!hasOverlaps) {
                ensureEdgeStripeMetadata(candidateStripes.firstEntry().getValue(), true);
                ensureEdgeStripeMetadata(candidateStripes.lastEntry().getValue(), false);
            } else {
                LOG.warn("Inconsistent files, everything goes to L0.");
                for (ArrayList<StoreFile> files : candidateStripes.values()) {
                    for (StoreFile sf : files) {
                        insertFileIntoStripe(level0Files, sf);
                        ensureLevel0Metadata(sf);
                    }
                }
                candidateStripes.clear();
            }
        }
    }

    // Copy the results into the fields.
    State state = new State();
    state.level0Files = ImmutableList.copyOf(level0Files);
    state.stripeFiles = new ArrayList<ImmutableList<StoreFile>>(candidateStripes.size());
    state.stripeEndRows = new byte[Math.max(0, candidateStripes.size() - 1)][];
    ArrayList<StoreFile> newAllFiles = new ArrayList<StoreFile>(level0Files);
    int i = candidateStripes.size() - 1;
    for (Map.Entry<byte[], ArrayList<StoreFile>> entry : candidateStripes.entrySet()) {
        state.stripeFiles.add(ImmutableList.copyOf(entry.getValue()));
        newAllFiles.addAll(entry.getValue());
        if (i > 0) {
            state.stripeEndRows[state.stripeFiles.size() - 1] = entry.getKey();
        }
        --i;
    }
    state.allFilesCached = ImmutableList.copyOf(newAllFiles);
    this.state = state;
    debugDumpState("Files loaded");
}

From source file: org.apache.nutch.segment.SegmentMerger.java

/**
 * NOTE: in selecting the latest version we rely exclusively on the segment
 * name (not all segment data contain time information). Therefore it is extremely
 * important that segments be named in an increasing lexicographic order as
 * their creation time increases.
 */
public void reduce(Text key, Iterator<MetaWrapper> values, OutputCollector<Text, MetaWrapper> output,
        Reporter reporter) throws IOException {
    CrawlDatum lastG = null;
    CrawlDatum lastF = null;
    CrawlDatum lastSig = null;
    Content lastC = null;
    ParseData lastPD = null;
    ParseText lastPT = null;
    String lastGname = null;
    String lastFname = null;
    String lastSigname = null;
    String lastCname = null;
    String lastPDname = null;
    String lastPTname = null;
    TreeMap<String, ArrayList<CrawlDatum>> linked = new TreeMap<String, ArrayList<CrawlDatum>>();
    while (values.hasNext()) {
        MetaWrapper wrapper = values.next();
        Object o = wrapper.get();
        String spString = wrapper.getMeta(SEGMENT_PART_KEY);
        if (spString == null) {
            throw new IOException("Null segment part, key=" + key);
        }
        SegmentPart sp = SegmentPart.parse(spString);
        if (o instanceof CrawlDatum) {
            CrawlDatum val = (CrawlDatum) o;
            // check which output dir it belongs to
            if (sp.partName.equals(CrawlDatum.GENERATE_DIR_NAME)) {
                if (lastG == null) {
                    lastG = val;
                    lastGname = sp.segmentName;
                } else {
                    // take newer
                    if (lastGname.compareTo(sp.segmentName) < 0) {
                        lastG = val;
                        lastGname = sp.segmentName;
                    }
                }
            } else if (sp.partName.equals(CrawlDatum.FETCH_DIR_NAME)) {
                if (lastF == null) {
                    lastF = val;
                    lastFname = sp.segmentName;
                } else {
                    // take newer
                    if (lastFname.compareTo(sp.segmentName) < 0) {
                        lastF = val;
                        lastFname = sp.segmentName;
                    }
                }
            } else if (sp.partName.equals(CrawlDatum.PARSE_DIR_NAME)) {
                if (val.getStatus() == CrawlDatum.STATUS_SIGNATURE) {
                    if (lastSig == null) {
                        lastSig = val;
                        lastSigname = sp.segmentName;
                    } else {
                        // take newer
                        if (lastSigname.compareTo(sp.segmentName) < 0) {
                            lastSig = val;
                            lastSigname = sp.segmentName;
                        }
                    }
                    continue;
                }
                // collect all LINKED values from the latest segment
                ArrayList<CrawlDatum> segLinked = linked.get(sp.segmentName);
                if (segLinked == null) {
                    segLinked = new ArrayList<CrawlDatum>();
                    linked.put(sp.segmentName, segLinked);
                }
                segLinked.add(val);
            } else {
                throw new IOException("Cannot determine segment part: " + sp.partName);
            }
        } else if (o instanceof Content) {
            if (lastC == null) {
                lastC = (Content) o;
                lastCname = sp.segmentName;
            } else {
                if (lastCname.compareTo(sp.segmentName) < 0) {
                    lastC = (Content) o;
                    lastCname = sp.segmentName;
                }
            }
        } else if (o instanceof ParseData) {
            if (lastPD == null) {
                lastPD = (ParseData) o;
                lastPDname = sp.segmentName;
            } else {
                if (lastPDname.compareTo(sp.segmentName) < 0) {
                    lastPD = (ParseData) o;
                    lastPDname = sp.segmentName;
                }
            }
        } else if (o instanceof ParseText) {
            if (lastPT == null) {
                lastPT = (ParseText) o;
                lastPTname = sp.segmentName;
            } else {
                if (lastPTname.compareTo(sp.segmentName) < 0) {
                    lastPT = (ParseText) o;
                    lastPTname = sp.segmentName;
                }
            }
        }
    }
    // perform filtering based on full merge record
    if (mergeFilters != null && !mergeFilters.filter(key, lastG, lastF, lastSig, lastC, lastPD, lastPT,
            linked.isEmpty() ? null : linked.lastEntry().getValue())) {
        return;
    }

    curCount++;
    String sliceName = null;
    MetaWrapper wrapper = new MetaWrapper();
    if (sliceSize > 0) {
        sliceName = String.valueOf(curCount / sliceSize);
        wrapper.setMeta(SEGMENT_SLICE_KEY, sliceName);
    }
    SegmentPart sp = new SegmentPart();
    // now output the latest values
    if (lastG != null) {
        wrapper.set(lastG);
        sp.partName = CrawlDatum.GENERATE_DIR_NAME;
        sp.segmentName = lastGname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastF != null) {
        wrapper.set(lastF);
        sp.partName = CrawlDatum.FETCH_DIR_NAME;
        sp.segmentName = lastFname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastSig != null) {
        wrapper.set(lastSig);
        sp.partName = CrawlDatum.PARSE_DIR_NAME;
        sp.segmentName = lastSigname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastC != null) {
        wrapper.set(lastC);
        sp.partName = Content.DIR_NAME;
        sp.segmentName = lastCname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastPD != null) {
        wrapper.set(lastPD);
        sp.partName = ParseData.DIR_NAME;
        sp.segmentName = lastPDname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastPT != null) {
        wrapper.set(lastPT);
        sp.partName = ParseText.DIR_NAME;
        sp.segmentName = lastPTname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (linked.size() > 0) {
        String name = linked.lastKey();
        sp.partName = CrawlDatum.PARSE_DIR_NAME;
        sp.segmentName = name;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        ArrayList<CrawlDatum> segLinked = linked.get(name);
        for (int i = 0; i < segLinked.size(); i++) {
            CrawlDatum link = segLinked.get(i);
            wrapper.set(link);
            output.collect(key, wrapper);
        }
    }
}