Example usage for java.lang String CASE_INSENSITIVE_ORDER

List of usage examples for java.lang String CASE_INSENSITIVE_ORDER

Introduction

This page collects example usages of java.lang.String.CASE_INSENSITIVE_ORDER.

Prototype

public static final Comparator<String> CASE_INSENSITIVE_ORDER

Document

A Comparator that orders String objects as by compareToIgnoreCase().
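
Before the project examples, a minimal self-contained sketch of the comparator in action (the class name and sample values below are illustrative, not taken from any of the projects listed):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class CaseInsensitiveOrderDemo {
    public static void main(String[] args) {
        // Sort a list without regard to case; the sort is stable, so "Apple" and "apple"
        // (which compare equal under this comparator) keep their original relative order.
        List<String> names = new ArrayList<String>(Arrays.asList("banana", "Apple", "cherry", "apple"));
        Collections.sort(names, String.CASE_INSENSITIVE_ORDER);
        System.out.println(names); // [Apple, apple, banana, cherry]
    }
}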

Usage

From source file:org.apache.directory.fortress.core.impl.AdminRoleUtil.java

/**
 * Return Set of {@link org.apache.directory.fortress.core.model.AdminRole#name}s ascendants.  Used by {@link org.apache.directory.fortress.core.impl.PermDAO#checkPermission}
 * for computing authorized {@link org.apache.directory.fortress.core.model.UserAdminRole#name}s.
 * @param uRoles contains list of adminRoles activated within a {@link org.apache.directory.fortress.core.model.User}'s {@link org.apache.directory.fortress.core.model.Session}.
 * @param contextId maps to sub-tree in DIT, for example ou=contextId, dc=jts, dc = com.
 * @return contains Set of all authorized adminRoles for a given User.
 */
public static Set<String> getInheritedRoles(List<UserAdminRole> uRoles, String contextId) {
    // create Set with case insensitive comparator:
    Set<String> iRoles = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);

    if (CollectionUtils.isNotEmpty(uRoles)) {
        for (UserAdminRole uRole : uRoles) {
            String rleName = uRole.getName();
            iRoles.add(rleName);
            Set<String> parents = HierUtil.getAscendants(rleName, getGraph(contextId));

            if (CollectionUtils.isNotEmpty(parents)) {
                iRoles.addAll(parents);
            }
        }
    }
    return iRoles;
}
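
A note on the TreeSet above: because it is constructed with String.CASE_INSENSITIVE_ORDER, role names that differ only in case collapse into a single entry, and lookups ignore case. A small sketch of that behavior (the role name is made up for illustration):

import java.util.Set;
import java.util.TreeSet;

public class CaseInsensitiveSetDemo {
    public static void main(String[] args) {
        Set<String> iRoles = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER);
        iRoles.add("SuperAdmin");
        iRoles.add("superadmin");                           // equal to "SuperAdmin" under this comparator, so not added
        System.out.println(iRoles.size());                  // 1
        System.out.println(iRoles.contains("SUPERADMIN"));  // true
    }
}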

From source file:eu.planets_project.tb.gui.backing.exp.ResultsForDigitalObjectBean.java

/**
 * Returns all information when the experiment's result type is 'PropertiesListResult', i.e.
 * all WEE workflows use this result type to pass on information.
 * @return
 */
public List<String> getResultPropertiesList() {
    List<String> ret = new ArrayList<String>();
    if (this.getExecutionRecord() == null || this.getExecutionRecord().getReportLog() == null)
        return null;
    try {
        Properties ps = this.getExecutionRecord().getPropertiesListResult();
        if (ps != null) {
            Enumeration enumeration = ps.keys();
            while (enumeration.hasMoreElements()) {
                String key = (String) enumeration.nextElement();
                String value = ps.getProperty(key);
                if (!key.startsWith(ExecutionRecordImpl.RESULT_PROPERTY_URI)) {
                    ret.add("[" + key + "= " + value + "]");
                }
            }
            // Sort the list case-insensitively
            Collections.sort(ret, String.CASE_INSENSITIVE_ORDER);
            return ret;
        }
    } catch (IOException e) {
        log.debug("unable to fetch the resultPropertiesList in ResultsForDigitalObjectBean " + e);
        return null;
    }
    return null;
}

From source file:org.docrj.smartcard.reader.BatchSelectActivity.java

@Override
public void onResume() {
    super.onResume();

    // restore persistent data
    SharedPreferences ss = getSharedPreferences("prefs", Context.MODE_PRIVATE);

    Gson gson = new Gson();
    Type collectionType;
    String json = ss.getString("apps", null);
    if (json != null) {
        collectionType = new TypeToken<ArrayList<SmartcardApp>>() {
        }.getType();
        mApps = gson.fromJson(json, collectionType);
    }

    mGroups = new LinkedHashSet<>();
    json = ss.getString("groups", null);
    if (json != null) {
        collectionType = new TypeToken<LinkedHashSet<String>>() {
        }.getType();
        mGroups = gson.fromJson(json, collectionType);
    }
    mGroups.addAll(Arrays.asList(DEFAULT_GROUPS));

    // alphabetize, case insensitive
    List<String> groupList = new ArrayList<>(mGroups);
    Collections.sort(groupList, String.CASE_INSENSITIVE_ORDER);

    mSelectedGrpPos = ss.getInt("selected_grp_pos", mSelectedGrpPos);

    // do not clear messages for this selection on resume;
    // setAdapter and setSelection result in onItemSelected callback
    mSelectInInit = true;

    mGrpAdapter.clear();
    mGrpToMembersMap.clear();
    for (String group : groupList) {
        ArrayList<SmartcardApp> memberApps = Util.findGroupMembers(group, mApps);
        Collections.sort(memberApps, SmartcardApp.nameComparator);
        int pos = mGrpAdapter.addGroup(group, memberApps);
        mGrpToMembersMap.put(pos, memberApps);
    }
    mGrpSpinner.setAdapter(mGrpAdapter);
    mGrpSpinner.setSelection(mSelectedGrpPos);

    mNfcManager.onResume();
    mConsole.onResume();
    mNavDrawer.onResume();
    initSoundPool();
}

From source file:core.com.qiniu.auth.AWS4Signer.java

protected String getSignedHeadersString(Request<?> request) {
    List<String> sortedHeaders = new ArrayList<String>();
    sortedHeaders.addAll(request.getHeaders().keySet());
    Collections.sort(sortedHeaders, String.CASE_INSENSITIVE_ORDER);

    StringBuilder buffer = new StringBuilder();
    for (String header : sortedHeaders) {
        if (needsSign(header)) {
            if (buffer.length() > 0)
                buffer.append(";");
            buffer.append(StringUtils.lowerCase(header));
        }
    }

    return buffer.toString();
}
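
The case-insensitive sort above puts header names into a canonical order no matter how the caller capitalized them; the loop then lowercases and joins them with ';'. A rough standalone sketch of the same idea (the header names are invented, and the needsSign() filter is omitted):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SignedHeadersDemo {
    public static void main(String[] args) {
        List<String> headers = new ArrayList<String>(Arrays.asList("X-Amz-Date", "host", "Content-Type"));
        Collections.sort(headers, String.CASE_INSENSITIVE_ORDER);

        // Join the lowercased header names with ';' in their canonical order.
        StringBuilder buffer = new StringBuilder();
        for (String header : headers) {
            if (buffer.length() > 0) {
                buffer.append(';');
            }
            buffer.append(header.toLowerCase());
        }
        System.out.println(buffer); // content-type;host;x-amz-date
    }
}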

From source file:org.usergrid.utils.MapUtils.java

/**
 * @param <A>
 * @param <B>
 * @param <C>
 * @param <D>
 * @param map
 * @param ignore_case
 * @param a
 * @param b
 * @param c
 * @param d
 */
@SuppressWarnings("unchecked")
public static <A, B, C, D> void addMapMapMapList(Map<A, Map<B, Map<C, List<D>>>> map, boolean ignore_case, A a,
        B b, C c, D d) {

    Map<B, Map<C, List<D>>> map_b = map.get(a);
    if (map_b == null) {
        if (ignore_case && (b instanceof String)) {
            map_b = (Map<B, Map<C, List<D>>>) new TreeMap<String, Map<C, List<D>>>(
                    String.CASE_INSENSITIVE_ORDER);
        } else {
            map_b = new LinkedHashMap<B, Map<C, List<D>>>();
        }
        map.put(a, map_b);
    }

    addMapMapList(map_b, ignore_case, b, c, d);
}
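
When ignore_case is set and the keys are Strings, the method above backs the intermediate map with a TreeMap built on String.CASE_INSENSITIVE_ORDER, so later lookups ignore key case and a differently-cased key overwrites rather than duplicates. A minimal sketch of that map behavior (keys and values are illustrative):

import java.util.Map;
import java.util.TreeMap;

public class CaseInsensitiveMapDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new TreeMap<String, Integer>(String.CASE_INSENSITIVE_ORDER);
        counts.put("Alpha", 1);
        counts.put("ALPHA", 2);                  // same key under this comparator; value replaced, stored key stays "Alpha"
        System.out.println(counts.size());       // 1
        System.out.println(counts.get("alpha")); // 2
    }
}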

From source file:com.metamx.druid.realtime.plumber.RealtimePlumberSchool.java

@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();

    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    return new Plumber() {
        private volatile boolean stopped = false;
        private volatile ExecutorService persistExecutor = null;
        private volatile ScheduledExecutorService scheduledExecutor = null;

        private final Map<Long, Sink> sinks = Maps.newConcurrentMap();
        private final VersionedIntervalTimeline<String, Sink> sinkTimeline = new VersionedIntervalTimeline<String, Sink>(
                String.CASE_INSENSITIVE_ORDER);

        @Override
        public void startJob() {
            computeBaseDir(schema).mkdirs();
            initializeExecutors();
            bootstrapSinksFromDisk();
            registerServerViewCallback();
            startPersistThread();
        }

        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);

            if (retVal == null) {
                final Interval sinkInterval = new Interval(new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime)));

                retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

                try {
                    segmentAnnouncer.announceSegment(retVal.getSegment());
                    sinks.put(truncatedTime, retVal);
                    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(),
                            new SingleElementPartitionChunk<Sink>(retVal));
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval()).emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {

                @Override
                public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                    return toolchest.makeMetricBuilder(query);
                }
            };

            List<TimelineObjectHolder<String, Sink>> querySinks = Lists.newArrayList();
            for (Interval interval : query.getIntervals()) {
                querySinks.addAll(sinkTimeline.lookup(interval));
            }

            return toolchest.mergeResults(factory.mergeRunners(EXEC, FunctionalIterable.create(querySinks)
                    .transform(new Function<TimelineObjectHolder<String, Sink>, QueryRunner<T>>() {
                        @Override
                        public QueryRunner<T> apply(TimelineObjectHolder<String, Sink> holder) {
                            final Sink theSink = holder.getObject().getChunk(0).getObject();
                            return new SpecificSegmentQueryRunner<T>(new MetricsEmittingQueryRunner<T>(emitter,
                                    builderFn, factory.mergeRunners(EXEC, Iterables.transform(theSink,
                                            new Function<FireHydrant, QueryRunner<T>>() {
                                                @Override
                                                public QueryRunner<T> apply(FireHydrant input) {
                                                    return factory.createRunner(input.getSegment());
                                                }
                                            }))),
                                    new SpecificSegmentSpec(new SegmentDescriptor(holder.getInterval(),
                                            theSink.getSegment().getVersion(),
                                            theSink.getSegment().getShardSpec().getPartitionNum())));
                        }
                    })));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        // Submits persist-n-merge task for a Sink to the persistExecutor
        private void persistAndMerge(final long truncatedTime, final Sink sink) {
            final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(),
                    new DateTime(truncatedTime));
            persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                @Override
                public void doRun() {
                    final Interval interval = sink.getInterval();

                    for (FireHydrant hydrant : sink) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }

                    final File mergedTarget = new File(computePersistDir(schema, interval), "merged");
                    if (mergedTarget.exists()) {
                        log.info("Skipping already-merged sink: %s", sink);
                        return;
                    }

                    File mergedFile = null;
                    try {
                        List<QueryableIndex> indexes = Lists.newArrayList();
                        for (FireHydrant fireHydrant : sink) {
                            Segment segment = fireHydrant.getSegment();
                            final QueryableIndex queryableIndex = segment.asQueryableIndex();
                            log.info("Adding hydrant[%s]", fireHydrant);
                            indexes.add(queryableIndex);
                        }

                        mergedFile = IndexMerger.mergeQueryableIndex(indexes, schema.getAggregators(),
                                mergedTarget);

                        QueryableIndex index = IndexIO.loadIndex(mergedFile);

                        DataSegment segment = dataSegmentPusher.push(mergedFile, sink.getSegment()
                                .withDimensions(Lists.newArrayList(index.getAvailableDimensions())));

                        segmentPublisher.publishSegment(segment);
                    } catch (IOException e) {
                        log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource())
                                .addData("interval", interval).emit();
                    }

                    if (mergedFile != null) {
                        try {
                            if (mergedFile != null) {
                                log.info("Deleting Index File[%s]", mergedFile);
                                FileUtils.deleteDirectory(mergedFile);
                            }
                        } catch (IOException e) {
                            log.warn(e, "Error deleting directory[%s]", mergedFile);
                        }
                    }
                }
            });
        }

        @Override
        public void finishJob() {
            log.info("Shutting down...");

            for (final Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                persistAndMerge(entry.getKey(), entry.getValue());
            }

            while (!sinks.isEmpty()) {
                try {
                    log.info("Cannot shut down yet! Sinks remaining: %s", Joiner.on(", ")
                            .join(Iterables.transform(sinks.values(), new Function<Sink, String>() {
                                @Override
                                public String apply(Sink input) {
                                    return input.getSegment().getIdentifier();
                                }
                            })));

                    synchronized (handoffCondition) {
                        while (!sinks.isEmpty()) {
                            handoffCondition.wait();
                        }
                    }
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }

            // scheduledExecutor is shut down here, but persistExecutor is shut down when the
            // ServerView sends it a new segment callback
            if (scheduledExecutor != null) {
                scheduledExecutor.shutdown();
            }

            stopped = true;
        }

        private void initializeExecutors() {
            if (persistExecutor == null) {
                persistExecutor = Executors.newFixedThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_persist_%d").build());
            }
            if (scheduledExecutor == null) {
                scheduledExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
                        .setDaemon(true).setNameFormat("plumber_scheduled_%d").build());
            }
        }

        private void bootstrapSinksFromDisk() {
            for (File sinkDir : computeBaseDir(schema).listFiles()) {
                Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

                //final File[] sinkFiles = sinkDir.listFiles();
                // To avoid reading and listing of "merged" dir
                final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {
                    @Override
                    public boolean accept(File dir, String fileName) {
                        return !(Ints.tryParse(fileName) == null);
                    }
                });
                Arrays.sort(sinkFiles, new Comparator<File>() {
                    @Override
                    public int compare(File o1, File o2) {
                        try {
                            return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                        } catch (NumberFormatException e) {
                            log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                            return o1.compareTo(o2);
                        }
                    }
                });

                try {
                    List<FireHydrant> hydrants = Lists.newArrayList();
                    for (File segmentDir : sinkFiles) {
                        log.info("Loading previously persisted segment at [%s]", segmentDir);

                        // Although this is already handled at the start of this method,
                        // this is a double-check to keep the "merged" dir from being added to hydrants.
                        // If it is certain this is not needed, this check can be removed.
                        if (Ints.tryParse(segmentDir.getName()) == null) {
                            continue;
                        }

                        hydrants.add(
                                new FireHydrant(new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                                        Integer.parseInt(segmentDir.getName())));
                    }

                    Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval),
                            hydrants);
                    sinks.put(sinkInterval.getStartMillis(), currSink);
                    sinkTimeline.add(currSink.getInterval(), currSink.getVersion(),
                            new SingleElementPartitionChunk<Sink>(currSink));

                    segmentAnnouncer.announceSegment(currSink.getSegment());
                } catch (IOException e) {
                    log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                            .addData("interval", sinkInterval).emit();
                }
            }
        }

        private void registerServerViewCallback() {
            serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
                @Override
                public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
                    if (stopped) {
                        log.info("Unregistering ServerViewCallback");
                        persistExecutor.shutdown();
                        return ServerView.CallbackAction.UNREGISTER;
                    }

                    if ("realtime".equals(server.getType())) {
                        return ServerView.CallbackAction.CONTINUE;
                    }

                    log.debug("Checking segment[%s] on server[%s]", segment, server);
                    if (schema.getDataSource().equals(segment.getDataSource())) {
                        final Interval interval = segment.getInterval();
                        for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                            final Long sinkKey = entry.getKey();
                            if (interval.contains(sinkKey)) {
                                final Sink sink = entry.getValue();
                                log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server);

                                final String segmentVersion = segment.getVersion();
                                final String sinkVersion = sink.getSegment().getVersion();
                                if (segmentVersion.compareTo(sinkVersion) >= 0) {
                                    log.info("Segment version[%s] >= sink version[%s]", segmentVersion,
                                            sinkVersion);
                                    try {
                                        segmentAnnouncer.unannounceSegment(sink.getSegment());
                                        FileUtils
                                                .deleteDirectory(computePersistDir(schema, sink.getInterval()));
                                        log.info("Removing sinkKey %d for segment %s", sinkKey,
                                                sink.getSegment().getIdentifier());
                                        sinks.remove(sinkKey);
                                        sinkTimeline.remove(sink.getInterval(), sink.getVersion(),
                                                new SingleElementPartitionChunk<Sink>(sink));

                                        synchronized (handoffCondition) {
                                            handoffCondition.notifyAll();
                                        }
                                    } catch (IOException e) {
                                        log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                                schema.getDataSource()).addData("interval", sink.getInterval())
                                                .emit();
                                    }
                                }
                            }
                        }
                    }

                    return ServerView.CallbackAction.CONTINUE;
                }
            });
        }

        private void startPersistThread() {
            final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
            final long windowMillis = windowPeriod.toStandardDuration().getMillis();

            log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow) + windowMillis)));

            ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
                    new Duration(System.currentTimeMillis(),
                            segmentGranularity.increment(truncatedNow) + windowMillis),
                    new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
                    new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-overseer-%d",
                            schema.getDataSource(), schema.getShardSpec().getPartitionNum())) {
                        @Override
                        public ScheduledExecutors.Signal doCall() {
                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            }

                            log.info("Starting merge and push.");

                            long minTimestamp = segmentGranularity
                                    .truncate(rejectionPolicy.getCurrMaxTime().minus(windowMillis)).getMillis();

                            List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                            for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                                final Long intervalStart = entry.getKey();
                                if (intervalStart < minTimestamp) {
                                    log.info("Adding entry[%s] for merge and push.", entry);
                                    sinksToPush.add(entry);
                                }
                            }

                            for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                                persistAndMerge(entry.getKey(), entry.getValue());
                            }

                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            } else {
                                return ScheduledExecutors.Signal.REPEAT;
                            }
                        }
                    });
        }
    };
}

From source file:io.druid.segment.realtime.plumber.RealtimePlumberSchool.java

@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();

    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    return new Plumber() {
        private volatile boolean stopped = false;
        private volatile ExecutorService persistExecutor = null;
        private volatile ScheduledExecutorService scheduledExecutor = null;

        private final Map<Long, Sink> sinks = Maps.newConcurrentMap();
        private final VersionedIntervalTimeline<String, Sink> sinkTimeline = new VersionedIntervalTimeline<String, Sink>(
                String.CASE_INSENSITIVE_ORDER);

        @Override
        public void startJob() {
            computeBaseDir(schema).mkdirs();
            initializeExecutors();
            bootstrapSinksFromDisk();
            registerServerViewCallback();
            startPersistThread();
        }

        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);

            if (retVal == null) {
                final Interval sinkInterval = new Interval(new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime)));

                retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

                try {
                    segmentAnnouncer.announceSegment(retVal.getSegment());
                    sinks.put(truncatedTime, retVal);
                    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(),
                            new SingleElementPartitionChunk<Sink>(retVal));
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval()).emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {

                @Override
                public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                    return toolchest.makeMetricBuilder(query);
                }
            };

            List<TimelineObjectHolder<String, Sink>> querySinks = Lists.newArrayList();
            for (Interval interval : query.getIntervals()) {
                querySinks.addAll(sinkTimeline.lookup(interval));
            }

            return toolchest.mergeResults(
                    factory.mergeRunners(queryExecutorService, FunctionalIterable.create(querySinks)
                            .transform(new Function<TimelineObjectHolder<String, Sink>, QueryRunner<T>>() {
                                @Override
                                public QueryRunner<T> apply(TimelineObjectHolder<String, Sink> holder) {
                                    final Sink theSink = holder.getObject().getChunk(0).getObject();
                                    return new SpecificSegmentQueryRunner<T>(new MetricsEmittingQueryRunner<T>(
                                            emitter, builderFn,
                                            factory.mergeRunners(MoreExecutors.sameThreadExecutor(),
                                                    Iterables.transform(theSink,
                                                            new Function<FireHydrant, QueryRunner<T>>() {
                                                                @Override
                                                                public QueryRunner<T> apply(FireHydrant input) {
                                                                    return factory
                                                                            .createRunner(input.getSegment());
                                                                }
                                                            }))),
                                            new SpecificSegmentSpec(new SegmentDescriptor(holder.getInterval(),
                                                    theSink.getSegment().getVersion(),
                                                    theSink.getSegment().getShardSpec().getPartitionNum())));
                                }
                            })));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        // Submits persist-n-merge task for a Sink to the persistExecutor
        private void persistAndMerge(final long truncatedTime, final Sink sink) {
            final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(),
                    new DateTime(truncatedTime));
            persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                @Override
                public void doRun() {
                    final Interval interval = sink.getInterval();

                    for (FireHydrant hydrant : sink) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }

                    final File mergedTarget = new File(computePersistDir(schema, interval), "merged");
                    if (mergedTarget.exists()) {
                        log.info("Skipping already-merged sink: %s", sink);
                        return;
                    }

                    File mergedFile = null;
                    try {
                        List<QueryableIndex> indexes = Lists.newArrayList();
                        for (FireHydrant fireHydrant : sink) {
                            Segment segment = fireHydrant.getSegment();
                            final QueryableIndex queryableIndex = segment.asQueryableIndex();
                            log.info("Adding hydrant[%s]", fireHydrant);
                            indexes.add(queryableIndex);
                        }

                        mergedFile = IndexMerger.mergeQueryableIndex(indexes, schema.getAggregators(),
                                mergedTarget);

                        QueryableIndex index = IndexIO.loadIndex(mergedFile);

                        DataSegment segment = dataSegmentPusher.push(mergedFile, sink.getSegment()
                                .withDimensions(Lists.newArrayList(index.getAvailableDimensions())));

                        segmentPublisher.publishSegment(segment);
                    } catch (IOException e) {
                        log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource())
                                .addData("interval", interval).emit();
                    }

                    if (mergedFile != null) {
                        try {
                            if (mergedFile != null) {
                                log.info("Deleting Index File[%s]", mergedFile);
                                FileUtils.deleteDirectory(mergedFile);
                            }
                        } catch (IOException e) {
                            log.warn(e, "Error deleting directory[%s]", mergedFile);
                        }
                    }
                }
            });
        }

        @Override
        public void finishJob() {
            log.info("Shutting down...");

            for (final Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                persistAndMerge(entry.getKey(), entry.getValue());
            }

            while (!sinks.isEmpty()) {
                try {
                    log.info("Cannot shut down yet! Sinks remaining: %s", Joiner.on(", ")
                            .join(Iterables.transform(sinks.values(), new Function<Sink, String>() {
                                @Override
                                public String apply(Sink input) {
                                    return input.getSegment().getIdentifier();
                                }
                            })));

                    synchronized (handoffCondition) {
                        while (!sinks.isEmpty()) {
                            handoffCondition.wait();
                        }
                    }
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }

            // scheduledExecutor is shut down here, but persistExecutor is shut down when the
            // ServerView sends it a new segment callback
            if (scheduledExecutor != null) {
                scheduledExecutor.shutdown();
            }

            stopped = true;
        }

        private void initializeExecutors() {
            if (persistExecutor == null) {
                persistExecutor = Executors.newFixedThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_persist_%d").build());
            }
            if (scheduledExecutor == null) {
                scheduledExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
                        .setDaemon(true).setNameFormat("plumber_scheduled_%d").build());
            }
        }

        private void bootstrapSinksFromDisk() {
            File baseDir = computeBaseDir(schema);
            if (baseDir == null || !baseDir.exists()) {
                return;
            }

            File[] files = baseDir.listFiles();
            if (files == null) {
                return;
            }

            for (File sinkDir : files) {
                Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

                //final File[] sinkFiles = sinkDir.listFiles();
                // To avoid reading and listing of "merged" dir
                final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {
                    @Override
                    public boolean accept(File dir, String fileName) {
                        return !(Ints.tryParse(fileName) == null);
                    }
                });
                Arrays.sort(sinkFiles, new Comparator<File>() {
                    @Override
                    public int compare(File o1, File o2) {
                        try {
                            return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                        } catch (NumberFormatException e) {
                            log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                            return o1.compareTo(o2);
                        }
                    }
                });

                try {
                    List<FireHydrant> hydrants = Lists.newArrayList();
                    for (File segmentDir : sinkFiles) {
                        log.info("Loading previously persisted segment at [%s]", segmentDir);

                        // Although this is already handled at the start of this method,
                        // this is a double-check to keep the "merged" dir from being added to hydrants.
                        // If it is certain this is not needed, this check can be removed.
                        if (Ints.tryParse(segmentDir.getName()) == null) {
                            continue;
                        }

                        hydrants.add(
                                new FireHydrant(new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                                        Integer.parseInt(segmentDir.getName())));
                    }

                    Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval),
                            hydrants);
                    sinks.put(sinkInterval.getStartMillis(), currSink);
                    sinkTimeline.add(currSink.getInterval(), currSink.getVersion(),
                            new SingleElementPartitionChunk<Sink>(currSink));

                    segmentAnnouncer.announceSegment(currSink.getSegment());
                } catch (IOException e) {
                    log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                            .addData("interval", sinkInterval).emit();
                }
            }
        }

        private void registerServerViewCallback() {
            serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
                @Override
                public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
                    if (stopped) {
                        log.info("Unregistering ServerViewCallback");
                        persistExecutor.shutdown();
                        return ServerView.CallbackAction.UNREGISTER;
                    }

                    if ("realtime".equals(server.getType())) {
                        return ServerView.CallbackAction.CONTINUE;
                    }

                    log.debug("Checking segment[%s] on server[%s]", segment, server);
                    if (schema.getDataSource().equals(segment.getDataSource())) {
                        final Interval interval = segment.getInterval();
                        for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                            final Long sinkKey = entry.getKey();
                            if (interval.contains(sinkKey)) {
                                final Sink sink = entry.getValue();
                                log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server);

                                final String segmentVersion = segment.getVersion();
                                final String sinkVersion = sink.getSegment().getVersion();
                                if (segmentVersion.compareTo(sinkVersion) >= 0) {
                                    log.info("Segment version[%s] >= sink version[%s]", segmentVersion,
                                            sinkVersion);
                                    try {
                                        segmentAnnouncer.unannounceSegment(sink.getSegment());
                                        FileUtils
                                                .deleteDirectory(computePersistDir(schema, sink.getInterval()));
                                        log.info("Removing sinkKey %d for segment %s", sinkKey,
                                                sink.getSegment().getIdentifier());
                                        sinks.remove(sinkKey);
                                        sinkTimeline.remove(sink.getInterval(), sink.getVersion(),
                                                new SingleElementPartitionChunk<Sink>(sink));

                                        synchronized (handoffCondition) {
                                            handoffCondition.notifyAll();
                                        }
                                    } catch (IOException e) {
                                        log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                                schema.getDataSource()).addData("interval", sink.getInterval())
                                                .emit();
                                    }
                                }
                            }
                        }
                    }

                    return ServerView.CallbackAction.CONTINUE;
                }
            });
        }

        private void startPersistThread() {
            final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
            final long windowMillis = windowPeriod.toStandardDuration().getMillis();

            log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow) + windowMillis)));

            ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
                    new Duration(System.currentTimeMillis(),
                            segmentGranularity.increment(truncatedNow) + windowMillis),
                    new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
                    new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-overseer-%d",
                            schema.getDataSource(), schema.getShardSpec().getPartitionNum())) {
                        @Override
                        public ScheduledExecutors.Signal doCall() {
                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            }

                            log.info("Starting merge and push.");

                            long minTimestamp = segmentGranularity
                                    .truncate(rejectionPolicy.getCurrMaxTime().minus(windowMillis)).getMillis();

                            List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                            for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                                final Long intervalStart = entry.getKey();
                                if (intervalStart < minTimestamp) {
                                    log.info("Adding entry[%s] for merge and push.", entry);
                                    sinksToPush.add(entry);
                                }
                            }

                            for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                                persistAndMerge(entry.getKey(), entry.getValue());
                            }

                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            } else {
                                return ScheduledExecutors.Signal.REPEAT;
                            }
                        }
                    });
        }
    };
}

From source file:pt.lunacloud.auth.AWS4Signer.java

private String getSignedHeadersString(Request<?> request) {
    List<String> sortedHeaders = new ArrayList<String>();
    sortedHeaders.addAll(request.getHeaders().keySet());
    Collections.sort(sortedHeaders, String.CASE_INSENSITIVE_ORDER);

    StringBuilder buffer = new StringBuilder();
    for (String header : sortedHeaders) {
        if (buffer.length() > 0)
            buffer.append(";");
        buffer.append(header.toLowerCase());
    }

    return buffer.toString();
}

From source file:com.jaspersoft.jasperserver.war.action.ReportDataSourceAction.java

public Event initAction(RequestContext context) throws Exception {
    // Look for any supplied ResourceDTO by any parent flows
    ReportDataSourceWrapper formObject = (ReportDataSourceWrapper) getFormObject(context);

    // Check for any request parameters sent along
    // If there is no parent flow, start here **For testing as a stand alone flow

    if (formObject.isSubflowMode() && formObject.getAllDatasources() == null) {

        List allDataSources = null;

        String dataSourceType = getQueryLanguage(context);

        if (dataSourceType != null && dataSourceType.trim().equalsIgnoreCase("olapClientConnection")) {

            // get a list of all JDBC and JNDI datasources in repo and set in the formObject
            FilterCriteria criteria = FilterCriteria.createFilter(JdbcReportDataSource.class);
            ResourceLookup[] jdbcLookups = repository
                    .findResource(JasperServerUtil.getExecutionContext(context), criteria);

            criteria = FilterCriteria.createFilter(JndiJdbcReportDataSource.class);
            ResourceLookup[] jndiLookups = repository
                    .findResource(JasperServerUtil.getExecutionContext(context), criteria);

            if (jdbcLookups != null && jdbcLookups.length != 0) {
                log("Found Jdbc DataSource lookups size= " + jdbcLookups.length);
                allDataSources = new ArrayList(jdbcLookups.length);
                for (ResourceLookup lookup : jdbcLookups) {
                    allDataSources.add(lookup.getURIString());
                }
            }

            if (jndiLookups != null && jndiLookups.length != 0) {
                log("Found JndiJdbc DataSource lookups size= " + jndiLookups.length);

                if (allDataSources == null) {
                    allDataSources = new ArrayList(jndiLookups.length);
                }
                for (ResourceLookup lookup : jndiLookups) {
                    allDataSources.add(lookup.getURIString());
                }
            }

            Collections.sort(allDataSources, String.CASE_INSENSITIVE_ORDER);

        } else {
            // get a list of all datasources in repo and set in the formObject
            ResourceLookup[] lookups = engine.getDataSources(JasperServerUtil.getExecutionContext(context),
                    dataSourceType);

            if (lookups != null && lookups.length != 0) {
                allDataSources = new ArrayList(lookups.length);

                log("Found ReportDataSource lookups size=" + lookups.length);

                for (ResourceLookup dr : lookups) {
                    allDataSources.add(dr.getURIString());
                }
            }
        }

        formObject.setAllDatasources(allDataSources);

        // TODO get this from main flow
        getAllFolders(formObject);

        String folderURI = (String) context.getFlowScope().get(PARENT_FOLDER_ATTR);
        if (folderURI == null) {
            folderURI = "/";
        }

        if (formObject.getReportDataSource() != null) { // TODO put parent folder in flow scope in main flow
            formObject.getReportDataSource()
                    .setParentFolder((String) context.getFlowScope().get(PARENT_FOLDER_ATTR));
        }
    }

    log("Type of datasource=" + formObject.getType() + " Mode=" + formObject.getMode());
    //   context.getFlowScope().put(FORM_OBJECT_KEY, formObject);
    context.getFlowScope().put("constants", constants);
    context.getExternalContext().getSessionMap().put(DATASOURCE_TREE_DATA_PROVIDER, typedTreeDataProvider);
    context.getFlowScope().put(ATTRIBUTE_RESOURCE_ID_NOT_SUPPORTED_SYMBOLS,
            configuration.getResourceIdNotSupportedSymbols());

    return success();
}

From source file:org.gcaldaemon.core.notifier.GmailNotifier.java

public final void run() {
    log.info("Gmail notifier started successfully.");
    try {
        sleep(7000);
    } catch (Exception ignored) {
        return;
    }

    // Processed (displayed) mails
    HashSet processedMails = new HashSet();

    // Polling mailbox
    int i;
    for (;;) {
        try {

            // Verify local username
            if (users != null) {

                // List active users
                String[] activeUsers = getActiveUsers();
                boolean enabled = false;
                if (activeUsers != null && activeUsers.length != 0) {
                    for (i = 0; i < activeUsers.length; i++) {
                        enabled = isUserMatch(activeUsers[i]);
                        if (enabled) {
                            break;
                        }
                    }
                    if (!enabled) {

                        // Sleep for a minute
                        log.debug("Access denied for active local users.");
                        sleep(MINUTE);

                        // Restart loop (verify username)
                        continue;
                    }
                }
            }

            // Get Gmail address book (or null)
            GmailContact[] contacts = configurator.getAddressBook();
            GmailContact contact;

            // Load feed entries
            SyndEntry[] entries = FeedUtilities.getFeedEntries(FEED_URL, username, password);
            SyndEntry entry;
            HashSet newMails = new HashSet();
            for (i = 0; i < entries.length; i++) {
                entry = entries[i];
                String date = getDate(entry);
                String from = getFrom(entry);
                if (contacts != null) {
                    for (int n = 0; n < contacts.length; n++) {
                        contact = contacts[n];
                        if (from.equalsIgnoreCase(contact.email)) {
                            from = contact.name;
                            break;
                        }
                    }
                }
                String title = getTitle(entry);
                if (mailtermSubject != null) {
                    if (title.equals(mailtermSubject) || title.equals("Re:" + mailtermSubject)) {

                        // Do not display mailterm commands and responses
                        continue;
                    }
                }
                String summary = getSummary(entry);
                newMails.add(date + '\t' + from + '\t' + title + '\t' + summary);
            }

            // Remove already-read mails
            Iterator iterator = processedMails.iterator();
            Object key;
            while (iterator.hasNext()) {
                key = iterator.next();
                if (!newMails.contains(key)) {
                    iterator.remove();
                }
            }

            // Look up unprocessed mails
            LinkedList unprocessedMails = new LinkedList();
            iterator = newMails.iterator();
            while (iterator.hasNext()) {
                key = iterator.next();
                if (processedMails.contains(key)) {
                    continue;
                }
                processedMails.add(key);
                unprocessedMails.addLast(key);
            }

            // Display unprocessed mails
            if (!unprocessedMails.isEmpty()) {

                String[] array = new String[unprocessedMails.size()];
                unprocessedMails.toArray(array);
                Arrays.sort(array, String.CASE_INSENSITIVE_ORDER);
                window.show(array);
            }

            // Sleep
            sleep(pollingTimeout);

        } catch (InterruptedException interrupt) {

            // Dispose window
            if (window != null) {
                try {
                    window.setVisible(false);
                } catch (Exception ignored) {
                }
            }
            break;
        } catch (Exception loadError) {
            log.error("Unable to load Gmail feed!", loadError);
            try {
                sleep(HOUR);
            } catch (Exception interrupt) {
                return;
            }
        }
    }
}
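
The Arrays.sort call above applies the same comparator to a plain String[]. A minimal standalone sketch (the entries are invented; the notifier's real strings are tab-separated "date, from, title, summary" lines):

import java.util.Arrays;

public class CaseInsensitiveArraySortDemo {
    public static void main(String[] args) {
        String[] mails = { "Zoe", "alice", "Bob" };
        Arrays.sort(mails, String.CASE_INSENSITIVE_ORDER);
        System.out.println(Arrays.toString(mails)); // [alice, Bob, Zoe]
    }
}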