Example usage for java.lang.Thread.getName()

A list of usage examples for java.lang.Thread.getName().

Introduction

On this page you can find example usages of java.lang.Thread.getName().

Prototype

public final String getName() 

Document

Returns this thread's name.
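
A minimal sketch of the call itself (the class and thread names here are illustrative): threads created without an explicit name receive a generated default such as "Thread-0", while a name passed to the constructor or to setName is returned by getName() verbatim.

public class GetNameDemo {
    public static void main(String[] args) throws InterruptedException {
        // The thread running main() is normally named "main".
        System.out.println(Thread.currentThread().getName());

        // An explicitly named worker reports the name it was given.
        Thread worker = new Thread(
                () -> System.out.println(Thread.currentThread().getName()), "worker-1");
        worker.start();
        worker.join();
    }
}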

Usage

From source file:org.jasig.portal.events.aggr.PortalRawEventsAggregatorImpl.java

private EventProcessingResult doAggregateRawEventsInternal() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    if (!this.portalEventDimensionPopulator.isCheckedDimensions()) {
        //First time aggregation has happened, run populateDimensions to ensure enough dimension data exists
        final boolean populatedDimensions = this.portalEventAggregationManager.populateDimensions();
        if (!populatedDimensions) {
            this.logger.warn(
                    "Aborting raw event aggregation, populateDimensions returned false so the state of date/time dimensions is unknown");
            return null;
        }
    }

    //Flush any dimension creation before aggregation
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.AGGREGATION, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    final String previousServerName = eventAggregatorStatus.getServerName();
    if (previousServerName != null && !serverName.equals(previousServerName)) {
        this.logger.debug("Last aggregation run on {} clearing all aggregation caches", previousServerName);
        final Session session = getEntityManager().unwrap(Session.class);
        final Cache cache = session.getSessionFactory().getCache();
        cache.evictEntityRegions();
    }

    eventAggregatorStatus.setServerName(serverName);

    //Calculate date range for aggregation
    DateTime lastAggregated = eventAggregatorStatus.getLastEventDate();
    if (lastAggregated == null) {
        lastAggregated = portalEventDao.getOldestPortalEventTimestamp();

        //No portal events to aggregate, skip aggregation
        if (lastAggregated == null) {
            return new EventProcessingResult(0, null, null, true);
        }

        //First time aggregation has run, initialize the CLEAN_UNCLOSED status to save catch-up time 
        final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
                .getEventAggregatorStatus(ProcessingType.CLEAN_UNCLOSED, true);
        AggregationIntervalInfo oldestMinuteInterval = this.intervalHelper
                .getIntervalInfo(AggregationInterval.MINUTE, lastAggregated);
        cleanUnclosedStatus.setLastEventDate(oldestMinuteInterval.getStart().minusMinutes(1));
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
    }

    final DateTime newestEventTime = DateTime.now().minus(this.aggregationDelay).secondOfMinute()
            .roundFloorCopy();

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    final MutableInt events = new MutableInt();
    final MutableObject lastEventDate = new MutableObject(newestEventTime);

    boolean complete;
    try {
        currentThread.setName(currentName + "-" + lastAggregated + "_" + newestEventTime);

        logger.debug("Starting aggregation of events between {} (inc) and {} (exc)", lastAggregated,
                newestEventTime);

        //Do aggregation, capturing the start and end dates
        eventAggregatorStatus.setLastStart(DateTime.now());

        complete = portalEventDao.aggregatePortalEvents(lastAggregated, newestEventTime,
                this.eventAggregationBatchSize,
                new AggregateEventsHandler(events, lastEventDate, eventAggregatorStatus));

        eventAggregatorStatus.setLastEventDate((DateTime) lastEventDate.getValue());
        eventAggregatorStatus.setLastEnd(DateTime.now());
    } finally {
        currentThread.setName(currentName);
    }

    //Store the results of the aggregation
    eventAggregationManagementDao.updateEventAggregatorStatus(eventAggregatorStatus);

    complete = complete
            && (this.eventAggregationBatchSize <= 0 || events.intValue() < this.eventAggregationBatchSize);
    return new EventProcessingResult(events.intValue(), lastAggregated,
            eventAggregatorStatus.getLastEventDate(), complete);
}
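
The method above uses getName() to capture the worker thread's original name, appends the date range being aggregated so thread dumps and log lines show what the thread is working on, and restores the saved name in a finally block. A stripped-down sketch of that pattern (the method and parameter names are illustrative):

void runWithThreadTag(String jobDescription, Runnable task) {
    // Remember the original name so pooled threads are not left mislabeled.
    final Thread currentThread = Thread.currentThread();
    final String originalName = currentThread.getName();
    try {
        currentThread.setName(originalName + "-" + jobDescription);
        task.run();
    } finally {
        currentThread.setName(originalName);
    }
}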

From source file:org.talend.commons.utils.threading.Locker.java

private void initThreadsPool() {
    treadsPool = Executors.newCachedThreadPool(new ThreadFactory() {

        @Override
        public Thread newThread(Runnable r) {
            Thread newThread = Executors.defaultThreadFactory().newThread(r);
            newThread.setName(newThread.getName() + "_" + Locker.class.getSimpleName()); //$NON-NLS-1$
            return newThread;
        }

    });
}
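
Here getName() reads the default name generated by Executors.defaultThreadFactory() and appends an owner-specific suffix so pool threads are attributable in thread dumps. A hedged sketch of the same idea as a reusable factory (the class name is illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

public class SuffixingThreadFactory implements ThreadFactory {
    private final String suffix;

    public SuffixingThreadFactory(String suffix) {
        this.suffix = suffix;
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread t = Executors.defaultThreadFactory().newThread(r);
        // e.g. "pool-1-thread-1" becomes "pool-1-thread-1_Locker"
        t.setName(t.getName() + "_" + suffix);
        return t;
    }
}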

From source file:org.apache.hadoop.hbase.regionserver.transactional.TestTHLogRecovery.java

private void threadDumpingJoin(final Thread t) {
    if (t == null) {
        return;
    }
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    while (t.isAlive()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.info("Continuing...", e);
        }
        if (EnvironmentEdgeManager.currentTimeMillis() - startTime > 60000) {
            startTime = EnvironmentEdgeManager.currentTimeMillis();
            ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
                    "Automatic Stack Trace every 60 seconds waiting on " + t.getName());
        }
    }
}
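
The helper above polls isAlive() and, roughly once a minute, prints a full thread dump labeled with t.getName() so a hung join can be diagnosed. A dependency-free sketch of the same watchdog loop (the interval and message are illustrative):

static void joinWithPeriodicReport(Thread t, long reportIntervalMillis) throws InterruptedException {
    long lastReport = System.currentTimeMillis();
    while (t.isAlive()) {
        // Wait in short slices so the report can fire even if the thread never finishes.
        t.join(1000);
        if (t.isAlive() && System.currentTimeMillis() - lastReport > reportIntervalMillis) {
            lastReport = System.currentTimeMillis();
            System.out.println("Still waiting on thread " + t.getName() + ", state=" + t.getState());
        }
    }
}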

From source file:com.jkoolcloud.tnt4j.streams.outputs.AbstractJKCloudOutput.java

@Override
public void handleConsumerThread(Thread t) throws IllegalStateException {
    Tracker tracker = buildTracker(defaultSource.getFQName());
    checkTrackerState(tracker);
    synchronized (trackersMap) {
        String fqn = defaultSource.getFQName();
        trackersMap.put(getTrackersMapKey(t, fqn), tracker);
        logger().log(OpLevel.DEBUG, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                "TNTStreamOutput.default.tracker"), (t == null ? "null" : t.getName()), fqn);
    }
}
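
Note the null guard in the log statement: getName() is only invoked when the consumer thread reference is non-null, so the debug message degrades to the literal string "null" rather than throwing a NullPointerException.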

From source file:org.apache.qpid.server.Main.java

protected void setExceptionHandler() {
    Thread.UncaughtExceptionHandler handler = null;
    String handlerClass = System.getProperty("qpid.broker.exceptionHandler");
    if (handlerClass != null) {
        try {
            handler = (Thread.UncaughtExceptionHandler) Class.forName(handlerClass).newInstance();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException
                | ClassCastException e) {

        }
    }

    if (handler == null) {
        handler = new Thread.UncaughtExceptionHandler() {
            public void uncaughtException(final Thread t, final Throwable e) {
                boolean continueOnError = Boolean.getBoolean("qpid.broker.exceptionHandler.continue");
                try {
                    System.err.println(
                            "########################################################################");
                    System.err.println("#");
                    System.err.print("# Unhandled Exception ");
                    System.err.print(e.toString());
                    System.err.print(" in Thread ");
                    System.err.println(t.getName());
                    System.err.println("#");
                    System.err.println(continueOnError
                            ? "# Forced to continue by JVM setting 'qpid.broker.exceptionHandler.continue'"
                            : "# Exiting");
                    System.err.println("#");
                    System.err.println(
                            "########################################################################");
                    e.printStackTrace(System.err);

                    Logger logger = LoggerFactory.getLogger("org.apache.qpid.server.Main");
                    logger.error("Uncaught exception, " + (continueOnError ? "continuing." : "shutting down."),
                            e);
                } finally {
                    if (!continueOnError) {
                        Runtime.getRuntime().halt(1);
                    }
                }

            }
        };

        Thread.setDefaultUncaughtExceptionHandler(handler);
    }
}
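
The broker's fallback handler includes t.getName() in its report, which is often the only hint about which subsystem's thread died. Since Thread.UncaughtExceptionHandler has a single abstract method, a minimal sketch can be written as a lambda (the logging destination is illustrative):

Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
    // Report the dying thread by name before it is discarded.
    System.err.println("Unhandled " + e + " in thread " + t.getName());
    e.printStackTrace(System.err);
});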

From source file:org.apereo.portal.events.aggr.PortalRawEventsAggregatorImpl.java

private EventProcessingResult doAggregateRawEventsInternal() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    if (!this.portalEventDimensionPopulator.isCheckedDimensions()) {
        //First time aggregation has happened, run populateDimensions to ensure enough dimension data exists
        final boolean populatedDimensions = this.portalEventAggregationManager.populateDimensions();
        if (!populatedDimensions) {
            this.logger.warn(
                    "Aborting raw event aggregation, populateDimensions returned false so the state of date/time dimensions is unknown");
            return null;
        }
    }

    //Flush any dimension creation before aggregation
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.AGGREGATION, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    final String previousServerName = eventAggregatorStatus.getServerName();
    if (previousServerName != null && !serverName.equals(previousServerName)) {
        this.logger.debug("Last aggregation run on {} clearing all aggregation caches", previousServerName);
        final Session session = getEntityManager().unwrap(Session.class);
        final Cache cache = session.getSessionFactory().getCache();
        cache.evictEntityRegions();
    }

    eventAggregatorStatus.setServerName(serverName);

    //Calculate date range for aggregation
    DateTime lastAggregated = eventAggregatorStatus.getLastEventDate();
    if (lastAggregated == null) {
        lastAggregated = portalEventDao.getOldestPortalEventTimestamp();

        //No portal events to aggregate, skip aggregation
        if (lastAggregated == null) {
            return new EventProcessingResult(0, null, null, true);
        }

        //First time aggregation has run, initialize the CLEAN_UNCLOSED status to save catch-up time 
        final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
                .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.CLEAN_UNCLOSED, true);
        AggregationIntervalInfo oldestMinuteInterval = this.intervalHelper
                .getIntervalInfo(AggregationInterval.MINUTE, lastAggregated);
        cleanUnclosedStatus.setLastEventDate(oldestMinuteInterval.getStart().minusMinutes(1));
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
    }

    final DateTime newestEventTime = DateTime.now().minus(this.aggregationDelay).secondOfMinute()
            .roundFloorCopy();

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    final MutableInt events = new MutableInt();
    final MutableObject lastEventDate = new MutableObject(newestEventTime);

    boolean complete;
    try {
        currentThread.setName(currentName + "-" + lastAggregated + "_" + newestEventTime);

        logger.debug("Starting aggregation of events between {} (inc) and {} (exc)", lastAggregated,
                newestEventTime);

        //Do aggregation, capturing the start and end dates
        eventAggregatorStatus.setLastStart(DateTime.now());

        complete = portalEventDao.aggregatePortalEvents(lastAggregated, newestEventTime,
                this.eventAggregationBatchSize,
                new AggregateEventsHandler(events, lastEventDate, eventAggregatorStatus));

        eventAggregatorStatus.setLastEventDate((DateTime) lastEventDate.getValue());
        eventAggregatorStatus.setLastEnd(DateTime.now());
    } finally {
        currentThread.setName(currentName);
    }

    //Store the results of the aggregation
    eventAggregationManagementDao.updateEventAggregatorStatus(eventAggregatorStatus);

    complete = complete
            && (this.eventAggregationBatchSize <= 0 || events.intValue() < this.eventAggregationBatchSize);
    return new EventProcessingResult(events.intValue(), lastAggregated,
            eventAggregatorStatus.getLastEventDate(), complete);
}

From source file:org.apache.hadoop.hive.metastore.MetaStoreUtils.java

private static String getAllThreadStacksAsString() {
    Map<Thread, StackTraceElement[]> threadStacks = Thread.getAllStackTraces();
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<Thread, StackTraceElement[]> entry : threadStacks.entrySet()) {
        Thread t = entry.getKey();
        sb.append(System.lineSeparator());
        sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState());
        addStackString(entry.getValue(), sb);
    }
    return sb.toString();
}
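
Thread.getAllStackTraces() is keyed by Thread, so getName() and getState() are available next to each captured stack. A trimmed-down sketch that only lists names and states, without the stack formatting:

static void printThreadSummary() {
    for (java.util.Map.Entry<Thread, StackTraceElement[]> entry : Thread.getAllStackTraces().entrySet()) {
        Thread t = entry.getKey();
        System.out.println(t.getName() + " [" + t.getState() + "], "
                + entry.getValue().length + " frames");
    }
}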

From source file:ste.web.http.api.BugFreeApiHandlerExec.java

@Test
public void running_multiple_thread_in_different_contexts() throws Exception {
    final HttpSessionContext CTX1 = new HttpSessionContext();
    final HttpSessionContext CTX2 = new HttpSessionContext();
    CTX1.setAttribute(HttpCoreContext.HTTP_CONNECTION, getConnection());
    CTX2.setAttribute(HttpCoreContext.HTTP_CONNECTION, getConnection());
    Thread t1 = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                handler.handle(request("/api/app/get/multithreading"), response, CTX1);
            } catch (Exception x) {
                x.printStackTrace();
            }
        }
    });
    Thread t2 = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                handler.handle(request("/api/app/get/multithreading"), response, CTX2);
            } catch (Exception x) {
                x.printStackTrace();
            }
        }
    });
    t1.start();
    t2.start();
    t1.join();
    t2.join();
    then(CTX1.get("view")).isEqualTo(t1.getName());
    then(CTX2.get("view")).isEqualTo(t2.getName());
}
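
The assertions work because the handler under test presumably stores Thread.currentThread().getName() into the "view" attribute of whichever context it receives; comparing those values with t1.getName() and t2.getName() confirms that each request ran on its own thread with its own context.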

From source file:org.jasig.portal.events.aggr.PortalRawEventsAggregatorImpl.java

@Override
@AggrEventsTransactional
public EventProcessingResult doCloseAggregations() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.CLEAN_UNCLOSED, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    cleanUnclosedStatus.setServerName(serverName);
    cleanUnclosedStatus.setLastStart(new DateTime());

    //Determine date of most recently aggregated data
    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.AGGREGATION, false);
    if (eventAggregatorStatus == null || eventAggregatorStatus.getLastEventDate() == null) {
        //Nothing has been aggregated, skip unclosed cleanup

        cleanUnclosedStatus.setLastEnd(new DateTime());
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);

        return new EventProcessingResult(0, null, null, true);
    }

    final DateTime lastAggregatedDate = eventAggregatorStatus.getLastEventDate();

    //If lastCleanUnclosedDate is null use the oldest date dimension as there can be 
    //no aggregations that exist before it
    final DateTime lastCleanUnclosedDate;
    if (cleanUnclosedStatus.getLastEventDate() == null) {
        final DateDimension oldestDateDimension = this.dateDimensionDao.getOldestDateDimension();
        lastCleanUnclosedDate = oldestDateDimension.getDate().toDateTime();
    } else {
        lastCleanUnclosedDate = cleanUnclosedStatus.getLastEventDate();
    }

    if (!(lastCleanUnclosedDate.isBefore(lastAggregatedDate))) {
        logger.debug("No events aggregated since last unclosed aggregation cleaning, skipping clean: {}",
                lastAggregatedDate);
        return new EventProcessingResult(0, lastCleanUnclosedDate, lastAggregatedDate, true);
    }

    //Switch to flush on commit to avoid flushes during queries
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    //Track the number of closed aggregations and the last date of a cleaned interval
    int closedAggregations = 0;
    int cleanedIntervals = 0;
    DateTime cleanUnclosedEnd;

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    try {
        currentThread.setName(currentName + "-" + lastCleanUnclosedDate + "-" + lastAggregatedDate);

        //Local caches used to reduce db io
        final IntervalsForAggregatorHelper intervalsForAggregatorHelper = new IntervalsForAggregatorHelper();
        final Map<AggregationInterval, AggregationIntervalInfo> previousIntervals = new HashMap<AggregationInterval, AggregationIntervalInfo>();

        //A DateTime within the next interval to close aggregations in
        DateTime nextIntervalDate = lastCleanUnclosedDate;
        do {
            //Reset our goal of catching up to the last aggregated event on every iteration
            cleanUnclosedEnd = lastAggregatedDate;

            //For each interval the aggregator supports, cleanup the unclosed aggregations
            for (final AggregationInterval interval : intervalsForAggregatorHelper.getHandledIntervals()) {
                final AggregationIntervalInfo previousInterval = previousIntervals.get(interval);
                if (previousInterval != null && nextIntervalDate.isBefore(previousInterval.getEnd())) {
                    logger.debug(
                            "{} interval before {} has already been cleaned during this execution, ignoring",
                            interval, previousInterval.getEnd());
                    continue;
                }

                //The END date of the last clean session will find us the next interval to clean
                final AggregationIntervalInfo nextIntervalToClean = intervalHelper.getIntervalInfo(interval,
                        nextIntervalDate);
                previousIntervals.put(interval, nextIntervalToClean);
                if (nextIntervalToClean == null) {
                    continue;
                }

                final DateTime start = nextIntervalToClean.getStart();
                final DateTime end = nextIntervalToClean.getEnd();
                if (!end.isBefore(lastAggregatedDate)) {
                    logger.debug("{} interval between {} and {} is still active, ignoring",
                            new Object[] { interval, start, end });
                    continue;
                }

                //Track the oldest interval end, this ensures that nothing is missed
                if (end.isBefore(cleanUnclosedEnd)) {
                    cleanUnclosedEnd = end;
                }

                logger.debug("Cleaning unclosed {} aggregations between {} and {}",
                        new Object[] { interval, start, end });

                for (final IPortalEventAggregator<PortalEvent> portalEventAggregator : portalEventAggregators) {
                    checkShutdown();

                    final Class<? extends IPortalEventAggregator<?>> aggregatorType = getClass(
                            portalEventAggregator);

                    //Get aggregator specific interval info config
                    final AggregatedIntervalConfig aggregatorIntervalConfig = intervalsForAggregatorHelper
                            .getAggregatorIntervalConfig(aggregatorType);

                    //If the aggregator is being used for the specified interval call cleanUnclosedAggregations
                    if (aggregatorIntervalConfig.isIncluded(interval)) {
                        closedAggregations += portalEventAggregator.cleanUnclosedAggregations(start, end,
                                interval);
                    }
                }

                cleanedIntervals++;
            }

            //Set the next interval to the end date from the last aggregation run
            nextIntervalDate = cleanUnclosedEnd;

            logger.debug("Closed {} aggregations across {} interval before {} with goal of {}", new Object[] {
                    closedAggregations, cleanedIntervals, cleanUnclosedEnd, lastAggregatedDate });
            //Loop until either the batchSize of cleaned aggregations has been reached or no aggregation work is done
        } while (closedAggregations <= cleanUnclosedAggregationsBatchSize
                && cleanedIntervals <= cleanUnclosedIntervalsBatchSize
                && cleanUnclosedEnd.isBefore(lastAggregatedDate));
    } finally {
        currentThread.setName(currentName);
    }

    //Update the status object and store it
    cleanUnclosedStatus.setLastEventDate(cleanUnclosedEnd);
    cleanUnclosedStatus.setLastEnd(new DateTime());
    eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);

    return new EventProcessingResult(closedAggregations, lastCleanUnclosedDate, lastAggregatedDate,
            !cleanUnclosedEnd.isBefore(lastAggregatedDate));
}