Example usage for java.util.concurrent TimeUnit HOURS

List of usage examples for java.util.concurrent TimeUnit HOURS

Introduction

On this page you can find example usages of java.util.concurrent TimeUnit.HOURS.

Prototype

TimeUnit HOURS

Document

Time unit representing sixty minutes.
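
Before the usage examples, here is a minimal, self-contained sketch of the conversion methods on TimeUnit.HOURS that most of the snippets below rely on. All calls are from the standard java.util.concurrent.TimeUnit API; the class name is just for illustration.

import java.util.concurrent.TimeUnit;

public class TimeUnitHoursDemo {
    public static void main(String[] args) {
        // Convert a count of hours into smaller units
        System.out.println(TimeUnit.HOURS.toMillis(1));    // 3600000
        System.out.println(TimeUnit.HOURS.toSeconds(2));   // 7200
        System.out.println(TimeUnit.HOURS.toMinutes(3));   // 180

        // Convert from another unit into hours (integer division truncates)
        System.out.println(TimeUnit.MINUTES.toHours(150)); // 2

        // HOURS is also commonly passed as the unit argument of timeouts
        // and schedulers, as several examples below show
        System.out.println(TimeUnit.HOURS.name());         // HOURS
    }
}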

Usage

From source file: com.linkedin.pinot.common.metadata.SegmentZKMetadataTest.java

private OfflineSegmentZKMetadata getTestOfflineSegmentMetadata() {
    OfflineSegmentZKMetadata offlineSegmentMetadata = new OfflineSegmentZKMetadata();
    offlineSegmentMetadata.setSegmentName("testTable_O_3000_4000");
    offlineSegmentMetadata.setTableName("testTable");
    offlineSegmentMetadata.setSegmentType(SegmentType.OFFLINE);
    offlineSegmentMetadata.setIndexVersion("v1");
    offlineSegmentMetadata.setStartTime(1000);
    offlineSegmentMetadata.setEndTime(2000);
    offlineSegmentMetadata.setTimeUnit(TimeUnit.HOURS);
    offlineSegmentMetadata.setTotalRawDocs(50000);
    offlineSegmentMetadata.setCrc(54321);
    offlineSegmentMetadata.setCreationTime(1000);
    offlineSegmentMetadata.setDownloadUrl("http://localhost:8000/testTable_O_3000_4000");
    offlineSegmentMetadata.setPushTime(4000);
    offlineSegmentMetadata.setRefreshTime(8000);
    return offlineSegmentMetadata;
}
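
In this snippet TimeUnit.HOURS records the unit of the segment's time range, so the start and end values of 1000 and 2000 set above are interpreted as hours rather than, say, milliseconds.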

From source file: eu.tango.energymodeller.datasourceclient.SlurmDataSourceAdaptor.java

/**
 * This takes a string in the format 0:03, i.e. mins:seconds, and converts it
 * into seconds.
 *
 * @param duration The string to parse
 * @return The time in seconds the duration string translates to.
 */
public static long parseDurationString(String duration) {
    long seconds = 0;
    // If this is used to parse a maximum runtime then the value may be UNLIMITED, so that case is guarded against
    if (duration == null || duration.isEmpty() || !duration.matches("\\d+(:\\d+)*?")
            || duration.equals("UNLIMITED")) {
        return 0;
    }
    String[] durationSplit = duration.split(":"); //0:03 i.e. mins:seconds or 1:0:0 i.e. 1 hour
    switch (durationSplit.length) {
    case 1:
        seconds = seconds + Long.parseLong(durationSplit[0]);
        break;
    case 2:
        seconds = seconds + TimeUnit.MINUTES.toSeconds(Long.parseLong(durationSplit[0]));
        seconds = seconds + Long.parseLong(durationSplit[1]);
        break;
    case 3:
        seconds = seconds + TimeUnit.HOURS.toSeconds(Long.parseLong(durationSplit[0]));
        seconds = seconds + TimeUnit.MINUTES.toSeconds(Long.parseLong(durationSplit[1]));
        seconds = seconds + Long.parseLong(durationSplit[2]);
        break;
    }
    return seconds;
}
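
Under the format described in the Javadoc, parseDurationString("0:03") returns 3 and parseDurationString("2:05") returns 125, while parseDurationString("1:0:0") takes the three-part branch and uses TimeUnit.HOURS.toSeconds to return 3600.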

From source file: org.dcache.util.histograms.HistogramModelTest.java

@Test
public void updateOnTimeframeHistogramShouldRotateBuffer() throws NoSuchMethodException, InstantiationException,
        IllegalAccessException, InvocationTargetException {
    givenTimeframeHistogram();
    givenQueueCountValuesFor(48);
    givenBinUnitOf((double) TimeUnit.HOURS.toMillis(1));
    givenBinCountOf(48);
    givenBinLabelOf(TimeUnit.HOURS.name());
    givenDataLabelOf("COUNT");
    givenHistogramTypeOf("Queued Movers");
    givenHighestBinOf(getHoursInThePastFromNow(3));
    whenConfigureIsCalled();
    assertThatUpdateRotatesBuffer(3);
}
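
Here the bin unit is one hour expressed in milliseconds (TimeUnit.HOURS.toMillis(1) is 3600000), so the 48 bins together cover a 48-hour window.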

From source file: org.alfresco.test.util.ContentAspectsTests.java

@Test
public void addRestrictableAspectAspect() {
    int hours = 5;
    contentAspect.addRestrictable(userName, password, siteName, plainDoc, hours);
    List<Property<?>> properties = contentAspect.getProperties(userName, password, siteName, plainDoc);
    Assert.assertEquals(contentAspect.getPropertyValue(properties, "dp:offlineExpiresAfter"),
            String.valueOf(TimeUnit.HOURS.toMillis(hours)));
}
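
TimeUnit.HOURS.toMillis(hours) evaluates to 18000000 for 5 hours, so the assertion verifies that the dp:offlineExpiresAfter property was stored as a millisecond value.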

From source file: com.gtwm.pb.model.manageSchema.DatabaseDefn.java

/**
 * There should be one DatabaseInfo object per agileBase application
 * instance. This constructor generates it. It bootstraps the application.
 * All schema objects are loaded into memory from the pervasive store.
 *
 * The authentication manager (AuthManagerInfo), store of all users, roles
 * and permissions is loaded too.
 * 
 * Finally, the data manager (a DataManagementInfo object) is created and
 * initialised
 * 
 * @throws CantDoThatException
 *             If more than one Authenticator was found in the database
 */
public DatabaseDefn(DataSource relationalDataSource, String webAppRoot) throws SQLException,
        ObjectNotFoundException, CantDoThatException, MissingParametersException, CodingErrorException {
    this.relationalDataSource = relationalDataSource;
    // Load table schema objects
    Session hibernateSession = HibernateUtil.currentSession();
    try {
        this.authManager = new AuthManager(relationalDataSource);
    } finally {
        HibernateUtil.closeSession();
    }
    // Methods and objects dealing with data as opposed to the schema are
    // kept in DataManagement
    this.dataManagement = new DataManagement(relationalDataSource, webAppRoot, this.authManager);
    DashboardPopulator dashboardPopulator = new DashboardPopulator(this);
    // Start first dashboard population immediately
    this.initialDashboardPopulatorThread = new Thread(dashboardPopulator);
    this.initialDashboardPopulatorThread.start();
    // and schedule regular dashboard population once a day at a time of low
    // activity
    int hourNow = Calendar.getInstance().get(Calendar.HOUR_OF_DAY);
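    // initialDelay is in hours: it always targets the low-activity hour on the
    // following day, even if today's low-activity hour has not yet passed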
    int initialDelay = 24 + AppProperties.lowActivityHour - hourNow;
    this.dashboardScheduler = Executors.newSingleThreadScheduledExecutor();
    this.scheduledDashboardPopulate = dashboardScheduler.scheduleAtFixedRate(dashboardPopulator, initialDelay,
            24, TimeUnit.HOURS);
    // one-off boot actions
    // this.addCommentsFeedFields();
}

From source file: com.linkedin.pinot.server.integration.realtime.RealtimeTableDataManagerTest.java

private static RealtimeSegmentZKMetadata getRealtimeSegmentZKMetadata() {
    RealtimeSegmentZKMetadata realtimeSegmentMetadata = new RealtimeSegmentZKMetadata();
    realtimeSegmentMetadata.setSegmentName("testTable_R_1000_groupId0_part0");
    realtimeSegmentMetadata.setTableName("testTable");
    realtimeSegmentMetadata.setSegmentType(SegmentType.REALTIME);
    realtimeSegmentMetadata.setIndexVersion("v1");
    realtimeSegmentMetadata.setStartTime(1000);
    realtimeSegmentMetadata.setEndTime(-1);
    realtimeSegmentMetadata.setTimeUnit(TimeUnit.HOURS);
    realtimeSegmentMetadata.setStatus(Status.IN_PROGRESS);
    realtimeSegmentMetadata.setTotalRawDocs(-1);
    realtimeSegmentMetadata.setCrc(-1);
    realtimeSegmentMetadata.setCreationTime(1000);
    return realtimeSegmentMetadata;
}

From source file: org.ethereum.rpc.Web3Impl.java

public String eth_hashrate() {
    BigDecimal hashesPerSecond = BigDecimal.ZERO;
    if (RskSystemProperties.RSKCONFIG.minerServerEnabled()) {
        BigInteger hashesPerHour = this.worldManager.getHashRateCalculator().calculateNodeHashRate(1L,
                TimeUnit.HOURS);
        hashesPerSecond = new BigDecimal(hashesPerHour).divide(new BigDecimal(TimeUnit.HOURS.toSeconds(1)), 3,
                RoundingMode.HALF_UP);
    }

    String result = hashesPerSecond.toString();

    if (logger.isDebugEnabled())
        logger.debug("eth_hashrate(): " + result);

    return result;
}
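
Since TimeUnit.HOURS.toSeconds(1) is 3600, dividing the hourly hash count by it converts the rate to hashes per second, rounded here to three decimal places.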

From source file: org.apdplat.superword.system.AntiRobotFilter.java

public void init(FilterConfig config) throws ServletException {
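    // initialDelay is in hours, so the first run fires at roughly the next
    // midnight (the current minutes are ignored) and then repeats every 24 hours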
    int initialDelay = 24 - LocalDateTime.now().getHour();
    scheduledExecutorService.scheduleAtFixedRate(() -> {
        try {
            LOG.info("clear last day anti-robot counter");
            LocalDateTime timePoint = LocalDateTime.now().minusDays(1);
            String date = SIMPLE_DATE_FORMAT
                    .format(Date.from(timePoint.atZone(ZoneId.systemDefault()).toInstant()));
            Map<String, Integer> archive = new HashMap<String, Integer>();
            Enumeration<String> keys = servletContext.getAttributeNames();
            while (keys.hasMoreElements()) {
                String key = keys.nextElement();
                if (key.startsWith("anti-robot-") && key.endsWith(date)) {
                    archive.put(key, ((AtomicInteger) servletContext.getAttribute(key)).intValue());
                }
            }
            archive.keySet().forEach(servletContext::removeAttribute);
            File path = new File(servletContext.getRealPath("/WEB-INF/data/anti-robot-archive/"));
            if (!path.exists()) {
                path.mkdirs();
            }
            String file = path.getPath() + "/" + date + "__user_agent_invalid_count_" + invalidCount + ".txt";
            Files.write(Paths.get(file), archive.entrySet().stream()
                    .map(e -> e.getKey().replace("anti-robot-", "").replace("-", "\t") + "\t" + e.getValue())
                    .map(line -> {
                        String[] attrs = line.split("\\s+");
                        String location = "";
                        if (attrs != null && attrs.length > 1) {
                            String ip = attrs[1];
                            location = IPUtils.getIPLocation(ip).toString();
                        }
                        return line + "\t" + location;
                    }).collect(Collectors.toList()));
            invalidCount = 0;
            LOG.info("clear last day anti-robot counter finished: " + file);
        } catch (Exception e) {
            LOG.error("save anti-robot-archive failed", e);
        }
    }, initialDelay, 24, TimeUnit.HOURS);
}

From source file: org.apache.tez.dag.app.launcher.TezContainerLauncherImpl.java

@Override
public void start() throws TezException {
    // pass a copy of config to ContainerManagementProtocolProxy until YARN-3497 is fixed
    cmProxy = new ContainerManagementProtocolProxy(conf);

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true)
            .build();

    // Start with a default core-pool size of 10 and change it dynamically.
    launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
            new LinkedBlockingQueue<Runnable>(), tf, new CustomizedRejectedExecutionHandler());
    eventHandlingThread = new Thread() {
        @Override
        public void run() {
            ContainerOp event = null;
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!serviceStopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                int poolSize = launcherPool.getCorePoolSize();

                // See if we need to bump up the pool size, but only if we
                // haven't reached the maximum limit yet.
                if (poolSize != limitOnPoolSize) {

                    // numNodes is the number of nodes where containers will run
                    // at *this* point in time. This is *not* the cluster size
                    // and doesn't need to be.
                    int numNodes = getContext().getNumNodes(TezConstants.getTezYarnServicePluginName());
                    int idealPoolSize = Math.min(limitOnPoolSize, numNodes);

                    if (poolSize < idealPoolSize) {
                        // Bump up the pool size to idealPoolSize + INITIAL_POOL_SIZE;
                        // the latter is just a buffer so we are not always
                        // increasing the pool size
                        int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + INITIAL_POOL_SIZE);
                        LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
                                + " as number-of-nodes to talk to is " + numNodes);
                        launcherPool.setCorePoolSize(newPoolSize);
                    }
                }

                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(createEventProcessor(event));

                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
            }
        }
    };
    eventHandlingThread.setName("ContainerLauncher Event Handler");
    eventHandlingThread.start();
    boolean cleanupDagDataOnComplete = ShuffleUtils.isTezShuffleHandler(conf)
            && conf.getBoolean(TezConfiguration.TEZ_AM_DAG_CLEANUP_ON_COMPLETION,
                    TezConfiguration.TEZ_AM_DAG_CLEANUP_ON_COMPLETION_DEFAULT);
    if (cleanupDagDataOnComplete) {
        String deletionTrackerClassName = conf.get(TezConfiguration.TEZ_AM_DELETION_TRACKER_CLASS,
                TezConfiguration.TEZ_AM_DELETION_TRACKER_CLASS_DEFAULT);
        deletionTracker = ReflectionUtils.createClazzInstance(deletionTrackerClassName,
                new Class[] { Configuration.class }, new Object[] { conf });
    }
}

From source file: org.apache.hadoop.mapreduce.jobhistory.JobHistory.java

private void startFileMoverThreads() {
    executor = new ThreadPoolExecutor(1, 3, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
}
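
In this last snippet, as in the Tez example above, TimeUnit.HOURS supplies the keepAliveTime unit of a ThreadPoolExecutor: threads beyond the core pool size that have been idle for one hour are reclaimed.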