Example usage for java.util.concurrent TimeUnit DAYS

Introduction

On this page you can find usage examples for java.util.concurrent TimeUnit.DAYS.

Prototype

TimeUnit DAYS

Document

Time unit representing twenty four hours.
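
A minimal, self-contained sketch (not taken from any of the projects listed below) of the conversions the examples rely on; the printed values can be verified by hand.

import java.util.concurrent.TimeUnit;

public class TimeUnitDaysDemo {
    public static void main(String[] args) {
        System.out.println(TimeUnit.DAYS.toHours(3));   // 72
        System.out.println(TimeUnit.DAYS.toMillis(3));  // 259200000
        // convert() truncates: 100 hours is only 4 whole days
        System.out.println(TimeUnit.DAYS.convert(100, TimeUnit.HOURS)); // 4
    }
}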

Usage

From source file:co.com.soinsoftware.hotelero.view.JFRoomPayment.java

private long calculateDaysToBeBilled(final Date initialDate) {
    final Date finalDate = this.getFinalDate(initialDate, 14);
    final long diff = finalDate.getTime() - initialDate.getTime();
    return TimeUnit.DAYS.convert(diff, TimeUnit.MILLISECONDS);
}
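
TimeUnit.DAYS.convert truncates toward zero, so any remainder smaller than a full day is dropped. A hypothetical illustration (the 36-hour difference is made up, not from the hotel project):

    long diff = TimeUnit.HOURS.toMillis(36); // a 36-hour difference
    long billedDays = TimeUnit.DAYS.convert(diff, TimeUnit.MILLISECONDS);
    // billedDays == 1; the remaining 12 hours are discarded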

From source file:org.apache.metron.common.configuration.profiler.ProfileConfig.java

public ProfileConfig withExpires(Long expiresDays) {
    this.expires = TimeUnit.DAYS.toMillis(expiresDays);
    return this;
}
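
As a quick sanity check of the arithmetic (not part of the Metron sources): passing 30L stores 30 * 86,400,000 = 2,592,000,000 milliseconds in the expires field.

    long expiresMillis = TimeUnit.DAYS.toMillis(30L); // 2_592_000_000L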

From source file:com.jivesoftware.os.upena.service.UpenaStore.java

public void init(OrderIdProvider idProvider, int minServicePort, int maxServicePort, boolean cleanup)
        throws Exception {

    PartitionProperties partitionProperties = new PartitionProperties(Durability.fsync_async,
            TimeUnit.DAYS.toMillis(30), TimeUnit.DAYS.toMillis(10), TimeUnit.DAYS.toMillis(30),
            TimeUnit.DAYS.toMillis(10), 0, 0, 0, 0, false, Consistency.quorum, true, true, false,
            RowType.snappy_primary, "lab", -1, null, -1, -1);

    UpenaMap<UserKey, User> upenaUsers = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(userStoreKey), UserKey.class, User.class,
                    new UserKeyProvider(), null);

    UpenaMap<UserKey, User> amzaUsers = new AmzaUpenaMap<>(mapper, amzaService, embeddedClientProvider,
            partitionProperties, getPartitionName("user"), UserKey.class, User.class, new UserKeyProvider(),
            null);

    users = copy("users", upenaUsers, amzaUsers, cleanup);

    UpenaMap<PermissionKey, Permission> upenaPermissions = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(permissionStoreKey), PermissionKey.class,
                    Permission.class, new PermissionKeyProvider(), null);

    UpenaMap<PermissionKey, Permission> amzaPermissions = new AmzaUpenaMap<>(mapper, amzaService,
            embeddedClientProvider, partitionProperties, getPartitionName("permissions"), PermissionKey.class,
            Permission.class, new PermissionKeyProvider(), null);

    permissions = copy("permissions", upenaPermissions, amzaPermissions, cleanup);

    UpenaMap<ProjectKey, Project> upenaProjects = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(projectStoreKey), ProjectKey.class,
                    Project.class, new ProjectKeyProvider(idProvider), null);

    UpenaMap<ProjectKey, Project> amzaProjects = new AmzaUpenaMap<>(mapper, amzaService, embeddedClientProvider,
            partitionProperties, getPartitionName("projects"), ProjectKey.class, Project.class,
            new ProjectKeyProvider(idProvider), null);

    projects = copy("projects", upenaProjects, amzaProjects, cleanup);

    UpenaMap<ClusterKey, Cluster> upenaClusters = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(clusterStoreKey), ClusterKey.class,
                    Cluster.class, new ClusterKeyProvider(idProvider), null);

    UpenaMap<ClusterKey, Cluster> amzaClusters = new AmzaUpenaMap<>(mapper, amzaService, embeddedClientProvider,
            partitionProperties, getPartitionName("clusters"), ClusterKey.class, Cluster.class,
            new ClusterKeyProvider(idProvider), null);

    clusters = copy("clusters", upenaClusters, amzaClusters, cleanup);

    UpenaMap<LBKey, LB> upenaLoadBalancers = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(loadbalancers), LBKey.class, LB.class,
                    new LBKeyProvider(idProvider), null);

    UpenaMap<LBKey, LB> amzaLoadbalancers = new AmzaUpenaMap<>(mapper, amzaService, embeddedClientProvider,
            partitionProperties, getPartitionName("load-balancers"), LBKey.class, LB.class,
            new LBKeyProvider(idProvider), null);

    loadBalancers = copy("loadBalancers", upenaLoadBalancers, amzaLoadbalancers, cleanup);

    UpenaMap<HostKey, Host> upenaHosts = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(hostStoreKey), HostKey.class, Host.class,
                    new HostKeyProvider(), null);

    UpenaMap<HostKey, Host> amzaHosts = new AmzaUpenaMap<>(mapper, amzaService, embeddedClientProvider,
            partitionProperties, getPartitionName("hosts"), HostKey.class, Host.class, new HostKeyProvider(),
            null);

    hosts = copy("hosts", upenaHosts, amzaHosts, cleanup);

    UpenaMap<ServiceKey, Service> upenaServices = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(serviceStoreKey), ServiceKey.class,
                    Service.class, new ServiceKeyProvider(idProvider), null);

    UpenaMap<ServiceKey, Service> amzaServices = new AmzaUpenaMap<>(mapper, amzaService, embeddedClientProvider,
            partitionProperties, getPartitionName("services"), ServiceKey.class, Service.class,
            new ServiceKeyProvider(idProvider), null);

    services = copy("services", upenaServices, amzaServices, cleanup);

    UpenaMap<ReleaseGroupKey, ReleaseGroup> upenaReleases = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(releaseGroupStoreKey), ReleaseGroupKey.class,
                    ReleaseGroup.class, new ReleaseGroupKeyProvider(idProvider), null);

    UpenaMap<ReleaseGroupKey, ReleaseGroup> amzaReleases = new AmzaUpenaMap<>(mapper, amzaService,
            embeddedClientProvider, partitionProperties, getPartitionName("releases"), ReleaseGroupKey.class,
            ReleaseGroup.class, new ReleaseGroupKeyProvider(idProvider), null);

    releaseGroups = copy("releaseGroups", upenaReleases, amzaReleases, cleanup);

    UpenaMap<InstanceKey, Instance> upenaInstances = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(instanceStoreKey), InstanceKey.class,
                    Instance.class, new InstanceKeyProvider(idProvider),
                    new InstanceValidator(minServicePort, maxServicePort));

    UpenaMap<InstanceKey, Instance> amzaInstance = new AmzaUpenaMap<>(mapper, amzaService,
            embeddedClientProvider, partitionProperties, getPartitionName("instances"), InstanceKey.class,
            Instance.class, new InstanceKeyProvider(idProvider),
            new InstanceValidator(minServicePort, maxServicePort));

    instances = copy("instances", upenaInstances, amzaInstance, cleanup);

    UpenaMap<TenantKey, Tenant> upenaTenants = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(tenantStoreKey), TenantKey.class, Tenant.class,
                    new TenantKeyProvider(), null);

    UpenaMap<TenantKey, Tenant> amzaTenants = new AmzaUpenaMap<>(mapper, amzaService, embeddedClientProvider,
            partitionProperties, getPartitionName("tenants"), TenantKey.class, Tenant.class,
            new TenantKeyProvider(), null);

    tenants = copy("tenants", upenaTenants, amzaTenants, cleanup);

    UpenaMap<MonkeyKey, Monkey> upenaMonkeys = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(monkeyStoreKey), MonkeyKey.class, Monkey.class,
                    new MonkeyKeyProvider(idProvider), null);

    UpenaMap<MonkeyKey, Monkey> amzaMonkeys = new AmzaUpenaMap<>(mapper, amzaService, embeddedClientProvider,
            partitionProperties, getPartitionName("monkeys"), MonkeyKey.class, Monkey.class,
            new MonkeyKeyProvider(idProvider), null);

    monkeys = copy("monkeys", upenaMonkeys, amzaMonkeys, cleanup);

    UpenaMap<ChaosStateKey, ChaosState> upenaChaosStates = upenaAmzaService == null ? null
            : new UpenaTable<>(mapper, upenaAmzaService.getTable(chaosStateStoreKey), ChaosStateKey.class,
                    ChaosState.class, new ChaosStateKeyProvider(idProvider), null);

    UpenaMap<ChaosStateKey, ChaosState> amzaChaosStates = new AmzaUpenaMap<>(mapper, amzaService,
            embeddedClientProvider, partitionProperties, getPartitionName("chaos"), ChaosStateKey.class,
            ChaosState.class, new ChaosStateKeyProvider(idProvider), null);

    chaosStates = copy("chaosStates", upenaChaosStates, amzaChaosStates, cleanup);

    initialized.set(true);
}

From source file:hudson.security.TokenBasedRememberMeServices2SEC868Test.java

@Test
@Issue("SECURITY-868")
public void rememberMeToken_skipExpirationCheck() throws Exception {
    boolean previousConfig = TokenBasedRememberMeServices2.SKIP_TOO_FAR_EXPIRATION_DATE_CHECK;
    try {
        TokenBasedRememberMeServices2.SKIP_TOO_FAR_EXPIRATION_DATE_CHECK = true;

        j.jenkins.setDisableRememberMe(false);

        HudsonPrivateSecurityRealm realm = new HudsonPrivateSecurityRealm(false, false, null);
        TokenBasedRememberMeServices2 tokenService = (TokenBasedRememberMeServices2) realm
                .getSecurityComponents().rememberMe;
        j.jenkins.setSecurityRealm(realm);

        String username = "alice";
        User alice = realm.createAccount(username, username);

        { // with SKIP_TOO_FAR_EXPIRATION_DATE_CHECK enabled, even a cookie expiring too far in the future is accepted
            JenkinsRule.WebClient wc = j.createWebClient();

            // by default the token is valid for 14 days;
            // here the validity is artificially extended, which could otherwise be abused for permanent access
            long oneDay = TimeUnit.DAYS.toMillis(1);
            Cookie cookie = createRememberMeCookie(tokenService, oneDay, alice);
            wc.getCookieManager().addCookie(cookie);

            // because the expiration check is skipped, the cookie still authenticates the user
            assertUserConnected(wc, username);
        }

        { // a hand-crafted cookie with a regular expiration duration works
            JenkinsRule.WebClient wc = j.createWebClient();

            // by default the token is valid for 14 days;
            // here the expiration is pulled back slightly to simulate an "old" cookie (regular usage)
            long minusFiveMinutes = TimeUnit.MINUTES.toMillis(-5);
            Cookie cookie = createRememberMeCookie(tokenService, minusFiveMinutes, alice);
            wc.getCookieManager().addCookie(cookie);

            // a cookie within the regular validity window is accepted
            assertUserConnected(wc, username);
        }
    } finally {
        TokenBasedRememberMeServices2.SKIP_TOO_FAR_EXPIRATION_DATE_CHECK = previousConfig;
    }
}

From source file:net.voidfunction.rm.master.AdminServlet.java

private static String getDuration(long millis) {
    long days = TimeUnit.MILLISECONDS.toDays(millis);
    millis -= TimeUnit.DAYS.toMillis(days);
    long hours = TimeUnit.MILLISECONDS.toHours(millis);
    millis -= TimeUnit.HOURS.toMillis(hours);
    long minutes = TimeUnit.MILLISECONDS.toMinutes(millis);
    millis -= TimeUnit.MINUTES.toMillis(minutes);
    long seconds = TimeUnit.MILLISECONDS.toSeconds(millis);

    StringBuilder sb = new StringBuilder();
    sb.append(days);
    sb.append(" days, ");
    sb.append(hours);
    sb.append(" hours, ");
    sb.append(minutes);
    sb.append(" minutes, ");
    sb.append(seconds);
    sb.append(" seconds");

    return (sb.toString());
}
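
A usage sketch for the helper above; the input is a made-up value chosen so the breakdown is easy to check by hand.

    // 1 day + 2 hours + 3 minutes + 4 seconds = 93,784,000 ms
    String uptime = getDuration(93784000L);
    // uptime == "1 days, 2 hours, 3 minutes, 4 seconds"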

From source file:com.netflix.spinnaker.igor.gitlabci.GitlabCiBuildMonitor.java

private int buildCacheJobTTLSeconds() {
    return (int) TimeUnit.DAYS.toSeconds(gitlabCiProperties.getCachedJobTTLDays());
}
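
For example, a configured TTL of 2 days yields 172,800 seconds; the sketch below hard-codes the day count instead of reading it from gitlabCiProperties.

    int ttlSeconds = (int) TimeUnit.DAYS.toSeconds(2); // 172_800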

From source file:fr.xebia.cocktail.CocktailManager.java

/**
 * TODO use PUT instead of POST
 *
 * @param id    id of the cocktail
 * @param photo to associate with the cocktail
 * @return redirection to display cocktail
 */
@RequestMapping(value = "/cocktail/{id}/photo", method = RequestMethod.POST)
public String updatePhoto(@PathVariable String id, @RequestParam("photo") MultipartFile photo) {

    if (!photo.isEmpty()) {
        try {
            String contentType = fileStorageService.findContentType(photo.getOriginalFilename());
            if (contentType == null) {
                logger.warn("photo",
                        "Skip file with unsupported extension '" + photo.getOriginalFilename() + "'");
            } else {

                InputStream photoInputStream = photo.getInputStream();
                long photoSize = photo.getSize();

                Map metadata = new TreeMap();
                metadata.put("Content-Length", Arrays.asList(new String[] { "" + photoSize }));
                metadata.put("Content-Type", Arrays.asList(new String[] { contentType }));
                metadata.put("Cache-Control", Arrays.asList(
                        new String[] { "public, max-age=" + TimeUnit.SECONDS.convert(365, TimeUnit.DAYS) }));

                /*    ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentLength(photoSize);
                    objectMetadata.setContentType(contentType);
                    objectMetadata.setCacheControl("public, max-age=" + TimeUnit.SECONDS.convert(365, TimeUnit.DAYS));*/
                String photoUrl = fileStorageService.storeFile(photo.getBytes(), metadata);

                Cocktail cocktail = cocktailRepository.get(id);
                logger.info("Saved {}", photoUrl);
                cocktail.setPhotoUrl(photoUrl);
                cocktailRepository.update(cocktail);
            }

        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
    }
    return "redirect:/cocktail/" + id;
}
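
The Cache-Control value above uses TimeUnit.SECONDS.convert(365, TimeUnit.DAYS), which is 31,536,000 seconds (one non-leap year). A minimal sketch of just that header value:

    long maxAge = TimeUnit.SECONDS.convert(365, TimeUnit.DAYS); // 31_536_000
    String cacheControl = "public, max-age=" + maxAge;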

From source file:org.wso2.carbon.governance.custom.lifecycles.checklist.ui.clients.LifecycleManagementServiceClient.java

/**
 * This method formats a time duration into 'dd:hh:mm:ss'.
 *
 * @param duration  the time duration in milliseconds.
 * @return          the duration formatted as 'dd:hh:mm:ss'.
 */
private String formatTimeDuration(long duration) {
    String timeDuration;
    long days = TimeUnit.MILLISECONDS.toDays(duration);
    long hours = TimeUnit.MILLISECONDS.toHours(duration)
            - TimeUnit.DAYS.toHours(TimeUnit.MILLISECONDS.toDays(duration));
    long minutes = TimeUnit.MILLISECONDS.toMinutes(duration)
            - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(duration));
    long seconds = TimeUnit.MILLISECONDS.toSeconds(duration)
            - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(duration));
    // Setting the duration to a readable format.
    if (days == 0 && hours == 0 && minutes == 0) {
        timeDuration = String.format(durationSecondsFormat, seconds);
    } else if (days == 0 && hours == 0) {
        timeDuration = String.format(durationMinutesSecondsFormat, minutes, seconds);
    } else if (days == 0) {
        timeDuration = String.format(durationHoursMinutesSecondsFormat, hours, minutes, seconds);
    } else {
        timeDuration = String.format(durationDaysHoursMinutesSecondsFormat, days, hours, minutes, seconds);
    }
    return timeDuration;
}
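
A usage sketch, assuming a field such as durationDaysHoursMinutesSecondsFormat holds a pattern like "%02d:%02d:%02d:%02d" (the format fields are not shown in this excerpt, so the rendered string is only indicative):

    // 2 days + 5 hours + 30 minutes + 15 seconds
    long duration = TimeUnit.DAYS.toMillis(2) + TimeUnit.HOURS.toMillis(5)
            + TimeUnit.MINUTES.toMillis(30) + TimeUnit.SECONDS.toMillis(15);
    String formatted = formatTimeDuration(duration);
    // with the assumed pattern this would render as "02:05:30:15"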

From source file:org.jenkinsci.remoting.engine.HandlerLoopbackLoadStress.java

public HandlerLoopbackLoadStress(Config config)
        throws IOException, NoSuchAlgorithmException, CertificateException, KeyStoreException,
        UnrecoverableKeyException, KeyManagementException, OperatorCreationException {
    this.config = config;
    KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
    gen.initialize(2048); // maximum supported by JVM with export restrictions
    keyPair = gen.generateKeyPair();

    Date now = new Date();
    // certificate validity window: notBefore 10 days in the past, notAfter 10 days in the future
    Date firstDate = new Date(now.getTime() - TimeUnit.DAYS.toMillis(10));
    Date lastDate = new Date(now.getTime() + TimeUnit.DAYS.toMillis(10));

    SubjectPublicKeyInfo subjectPublicKeyInfo = SubjectPublicKeyInfo
            .getInstance(keyPair.getPublic().getEncoded());

    X500NameBuilder nameBuilder = new X500NameBuilder(BCStyle.INSTANCE);
    X500Name subject = nameBuilder.addRDN(BCStyle.CN, getClass().getSimpleName()).addRDN(BCStyle.C, "US")
            .build();

    X509v3CertificateBuilder certGen = new X509v3CertificateBuilder(subject, BigInteger.ONE, firstDate,
            lastDate, subject, subjectPublicKeyInfo);

    JcaX509ExtensionUtils instance = new JcaX509ExtensionUtils();

    certGen.addExtension(X509Extension.subjectKeyIdentifier, false,
            instance.createSubjectKeyIdentifier(subjectPublicKeyInfo));

    ContentSigner signer = new JcaContentSignerBuilder("SHA1withRSA").setProvider(BOUNCY_CASTLE_PROVIDER)
            .build(keyPair.getPrivate());

    certificate = new JcaX509CertificateConverter().setProvider(BOUNCY_CASTLE_PROVIDER)
            .getCertificate(certGen.build(signer));

    char[] password = "password".toCharArray();

    KeyStore store = KeyStore.getInstance("jks");
    store.load(null, password);
    store.setKeyEntry("alias", keyPair.getPrivate(), password, new Certificate[] { certificate });

    KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
    kmf.init(store, password);

    context = SSLContext.getInstance("TLS");
    context.init(kmf.getKeyManagers(), new TrustManager[] { new BlindTrustX509ExtendedTrustManager() }, null);

    mainHub = IOHub.create(executorService);
    // on Windows there is a bug whereby you cannot mix ServerSockets and Sockets on the same selector;
    // a path separator of ';' (char 59) identifies Windows, so a dedicated IOHub is used there
    acceptorHub = File.pathSeparatorChar == 59 ? IOHub.create(executorService) : mainHub;
    legacyHub = new NioChannelHub(executorService);
    executorService.submit(legacyHub);
    serverSocketChannel = ServerSocketChannel.open();

    JnlpProtocolHandler handler = null;
    for (JnlpProtocolHandler h : new JnlpProtocolHandlerFactory(executorService).withNioChannelHub(legacyHub)
            .withIOHub(mainHub).withSSLContext(context).withPreferNonBlockingIO(!config.bio)
            .withClientDatabase(new JnlpClientDatabase() {
                @Override
                public boolean exists(String clientName) {
                    return true;
                }

                @Override
                public String getSecretOf(@Nonnull String clientName) {
                    return secretFor(clientName);
                }
            }).withSSLClientAuthRequired(false).handlers()) {
        if (config.name.equals(h.getName())) {
            handler = h;
            break;
        }
    }
    if (handler == null) {
        throw new RuntimeException("Unknown handler: " + config.name);
    }
    this.handler = handler;

    acceptor = new Acceptor(serverSocketChannel);
    runtimeMXBean = ManagementFactory.getRuntimeMXBean();
    operatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean();
    _getProcessCpuTime = _getProcessCpuTime(operatingSystemMXBean);
    garbageCollectorMXBeans = new ArrayList<GarbageCollectorMXBean>(
            ManagementFactory.getGarbageCollectorMXBeans());
    Collections.sort(garbageCollectorMXBeans, new Comparator<GarbageCollectorMXBean>() {
        @Override
        public int compare(GarbageCollectorMXBean o1, GarbageCollectorMXBean o2) {
            return o1.getName().compareTo(o2.getName());
        }
    });
    stats = new Stats();
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.quota.QuotaService.java

protected void computeAndApplyCharge(final Collection<ContainersLogs> hopContainersLogs,
        final boolean isRecover) throws IOException {
    LightWeightRequestHandler quotaSchedulerHandler = new LightWeightRequestHandler(YARNOperationType.TEST) {
        @Override
        public Object performTask() throws IOException {
            connector.beginTransaction();
            connector.writeLock();

            //Get Data  ** YarnProjectsQuota **
            YarnProjectsQuotaDataAccess _pqDA = (YarnProjectsQuotaDataAccess) RMStorageFactory
                    .getDataAccess(YarnProjectsQuotaDataAccess.class);
            Map<String, YarnProjectsQuota> hopYarnProjectsQuotaMap = _pqDA.getAll();
            long _miliSec = System.currentTimeMillis();
            final long _day = TimeUnit.DAYS.convert(_miliSec, TimeUnit.MILLISECONDS);
            Map<String, YarnProjectsQuota> chargedYarnProjectsQuota = new HashMap<String, YarnProjectsQuota>();
            Map<YarnProjectsDailyId, YarnProjectsDailyCost> chargedYarnProjectsDailyCost = new HashMap<YarnProjectsDailyId, YarnProjectsDailyCost>();

            List<ContainersLogs> toBeRemovedContainersLogs = new ArrayList<ContainersLogs>();
            List<ContainerCheckPoint> toBeAddedContainerCheckPoint = new ArrayList<ContainerCheckPoint>();
            List<ContainerCheckPoint> toBeRemovedContainerCheckPoint = new ArrayList<ContainerCheckPoint>();

            // Calculate the quota
            LOG.debug("RIZ:: ContainersLogs count : " + hopContainersLogs.size());
            for (ContainersLogs _ycl : hopContainersLogs) {
                if (!isRecover && recovered.remove(_ycl.getContainerid())) {
                    continue;
                }
                if (isRecover) {
                    recovered.add(_ycl.getContainerid());
                }
                // Get ApplicationId from ContainerId
                LOG.debug("RIZ:: ContainersLogs entry : " + _ycl.toString());
                ContainerId _cId = ConverterUtils.toContainerId(_ycl.getContainerid());
                ApplicationId _appId = _cId.getApplicationAttemptId().getApplicationId();

                //Get ProjectId from ApplicationId in ** ApplicationState Table ** 
                String _appUser = applicationStateCache.get(_appId.toString());
                if (_appUser == null) {
                    ApplicationState _appStat = (ApplicationState) appStatDS
                            .findByApplicationId(_appId.toString());
                    if (_appStat == null) {
                        LOG.error("Application not found: " + _appId.toString() + " for container "
                                + _ycl.getContainerid());
                        continue;
                    } else {
                        if (applicationStateCache.size() > 100000) {
                            applicationStateCache = new HashMap<String, String>();
                        }
                        _appUser = _appStat.getUser();
                        applicationStateCache.put(_appId.toString(), _appUser);
                    }
                }

                String _projectName = HopsWorksHelper.getProjectName(_appUser);
                String _user = HopsWorksHelper.getUserName(_appUser);
                LOG.debug("RIZ:: App : " + _appId.toString() + " User : " + _appUser);

                // compute used ticks
                Long checkpoint = _ycl.getStart();
                float currentPrice = _ycl.getPrice();
                ContainerCheckPoint lastCheckPoint = containersCheckPoints.get(_ycl.getContainerid());
                if (lastCheckPoint != null) {
                    checkpoint = lastCheckPoint.getCheckPoint();
                    currentPrice = lastCheckPoint.getPrice();
                }
                long currentTicks = _ycl.getStop() - checkpoint;

                // Decide what to do with the ticks
                if (currentTicks > 0) {
                    if (_ycl.getExitstatus() == ContainerExitStatus.CONTAINER_RUNNING_STATE) {
                        //>> Edit log entry + Increase Quota
                        ContainerCheckPoint _tempCheckpointObj = new ContainerCheckPoint(_ycl.getContainerid(),
                                _ycl.getStop(), currentPrice);
                        containersCheckPoints.put(_ycl.getContainerid(), _tempCheckpointObj);
                        toBeAddedContainerCheckPoint.add(_tempCheckpointObj);

                        LOG.info("charging project still running " + _projectName + " for container "
                                + _ycl.getContainerid() + " current ticks " + currentTicks + "("
                                + _ycl.getStart() + ", " + _ycl.getStop() + ", " + checkpoint
                                + ") current price " + currentPrice);

                        chargeYarnProjectsQuota(chargedYarnProjectsQuota, hopYarnProjectsQuotaMap, _projectName,
                                _user, currentTicks, _ycl.getContainerid(), _ycl.getExitstatus(), currentPrice);

                        //** YarnProjectsDailyCost charging**
                        chargeYarnProjectsDailyCost(chargedYarnProjectsDailyCost, _projectName, _user, _day,
                                currentTicks, currentPrice);

                    } else {
                        //>> Delete log entry + Increase Quota
                        toBeRemovedContainersLogs.add((ContainersLogs) _ycl);
                        if (checkpoint != _ycl.getStart()) {
                            toBeRemovedContainerCheckPoint.add(
                                    new ContainerCheckPoint(_ycl.getContainerid(), checkpoint, currentPrice));
                            containersCheckPoints.remove(_ycl.getContainerid());
                        }
                        //** YarnProjectsQuota charging**
                        LOG.info("charging project finished " + _projectName + " for container "
                                + _ycl.getContainerid() + " current ticks " + currentTicks + " current price "
                                + currentPrice);
                        chargeYarnProjectsQuota(chargedYarnProjectsQuota, hopYarnProjectsQuotaMap, _projectName,
                                _user, currentTicks, _ycl.getContainerid(), _ycl.getExitstatus(), currentPrice);

                        //** YarnProjectsDailyCost charging**
                        chargeYarnProjectsDailyCost(chargedYarnProjectsDailyCost, _projectName, _user, _day,
                                currentTicks, currentPrice);

                    }
                } else if (_ycl.getExitstatus() == ContainerExitStatus.CONTAINER_RUNNING_STATE) {
                    //create a checkPoint at start to store the price.
                    ContainerCheckPoint _tempCheckpointObj = new ContainerCheckPoint(_ycl.getContainerid(),
                            _ycl.getStart(), currentPrice);
                    containersCheckPoints.put(_ycl.getContainerid(), _tempCheckpointObj);
                    toBeAddedContainerCheckPoint.add(_tempCheckpointObj);
                }
            }
            // Delete the finished ContainersLogs
            ContainersLogsDataAccess _csDA = (ContainersLogsDataAccess) RMStorageFactory
                    .getDataAccess(ContainersLogsDataAccess.class);
            _csDA.removeAll(toBeRemovedContainersLogs);

            //Add and remove Containers checkpoints
            ContainersCheckPointsDataAccess ccpDA = (ContainersCheckPointsDataAccess) RMStorageFactory
                    .getDataAccess(ContainersCheckPointsDataAccess.class);
            ccpDA.addAll(toBeAddedContainerCheckPoint);
            ccpDA.removeAll(toBeRemovedContainerCheckPoint);

            // Show all charged projects
            if (LOG.isDebugEnabled()) {
                for (YarnProjectsQuota _cpq : chargedYarnProjectsQuota.values()) {
                    LOG.debug("RIZ:: Charged projects: " + _cpq.toString() + " charge amount:"
                            + _cpq.getTotalUsedQuota());
                }
            }

            // Add all the changed project quota to NDB
            _pqDA.addAll(chargedYarnProjectsQuota.values());
            YarnProjectsDailyCostDataAccess _pdcDA = (YarnProjectsDailyCostDataAccess) RMStorageFactory
                    .getDataAccess(YarnProjectsDailyCostDataAccess.class);
            _pdcDA.addAll(chargedYarnProjectsDailyCost.values());
            connector.commit();
            return null;
        }

    };
    quotaSchedulerHandler.handle();
}
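
The charging logic above keys daily costs by the number of whole days since the Unix epoch, computed with TimeUnit.DAYS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS). A minimal sketch of just that day-index computation:

    long nowMillis = System.currentTimeMillis();
    long dayIndex = TimeUnit.DAYS.convert(nowMillis, TimeUnit.MILLISECONDS);
    // e.g. 2021-01-01T00:00:00Z (1_609_459_200_000 ms) maps to day index 18628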