List of usage examples for org.joda.time DateTime getMillis
public long getMillis()
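getMillis() returns the millisecond instant measured from the Java epoch of 1970-01-01T00:00:00Z, and the value is independent of the time zone the DateTime is expressed in; every example below relies on that. A minimal standalone sketch (class name and variables are illustrative, not from any of the projects listed):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class GetMillisDemo {
    public static void main(String[] args) {
        // The same instant expressed in two zones has the same millis value.
        DateTime utc = DateTime.now(DateTimeZone.UTC);
        DateTime tokyo = utc.withZone(DateTimeZone.forID("Asia/Tokyo"));
        System.out.println(utc.getMillis() == tokyo.getMillis()); // true
    }
}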
From source file:com.moss.maven.util.MavenPomPropertiesDateFormatter.java
License:Open Source License
public DateTime parseDateTime(String mavenFormattedDateAndTime) {
    // THE JODA PARSER DOESN'T HANDLE TIMEZONES WELL, SO WE'RE PULLING THE ZONE OUT AND PARSING IT SEPARATELY
    int length = mavenFormattedDateAndTime.length();
    String timezoneText = mavenFormattedDateAndTime.substring(length - 8, length - 5).trim();
    String yearText = mavenFormattedDateAndTime.substring(length - 4).trim();
    String jodaParseableText = mavenFormattedDateAndTime.substring(0, length - 8).trim() + " " + yearText;

    // PARSE THE ZONE
    DateTimeZone timeZone;
    if ("EDT".equals(timezoneText))
        timeZone = DateTimeZone.forID("America/New_York");
    else
        timeZone = DateTimeZone.forID(timezoneText);

    // PARSE THE STRING WITHOUT THE ZONE INFO
    DateTimeFormatter fmt = DateTimeFormat.forPattern("EEE MMM dd HH:mm:ss YYYY");
    DateTime dateTime = fmt.parseDateTime(jodaParseableText);

    // ADD THE ZONE BACK
    dateTime = new DateTime(dateTime.getMillis(), timeZone);
    return dateTime;
}
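A related approach, shown here only as a hedged sketch reusing timeZone and jodaParseableText from the snippet above (it is not part of the original project), is to attach the zone to the formatter. Note the semantics differ: new DateTime(parsed.getMillis(), timeZone) keeps the instant that was parsed in the JVM default zone and only changes its display zone, whereas a zoned formatter interprets the wall-clock text as local time in that zone.

// Hypothetical alternative: interpret the zone-less text directly in the target zone.
DateTimeFormatter zonedFmt = DateTimeFormat.forPattern("EEE MMM dd HH:mm:ss YYYY")
        .withZone(timeZone);
DateTime inTargetZone = zonedFmt.parseDateTime(jodaParseableText);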
From source file:com.mycollab.vaadin.web.ui.field.DateTimeOptionField.java
License:Open Source License
private Date getDateValue() {
    Date selectDate = popupDateField.getValue();
    if (selectDate == null) {
        return null;
    }
    DateTime jodaSelectDate = new DateTime(selectDate)
            .toDateTime(DateTimeZone.forTimeZone(UserUIContext.getUserTimeZone()));
    Date baseDate = new LocalDate(jodaSelectDate).toDate();
    if (hideTimeOption) {
        return new LocalDateTime(baseDate)
                .toDateTime(DateTimeZone.forTimeZone(UserUIContext.getUserTimeZone())).toDate();
    } else {
        Integer hour = (hourPickerComboBox.getValue() != null)
                ? Integer.parseInt((String) hourPickerComboBox.getValue()) : 0;
        Integer minus = (minutePickerComboBox.getValue() != null)
                ? Integer.parseInt((String) minutePickerComboBox.getValue()) : 0;
        String timeFormat = (timeFormatComboBox.getValue() != null)
                ? (String) timeFormatComboBox.getValue() : "AM";
        long milliseconds = calculateMilliSeconds(hour, minus, timeFormat);
        DateTime jodaTime = new DateTime(baseDate).plus(new Duration(milliseconds));
        return new LocalDateTime(jodaTime.getMillis())
                .toDateTime(DateTimeZone.forTimeZone(UserUIContext.getUserTimeZone())).toDate();
    }
}
From source file:com.mysema.query.sql.types.DateTimeType.java
License:Apache License
@Override
public void setValue(PreparedStatement st, int startIndex, DateTime value) throws SQLException {
    st.setTimestamp(startIndex, new Timestamp(value.getMillis()));
}
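The reverse mapping, sketched here as an assumption about how such a Querydsl type could read values (not copied from the project; assumes the usual java.sql imports), goes through Timestamp.getTime():

// Hypothetical counterpart: convert a java.sql.Timestamp read from a ResultSet back to a DateTime.
@Override
public DateTime getValue(ResultSet rs, int startIndex) throws SQLException {
    Timestamp ts = rs.getTimestamp(startIndex);
    return ts != null ? new DateTime(ts.getTime()) : null;
}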
From source file:com.nesscomputing.cache.MemcacheProvider.java
License:Apache License
/** Memcache expects expiration dates in seconds since the epoch. */
public static int computeMemcacheExpiry(@Nullable DateTime when) {
    return when == null ? -1 : Ints.saturatedCast(when.getMillis() / 1000);
}
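A brief usage sketch (illustrative only, not from the project's callers): getMillis() is divided by 1000 to convert milliseconds to the seconds-since-epoch value memcached expects, and Guava's Ints.saturatedCast clamps the result into the int range.

// Hypothetical call: an entry expiring ten minutes from now.
DateTime tenMinutes = DateTime.now().plusMinutes(10);
int expiry = MemcacheProvider.computeMemcacheExpiry(tenMinutes);
// expiry == (int) (tenMinutes.getMillis() / 1000), saturated on overflow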
From source file:com.nestedbird.models.eventtime.EventTime.java
License:Open Source License
/**
 * retrieves future occurrences of this event time
 *
 * @return list of occurrences
 */
@JsonIgnore
public List<ParsedEventData> getFutureOccurrences() {
    DateTime thisMorning = DateTime.now().withHourOfDay(0).withMinuteOfHour(0).withSecondOfMinute(0);
    return getOccurrences(thisMorning.getMillis());
}
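As a side note (an observation, not a change to the project code), Joda-Time 2.0+ provides withTimeAtStartOfDay(), which expresses the same intent, also clears the millisOfSecond field that the chain above leaves untouched, and copes with zones where midnight does not exist:

// Equivalent intent using the built-in helper.
DateTime thisMorning = DateTime.now().withTimeAtStartOfDay();
long sinceMidnightMillis = thisMorning.getMillis();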
From source file:com.netflix.dynomitemanager.sidecore.backup.S3Backup.java
License:Apache License
/**
 * Uses the Amazon S3 API to upload the AOF/RDB to S3
 * Filename: Backup location + DC + Rack + App + Token
 */
@Override
public boolean upload(File file, DateTime todayStart) {
    logger.info("Snapshot backup: sending " + file.length() + " bytes to S3");

    /* Key name is comprised of the
     * backupDir + DC + Rack + token + Date
     */
    String keyName = config.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/"
            + iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + todayStart.getMillis();

    // Get bucket location.
    logger.info("Key in Bucket: " + keyName);
    logger.info("S3 Bucket Name: " + config.getBucketName());

    AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider());

    try {
        // Checking if the S3 bucket exists, and if does not, then we create it
        if (!(s3Client.doesBucketExist(config.getBucketName()))) {
            logger.error("Bucket with name: " + config.getBucketName() + " does not exist");
            return false;
        } else {
            logger.info("Uploading data to S3\n");
            // Create a list of UploadPartResponse objects. You get one of these for
            // each part upload.
            List<PartETag> partETags = new ArrayList<PartETag>();

            InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
                    config.getBucketName(), keyName);
            InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

            long contentLength = file.length();
            long filePosition = 0;
            long partSize = this.initPartSize;

            try {
                for (int i = 1; filePosition < contentLength; i++) {
                    // Last part can be less than initPartSize (500MB). Adjust part size.
                    partSize = Math.min(partSize, (contentLength - filePosition));

                    // Create request to upload a part.
                    UploadPartRequest uploadRequest = new UploadPartRequest()
                            .withBucketName(config.getBucketName()).withKey(keyName)
                            .withUploadId(initResponse.getUploadId()).withPartNumber(i)
                            .withFileOffset(filePosition).withFile(file).withPartSize(partSize);

                    // Upload part and add response to our list.
                    partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
                    filePosition += partSize;
                }

                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        config.getBucketName(), keyName, initResponse.getUploadId(), partETags);
                s3Client.completeMultipartUpload(compRequest);
            } catch (Exception e) {
                logger.error("Aborting multipart upload due to error");
                s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(config.getBucketName(), keyName,
                        initResponse.getUploadId()));
            }

            return true;
        }
    } catch (AmazonServiceException ase) {
        logger.error("AmazonServiceException;"
                + " request made it to Amazon S3, but was rejected with an error ");
        logger.error("Error Message: " + ase.getMessage());
        logger.error("HTTP Status Code: " + ase.getStatusCode());
        logger.error("AWS Error Code: " + ase.getErrorCode());
        logger.error("Error Type: " + ase.getErrorType());
        logger.error("Request ID: " + ase.getRequestId());
        return false;
    } catch (AmazonClientException ace) {
        logger.error("AmazonClientException;" + " the client encountered "
                + "an internal error while trying to " + "communicate with S3, ");
        logger.error("Error Message: " + ace.getMessage());
        return false;
    }
}
From source file:com.netflix.dynomitemanager.sidecore.backup.S3Restore.java
License:Apache License
private long restoreTime(String dateString) {
    logger.info("Date to restore to: " + dateString);

    DateTimeFormatter formatter = null;
    try {
        formatter = DateTimeFormat.forPattern("yyyyMMdd");
    } catch (Exception e) {
        logger.error("Restore fast property not formatted properly " + e.getMessage());
        return -1;
    }

    DateTime dt = formatter.parseDateTime(dateString);
    DateTime dateBackup = dt.withTimeAtStartOfDay();
    return dateBackup.getMillis();
}
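For illustration (a hypothetical call, not taken from the project), the yyyyMMdd pattern plus withTimeAtStartOfDay() yields the epoch millis of local midnight on the given date:

// Hypothetical usage: millis at the start of 15 January 2016 in the JVM default zone.
long millis = restoreTime("20160115");
// Equivalent to: DateTimeFormat.forPattern("yyyyMMdd").parseDateTime("20160115")
//                        .withTimeAtStartOfDay().getMillis()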
From source file:com.netflix.ice.processor.BillingFileProcessor.java
License:Apache License
@Override
protected void poll() throws Exception {
    TreeMap<DateTime, List<BillingFile>> filesToProcess = Maps.newTreeMap();
    Map<DateTime, List<BillingFile>> monitorFilesToProcess = Maps.newTreeMap();

    // list the tar.gz file in billing file folder
    for (int i = 0; i < config.billingS3BucketNames.length; i++) {
        String billingS3BucketName = config.billingS3BucketNames[i];
        String billingS3BucketPrefix = config.billingS3BucketPrefixes.length > i
                ? config.billingS3BucketPrefixes[i] : "";
        String accountId = config.billingAccountIds.length > i ? config.billingAccountIds[i] : "";
        String billingAccessRoleName = config.billingAccessRoleNames.length > i
                ? config.billingAccessRoleNames[i] : "";
        String billingAccessExternalId = config.billingAccessExternalIds.length > i
                ? config.billingAccessExternalIds[i] : "";

        logger.info("trying to list objects in billing bucket " + billingS3BucketName
                + " using assume role, and external id " + billingAccessRoleName + " " + billingAccessExternalId);
        List<S3ObjectSummary> objectSummaries = AwsUtils.listAllObjects(billingS3BucketName,
                billingS3BucketPrefix, accountId, billingAccessRoleName, billingAccessExternalId);
        logger.info("found " + objectSummaries.size() + " in billing bucket " + billingS3BucketName);

        TreeMap<DateTime, S3ObjectSummary> filesToProcessInOneBucket = Maps.newTreeMap();
        Map<DateTime, S3ObjectSummary> monitorFilesToProcessInOneBucket = Maps.newTreeMap();

        // for each file, download & process if needed
        for (S3ObjectSummary objectSummary : objectSummaries) {
            String fileKey = objectSummary.getKey();
            DateTime dataTime = AwsUtils.getDateTimeFromFileNameWithTags(fileKey);
            boolean withTags = true;
            if (dataTime == null) {
                dataTime = AwsUtils.getDateTimeFromFileName(fileKey);
                withTags = false;
            }
            if (dataTime != null && !dataTime.isBefore(config.startDate)) {
                if (!filesToProcessInOneBucket.containsKey(dataTime)
                        || withTags && config.resourceService != null
                        || !withTags && config.resourceService == null)
                    filesToProcessInOneBucket.put(dataTime, objectSummary);
                else
                    logger.info("ignoring file " + objectSummary.getKey());
            } else {
                logger.info("ignoring file " + objectSummary.getKey());
            }
        }

        for (S3ObjectSummary objectSummary : objectSummaries) {
            String fileKey = objectSummary.getKey();
            DateTime dataTime = AwsUtils.getDateTimeFromFileNameWithMonitoring(fileKey);
            if (dataTime != null && !dataTime.isBefore(config.startDate)) {
                monitorFilesToProcessInOneBucket.put(dataTime, objectSummary);
            }
        }

        for (DateTime key : filesToProcessInOneBucket.keySet()) {
            List<BillingFile> list = filesToProcess.get(key);
            if (list == null) {
                list = Lists.newArrayList();
                filesToProcess.put(key, list);
            }
            list.add(new BillingFile(filesToProcessInOneBucket.get(key), accountId, billingAccessRoleName,
                    billingAccessExternalId, billingS3BucketPrefix));
        }

        for (DateTime key : monitorFilesToProcessInOneBucket.keySet()) {
            List<BillingFile> list = monitorFilesToProcess.get(key);
            if (list == null) {
                list = Lists.newArrayList();
                monitorFilesToProcess.put(key, list);
            }
            list.add(new BillingFile(monitorFilesToProcessInOneBucket.get(key), accountId,
                    billingAccessRoleName, billingAccessExternalId, billingS3BucketPrefix));
        }
    }

    for (DateTime dataTime : filesToProcess.keySet()) {
        startMilli = endMilli = dataTime.getMillis();
        init();

        boolean hasNewFiles = false;
        boolean hasTags = false;
        long lastProcessed = lastProcessTime(AwsUtils.monthDateFormat.print(dataTime));

        for (BillingFile billingFile : filesToProcess.get(dataTime)) {
            S3ObjectSummary objectSummary = billingFile.s3ObjectSummary;
            if (objectSummary.getLastModified().getTime() < lastProcessed) {
                logger.info("data has been processed. ignoring " + objectSummary.getKey() + "...");
                continue;
            }
            hasNewFiles = true;
        }

        if (!hasNewFiles) {
            logger.info("data has been processed. ignoring all files at "
                    + AwsUtils.monthDateFormat.print(dataTime));
            continue;
        }

        long processTime = new DateTime(DateTimeZone.UTC).getMillis();
        for (BillingFile billingFile : filesToProcess.get(dataTime)) {
            S3ObjectSummary objectSummary = billingFile.s3ObjectSummary;
            String fileKey = objectSummary.getKey();

            File file = new File(config.localDir, fileKey.substring(billingFile.prefix.length()));
            logger.info("trying to download " + fileKey + "...");
            boolean downloaded = AwsUtils.downloadFileIfChangedSince(objectSummary.getBucketName(),
                    billingFile.prefix, file, lastProcessed, billingFile.accountId,
                    billingFile.accessRoleName, billingFile.externalId);
            if (downloaded)
                logger.info("downloaded " + fileKey);
            else {
                logger.info("file already downloaded " + fileKey + "...");
            }

            logger.info("processing " + fileKey + "...");
            boolean withTags = fileKey.contains("with-resources-and-tags");
            hasTags = hasTags || withTags;
            processingMonitor = false;
            processBillingZipFile(file, withTags);
            logger.info("done processing " + fileKey);
        }

        if (monitorFilesToProcess.get(dataTime) != null) {
            for (BillingFile monitorBillingFile : monitorFilesToProcess.get(dataTime)) {
                S3ObjectSummary monitorObjectSummary = monitorBillingFile.s3ObjectSummary;
                if (monitorObjectSummary != null) {
                    String monitorFileKey = monitorObjectSummary.getKey();
                    logger.info("processing " + monitorFileKey + "...");
                    File monitorFile = new File(config.localDir,
                            monitorFileKey.substring(monitorFileKey.lastIndexOf("/") + 1));
                    logger.info("trying to download " + monitorFileKey + "...");
                    boolean downloaded = AwsUtils.downloadFileIfChangedSince(
                            monitorObjectSummary.getBucketName(), monitorBillingFile.prefix, monitorFile,
                            lastProcessed, monitorBillingFile.accountId, monitorBillingFile.accessRoleName,
                            monitorBillingFile.externalId);
                    if (downloaded)
                        logger.info("downloaded " + monitorFile);
                    else
                        logger.warn(monitorFile + " already downloaded...");

                    FileInputStream in = new FileInputStream(monitorFile);
                    try {
                        processingMonitor = true;
                        processBillingFile(monitorFile.getName(), in, true);
                    } catch (Exception e) {
                        logger.error("Error processing " + monitorFile, e);
                    } finally {
                        in.close();
                    }
                }
            }
        }

        if (dataTime.equals(filesToProcess.lastKey())) {
            int hours = (int) ((endMilli - startMilli) / 3600000L);
            logger.info("cut hours to " + hours);
            cutData(hours);
        }

        // now get reservation capacity to calculate upfront and un-used cost
        for (Ec2InstanceReservationPrice.ReservationUtilization utilization : Ec2InstanceReservationPrice.ReservationUtilization
                .values())
            processReservations(utilization);

        if (hasTags && config.resourceService != null)
            config.resourceService.commit();

        logger.info("archiving results for " + dataTime + "...");
        archive();
        logger.info("done archiving " + dataTime);

        updateProcessTime(AwsUtils.monthDateFormat.print(dataTime), processTime);
        if (dataTime.equals(filesToProcess.lastKey())) {
            sendOndemandCostAlert();
        }
    }
    logger.info("AWS usage processed.");
}
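As a small illustrative aside (not from the original project), the hour count computed above is simply the millisecond difference between two instants divided by 3,600,000, the number of milliseconds in an hour:

// Illustrative only: the number of hours spanned by January 2016 in UTC.
DateTime start = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC);
DateTime end = new DateTime(2016, 2, 1, 0, 0, DateTimeZone.UTC);
int hours = (int) ((end.getMillis() - start.getMillis()) / 3600000L); // 744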
From source file:com.netflix.simianarmy.aws.janitor.rule.asg.OldEmptyASGRule.java
License:Apache License
private void markResource(Resource resource, DateTime now) {
    if (resource.getExpectedTerminationTime() == null) {
        Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
        resource.setExpectedTerminationTime(terminationTime);
        resource.setTerminationReason(
                String.format("Launch config older than %d days. Not in Discovery. No ELB.",
                        launchConfigAgeThreshold + retentionDays));
    } else {
        LOGGER.info(String.format("Resource %s is already marked as cleanup candidate.", resource.getId()));
    }
}
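A brief note on the conversion used here (an equivalence, not a change to the project code): wrapping getMillis() in a java.util.Date is the same as calling DateTime.toDate(), since both carry the same instant.

// Equivalent Joda-Time to java.util.Date conversions for the same instant.
Date viaMillis = new Date(now.getMillis());
Date viaToDate = now.toDate();
// viaMillis.equals(viaToDate) is true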
From source file:com.netflix.simianarmy.aws.janitor.rule.asg.SuspendedASGRule.java
License:Apache License
/** {@inheritDoc} */
@Override
public boolean isValid(Resource resource) {
    Validate.notNull(resource);
    if (!"ASG".equals(resource.getResourceType().name())) {
        return true;
    }

    if (instanceValidator.hasActiveInstance(resource)) {
        return true;
    }

    String suspensionTimeStr = resource.getAdditionalField(ASGJanitorCrawler.ASG_FIELD_SUSPENSION_TIME);
    if (!StringUtils.isEmpty(suspensionTimeStr)) {
        DateTime createTime = ASGJanitorCrawler.SUSPENSION_TIME_FORMATTER.parseDateTime(suspensionTimeStr);
        DateTime now = new DateTime(calendar.now().getTimeInMillis());
        if (now.isBefore(createTime.plusDays(suspensionAgeThreshold))) {
            LOGGER.info(String.format("The ASG %s has not been suspended for more than %d days",
                    resource.getId(), suspensionAgeThreshold));
            return true;
        }

        LOGGER.info(String.format("The ASG %s has been suspended for more than %d days",
                resource.getId(), suspensionAgeThreshold));

        if (resource.getExpectedTerminationTime() == null) {
            Date terminationTime = calendar.getBusinessDay(new Date(now.getMillis()), retentionDays);
            resource.setExpectedTerminationTime(terminationTime);
            resource.setTerminationReason(String.format(
                    "User suspended age more than %d days and all instances are out of service in Discovery",
                    suspensionAgeThreshold + retentionDays));
        }
        return false;
    } else {
        LOGGER.info(String.format("ASG %s is not suspended from ELB.", resource.getId()));
        return true;
    }
}