Example usage for org.apache.commons.io FileUtils ONE_MB

Introduction

This page lists example usages of org.apache.commons.io FileUtils.ONE_MB, collected from open-source projects.

Prototype

public static final long ONE_MB


Document

The number of bytes in a megabyte.
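
ONE_MB is a plain long constant (1024 * 1024 = 1,048,576 bytes), so it is normally used as a multiplier or divisor when converting between bytes and megabytes. A minimal sketch:

import org.apache.commons.io.FileUtils;

public class OneMbDemo {
    public static void main(String[] args) {
        System.out.println(FileUtils.ONE_MB);              //1048576, i.e. 1024 * 1024
        long limit = 2 * FileUtils.ONE_MB;                 //a 2 MB threshold, in bytes
        System.out.println(3_670_016L > limit);            //true
        System.out.println(3_670_016L / FileUtils.ONE_MB); //3 - integer division truncates 3.5
    }
}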

Usage

From source file:com.perceptive.epm.perkolcentral.action.ImageNowLicenseAction.java

public String executeImageNowLicenseRequest() throws ExceptionWrapper {
    try {
        rallyGroups = groupsBL.getAllRallyGroups();
        getSpecificGroupsThatRequireINLicense();
        //Check whether this person is part of Perceptive Kolkata
        if (ActionContext.getContext().getSession().get(Constants.logged_in_user) == null) {
            errorMessage = errorMessage
                    + "* You must be part of the Perceptive Kolkata group to request an ImageNow Development License."
                    + IOUtils.LINE_SEPARATOR;
            return ERROR;
        }
        if (!FilenameUtils.getExtension(sysfpFileFileName).equalsIgnoreCase("sysfp")) {
            errorMessage = errorMessage + "* Please upload a file of type sysfp." + IOUtils.LINE_SEPARATOR;
            return ERROR;
        }
        if (FileUtils.sizeOf(sysfpFile) > FileUtils.ONE_MB * 2) {
            errorMessage = errorMessage + "* Please upload a file of size less than 2 MB."
                    + IOUtils.LINE_SEPARATOR;
            return ERROR;
        }

        //Get the logged-in user's id
        String employeeUID = ((EmployeeBO) ActionContext.getContext().getSession()
                .get(Constants.logged_in_user)).getEmployeeUid();

        imagenowlicenses.setImageNowLicenseRequestId(UUID.randomUUID().toString());
        imagenowlicenses.setLicenseRequestedOn(Calendar.getInstance().getTime());
        imagenowlicenses.setFileName(sysfpFileFileName);

        imageNowLicenseBL.addImageNowLicenseRequest(imagenowlicenses, groupRequestedFor, employeeUID, sysfpFile,
                sysfpFileFileName);
        imagenowlicensesArrayList = imageNowLicenseBL.getLicensesRequestedByMe(employeeUID);

    } catch (Exception ex) {
        throw new ExceptionWrapper(ex);
    }
    return SUCCESS;
}
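
The 2 MB guard above is a common use of ONE_MB together with FileUtils.sizeOf; a minimal standalone sketch of the same check (the class and constant names are illustrative, not part of the project above):

import java.io.File;
import org.apache.commons.io.FileUtils;

final class UploadGuard {
    //2 MB expressed in bytes
    private static final long MAX_UPLOAD_BYTES = 2 * FileUtils.ONE_MB;

    //sizeOf returns a file's length, or a directory's recursive size; it throws if the file does not exist
    static boolean isTooLarge(File f) {
        return FileUtils.sizeOf(f) > MAX_UPLOAD_BYTES;
    }
}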

From source file:net.sf.zekr.engine.audio.AudioCacheManager.java

/**
 * Removes files from the user's audio cache once the cache grows beyond the size limit
 * ({@link #getCapacitySize()} megabytes), deleting roughly {@code flushSize} megabytes of the
 * oldest files. It does nothing if the cache size does not exceed {@link #getCapacitySize()}.
 */
@SuppressWarnings("unchecked")
public void flushCache() {
    logger.info("Flush audio cache.");
    long cacheSize = FileUtils.sizeOfDirectory(userPath);
    if (cacheSize > FileUtils.ONE_MB * capacitySize) {
        logger.info("Capacity size is " + capacitySize + " MB, of which "
                + FileUtils.byteCountToDisplaySize(cacheSize) + " is used. Flush size is " + flushSize
                + " MB.");
        Collection<File> audioDirectoryList = FileUtils.listFiles(userPath, new AbstractFileFilter() {
            public boolean accept(File file) {
                return file.isDirectory();
            }
        }, null);

        List<File> fileList = new ArrayList<File>();
        for (File dir : audioDirectoryList) {
            fileList.addAll(Arrays.asList(dir.listFiles()));
        }

        // sort older files first, so the least recently modified files are deleted first
        Collections.sort(fileList, LastModifiedFileComparator.LASTMODIFIED_COMPARATOR);
        long deleteSize = 0;
        for (int i = 0; i < fileList.size(); i++) {
            if (deleteSize > flushSize * FileUtils.ONE_MB) {
                logger.info("Cache flushing suffices. " + FileUtils.byteCountToDisplaySize(deleteSize)
                        + " were deleted.");
                break;
            }
            File file = fileList.get(i);
            deleteSize += file.length();
            logger.debug("Delete: " + file);
            file.delete();
        }
    } else {
        logger.info("No flush is required.");
    }
}
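
FileUtils.sizeOfDirectory and byteCountToDisplaySize pair naturally with ONE_MB for capacity checks like the one above; a minimal sketch (the path and limit are illustrative):

import java.io.File;
import org.apache.commons.io.FileUtils;

public class CacheSizeCheck {
    public static void main(String[] args) {
        File cacheDir = new File(System.getProperty("user.home"), ".audio-cache");
        long capacityMb = 200;
        //sizeOfDirectory walks the whole tree and throws if the directory does not exist
        long cacheSize = FileUtils.sizeOfDirectory(cacheDir);
        if (cacheSize > capacityMb * FileUtils.ONE_MB) {
            System.out.println("Cache over limit: " + FileUtils.byteCountToDisplaySize(cacheSize));
        }
    }
}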

From source file:com.linkedin.drelephant.tuning.BaselineComputeUtil.java

/**
 * Returns the average input size in bytes of a job (over the last _numJobsForBaseline executions)
 * @param jobDefId job definition id of the job
 * @return average input size in bytes as long
 */
private Long getAvgInputSizeInBytes(String jobDefId) {
    // the summed heuristic detail values are recorded in MB ('Total input size in MB'), hence the ONE_MB multiplication below
    String sql = "SELECT AVG(inputSizeInBytes) as avgInputSizeInMB FROM "
            + "(SELECT job_exec_id, SUM(cast(value as decimal)) inputSizeInBytes, MAX(start_time) AS start_time "
            + "FROM yarn_app_result yar INNER JOIN yarn_app_heuristic_result yahr "
            + "ON yar.id=yahr.yarn_app_result_id " + "INNER JOIN yarn_app_heuristic_result_details yahrd "
            + "ON yahr.id=yahrd.yarn_app_heuristic_result_id "
            + "WHERE job_def_id=:jobDefId AND yahr.heuristic_name='" + CommonConstantsHeuristic.MAPPER_SPEED
            + "' " + "AND yahrd.name='Total input size in MB' "
            + "GROUP BY job_exec_id ORDER BY start_time DESC LIMIT :num ) temp";

    logger.debug("Running query for average input size computation " + sql);

    SqlRow baseline = Ebean.createSqlQuery(sql).setParameter("jobDefId", jobDefId)
            .setParameter("num", _numJobsForBaseline).findUnique();
    Double avgInputSizeInBytes = baseline.getDouble("avgInputSizeInMB") * FileUtils.ONE_MB;
    return avgInputSizeInBytes.longValue();
}

From source file:com.linkedin.drelephant.mapreduce.fetchers.MapReduceFSFetcherHadoop2.java

@Override
public MapReduceApplicationData fetchData(AnalyticJob job) throws IOException {
    DataFiles files = getHistoryFiles(job);
    String confFile = files.getJobConfPath();
    String histFile = files.getJobHistPath();
    String appId = job.getAppId();
    String jobId = Utils.getJobIdFromApplicationId(appId);

    MapReduceApplicationData jobData = new MapReduceApplicationData();
    jobData.setAppId(appId).setJobId(jobId);

    // Fetch job config
    Configuration jobConf = new Configuration(false);
    jobConf.addResource(_fs.open(new Path(confFile)), confFile);
    Properties jobConfProperties = new Properties();
    for (Map.Entry<String, String> entry : jobConf) {
        jobConfProperties.put(entry.getKey(), entry.getValue());
    }
    jobData.setJobConf(jobConfProperties);

    // Check if job history file is too large and should be throttled
    if (_fs.getFileStatus(new Path(histFile)).getLen() > _maxLogSizeInMB * FileUtils.ONE_MB) {
        String errMsg = "The history log of MapReduce application: " + appId + " is over the limit size of "
                + _maxLogSizeInMB + " MB; the parsing process gets throttled.";
        logger.warn(errMsg);
        jobData.setDiagnosticInfo(errMsg);
        jobData.setSucceeded(false); // set succeeded to false to avoid heuristic analysis
        return jobData;
    }

    // Analyze job history file
    JobHistoryParser parser = new JobHistoryParser(_fs, histFile);
    JobHistoryParser.JobInfo jobInfo = parser.parse();
    IOException parseException = parser.getParseException();
    if (parseException != null) {
        throw new RuntimeException("Could not parse history file " + histFile, parseException);
    }

    jobData.setSubmitTime(jobInfo.getSubmitTime());
    jobData.setStartTime(jobInfo.getLaunchTime());
    jobData.setFinishTime(jobInfo.getFinishTime());

    String state = jobInfo.getJobStatus();
    if (state.equals("SUCCEEDED")) {

        jobData.setSucceeded(true);

        // Fetch job counter
        MapReduceCounterData jobCounter = getCounterData(jobInfo.getTotalCounters());

        // Fetch task data
        Map<TaskID, JobHistoryParser.TaskInfo> allTasks = jobInfo.getAllTasks();
        List<JobHistoryParser.TaskInfo> mapperInfoList = new ArrayList<JobHistoryParser.TaskInfo>();
        List<JobHistoryParser.TaskInfo> reducerInfoList = new ArrayList<JobHistoryParser.TaskInfo>();
        for (JobHistoryParser.TaskInfo taskInfo : allTasks.values()) {
            if (taskInfo.getTaskType() == TaskType.MAP) {
                mapperInfoList.add(taskInfo);
            } else {
                reducerInfoList.add(taskInfo);
            }
        }
        if (jobInfo.getTotalMaps() > MAX_SAMPLE_SIZE) {
            logger.debug(jobId + " total mappers: " + mapperInfoList.size());
        }
        if (jobInfo.getTotalReduces() > MAX_SAMPLE_SIZE) {
            logger.debug(jobId + " total reducers: " + reducerInfoList.size());
        }
        MapReduceTaskData[] mapperList = getTaskData(jobId, mapperInfoList);
        MapReduceTaskData[] reducerList = getTaskData(jobId, reducerInfoList);

        jobData.setCounters(jobCounter).setMapperData(mapperList).setReducerData(reducerList);
    } else if (state.equals("FAILED")) {

        jobData.setSucceeded(false);
        jobData.setDiagnosticInfo(jobInfo.getErrorInfo());
    } else {
        // Should not reach here
        throw new RuntimeException("Job state not supported. Should be either SUCCEEDED or FAILED");
    }

    return jobData;
}

From source file:com.linkedin.drelephant.tuning.FitnessComputeUtil.java

/**
 * Returns the total input size in bytes
 * @param appResult the app result whose heuristic details are inspected
 * @return total input size in bytes
 */
private Long getTotalInputBytes(AppResult appResult) {
    Long totalInputBytes = 0L;
    if (appResult.yarnAppHeuristicResults != null) {
        for (AppHeuristicResult appHeuristicResult : appResult.yarnAppHeuristicResults) {
            if (appHeuristicResult.heuristicName.equals(CommonConstantsHeuristic.MAPPER_SPEED)) {
                if (appHeuristicResult.yarnAppHeuristicResultDetails != null) {
                    for (AppHeuristicResultDetails appHeuristicResultDetails : appHeuristicResult.yarnAppHeuristicResultDetails) {
                        if (appHeuristicResultDetails.name
                                .equals(CommonConstantsHeuristic.TOTAL_INPUT_SIZE_IN_MB)) {
                            totalInputBytes += Math.round(
                                    Double.parseDouble(appHeuristicResultDetails.value) * FileUtils.ONE_MB);
                        }
                    }
                }
            }
        }
    }
    return totalInputBytes;
}
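
The heuristic detail stores the input size in MB as a string, so the conversion back to bytes is parse, multiply by ONE_MB, round; the same step in isolation (the value is illustrative):

import org.apache.commons.io.FileUtils;

public class MbStringToBytes {
    public static void main(String[] args) {
        String totalInputSizeInMb = "1536.25"; //illustrative heuristic value
        long bytes = Math.round(Double.parseDouble(totalInputSizeInMb) * FileUtils.ONE_MB);
        System.out.println(bytes); //1610874880, i.e. 1536.25 * 1048576
    }
}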

From source file:com.sunchenbin.store.feilong.core.io.FileUtil.java

/**
 * Formats a file size in bytes into a human-readable string.
 * 
 * <p>
 * The unit is chosen automatically: GB, MB or KB as appropriate, falling back to Bytes for small values.
 * </p>
 * 
 * <p>
 * Unlike {@link org.apache.commons.io.FileUtils#byteCountToDisplaySize(long)} in Commons IO 2.4, which
 * rounds down (1.99 GB is displayed as 1 GB), this method keeps two fractional digits.
 * </p>
 * 
 * @param fileSize
 *            the file size in bytes
 * @return the formatted size string
 * @see #getFileSize(File)
 * @see org.apache.commons.io.FileUtils#ONE_GB
 * @see org.apache.commons.io.FileUtils#ONE_MB
 * @see org.apache.commons.io.FileUtils#ONE_KB
 * 
 * @see org.apache.commons.io.FileUtils#byteCountToDisplaySize(long)
 */
public static String formatSize(long fileSize) {
    String danwei = ""; //unit suffix
    long chushu = 1; //divisor for the chosen unit
    if (fileSize >= FileUtils.ONE_GB) {
        danwei = "GB";
        chushu = FileUtils.ONE_GB;
    } else if (fileSize >= FileUtils.ONE_MB) {
        danwei = "MB";
        chushu = FileUtils.ONE_MB;
    } else if (fileSize >= FileUtils.ONE_KB) {
        danwei = "KB";
        chushu = FileUtils.ONE_KB;
    } else {
        return fileSize + "Bytes";
    }
    long yushu = 100 * (fileSize % chushu) / chushu; //remainder scaled to two digits
    if (yushu == 0) {
        return fileSize / chushu + danwei;
    }
    //pad to two digits so that a remainder of 5 renders as ".05" rather than ".5"
    return fileSize / chushu + "." + String.format("%02d", yushu) + danwei;
}
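
A quick check of the rounding difference called out in the Javadoc above (assuming FileUtil from this source file is on the classpath; the size is illustrative):

import org.apache.commons.io.FileUtils;
import com.sunchenbin.store.feilong.core.io.FileUtil;

public class FormatSizeDemo {
    public static void main(String[] args) {
        long size = (long) (1.99 * FileUtils.ONE_GB); //about 1.99 GB
        System.out.println(FileUtils.byteCountToDisplaySize(size)); //"1 GB" - rounds down
        System.out.println(FileUtil.formatSize(size));              //"1.99GB"
    }
}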

From source file:com.linkedin.drelephant.tuning.ParamGenerator.java

/**
 * Checks whether the suggested parameter values violate any of these constraints:
 * Constraint 1: sort.mb must not exceed 60% of map.memory (to avoid heap memory failure)
 * Constraint 2: map.memory - sort.mb must be at least 768 MB (to avoid heap memory failure)
 * Constraint 3: pig.maxCombinedSplitSize must not exceed 1.8 * mapreduce.map.memory.mb
 * @param jobSuggestedParamValueList the list of suggested parameter values
 * @return true if any constraint is violated, false otherwise
 */
private boolean isParamConstraintViolated(List<JobSuggestedParamValue> jobSuggestedParamValueList) {
    logger.info("Checking whether parameter values are within constraints");

    Integer violations = 0;
    Double mrSortMemory = null;
    Double mrMapMemory = null;
    Double pigMaxCombinedSplitSize = null;

    for (JobSuggestedParamValue jobSuggestedParamValue : jobSuggestedParamValueList) {
        if (jobSuggestedParamValue.tuningParameter.paramName.equals("mapreduce.task.io.sort.mb")) {
            mrSortMemory = jobSuggestedParamValue.paramValue;
        } else if (jobSuggestedParamValue.tuningParameter.paramName.equals("mapreduce.map.memory.mb")) {
            mrMapMemory = jobSuggestedParamValue.paramValue;
        } else if (jobSuggestedParamValue.tuningParameter.paramName.equals("pig.maxCombinedSplitSize")) {
            pigMaxCombinedSplitSize = jobSuggestedParamValue.paramValue / FileUtils.ONE_MB;
        }
    }

    if (mrSortMemory != null && mrMapMemory != null) {
        if (mrSortMemory > 0.6 * mrMapMemory) {
            logger.info("Constraint violated: Sort memory > 60% of map memory");
            violations++;
        }
        if (mrMapMemory - mrSortMemory < 768) {
            logger.info("Constraint violated: Map memory - sort memory < 768 mb");
            violations++;
        }
    }

    if (pigMaxCombinedSplitSize != null && mrMapMemory != null
            && (pigMaxCombinedSplitSize > 1.8 * mrMapMemory)) {
        logger.info("Constraint violated: Pig max combined split size > 1.8 * map memory");
        violations++;
    }

    if (violations == 0) {
        return false;
    } else {
        logger.info("Number of constraint(s) violated: " + violations);
        return true;
    }
}

From source file:edu.kit.dama.dataworkflow.util.DataWorkflowHelper.java

/**
 * Helper method to perform the actual substitution.
 *
 * @param pTask The task whose working directory should be checked for
 * substitution.
 * @param pDirectory The directory whose files should be checked for
 * substitution.
 *
 * @throws IOException If the replacement operation fails for some reason.
 * @throws URISyntaxException If any of the URLs in the task (input, output,
 * temp or working dir URL) is invalid.
 */
private static void performSubstitution(DataWorkflowTask pTask, File pDirectory)
        throws IOException, URISyntaxException {
    File[] relevantFileList = pDirectory.listFiles(VAR_FILTER);
    LOGGER.info("Substituting variables in " + relevantFileList.length
            + ((relevantFileList.length == 1) ? " file" : " files"));

    for (File f : relevantFileList) {
        if (f.length() > 10 * FileUtils.ONE_MB) {
            LOGGER.warn(
                    "File {} has a size of {} bytes. Variable substitution is only supported for files with less than 10MB. File is skipped.",
                    f, f.length());
            continue;
        }
        //perform replacement
        LOGGER.info(" * Substituting variables in file '" + f.getPath() + "'");
        DataInputStream din = null;
        FileOutputStream fout = null;
        try {
            LOGGER.info("   - Reading input file");
            byte[] data = new byte[(int) f.length()];
            din = new DataInputStream(new FileInputStream(f));
            din.readFully(data);

            LOGGER.info("   - Substituting variables");
            String dataString = new String(data);

            String accessPointId = pTask.getExecutionEnvironment().getStagingAccessPointId();
            AbstractStagingAccessPoint accessPoint = StagingConfigurationManager.getSingleton()
                    .getAccessPointById(accessPointId);
            LOGGER.debug("  - Obtaining local path for input dir URL {}", pTask.getInputDirectoryUrl());
            File localPath = accessPoint.getLocalPathForUrl(new URL(pTask.getInputDirectoryUrl()),
                    getTaskContext(pTask));
            LOGGER.debug("  - Local path is: {}", localPath);
            String inputDirReplacement = localPath.getCanonicalPath();

            LOGGER.debug("  - Obtaining local path for output dir URL {}", pTask.getOutputDirectoryUrl());
            localPath = accessPoint.getLocalPathForUrl(new URL(pTask.getOutputDirectoryUrl()),
                    getTaskContext(pTask));
            String outputDirReplacement = localPath.getCanonicalPath();

            LOGGER.debug("  - Obtaining local path for working dir URL {}", pTask.getWorkingDirectoryUrl());
            localPath = accessPoint.getLocalPathForUrl(new URL(pTask.getWorkingDirectoryUrl()),
                    getTaskContext(pTask));
            String workingDirReplacement = localPath.getCanonicalPath();

            LOGGER.debug("  - Obtaining local path for temp dir URL {}", pTask.getTempDirectoryUrl());
            localPath = accessPoint.getLocalPathForUrl(new URL(pTask.getTempDirectoryUrl()),
                    getTaskContext(pTask));
            String tempDirReplacement = localPath.getCanonicalPath();

            LOGGER.info("     " + DATA_IN_DIR + ": " + inputDirReplacement);
            LOGGER.info("     " + DATA_OUT_DIR + ": " + outputDirReplacement);
            LOGGER.info("     " + TEMP_DIR + ": " + tempDirReplacement);
            LOGGER.info("     " + WORKING_DIR + ": " + workingDirReplacement);
            //replace all variables
            //To obtain a proper path format, the input paths are wrapped in File objects and their
            //canonical paths are used for replacement, so differences between the source and
            //destination platforms are not relevant.
            //Matcher.quoteReplacement (java.util.regex.Matcher) keeps '\' and '$' in paths from
            //being interpreted as regex replacement syntax.
            dataString = dataString
                    .replaceAll(Pattern.quote(DATA_IN_DIR_VARIABLE), Matcher.quoteReplacement(inputDirReplacement))
                    .replaceAll(Pattern.quote(DATA_OUT_DIR_VARIABLE), Matcher.quoteReplacement(outputDirReplacement))
                    .replaceAll(Pattern.quote(TEMP_DIR_VARIABLE), Matcher.quoteReplacement(tempDirReplacement))
                    .replaceAll(Pattern.quote(WORKING_DIR_VARIABLE), Matcher.quoteReplacement(workingDirReplacement));
            LOGGER.info("   - Writing output file");
            fout = new FileOutputStream(f);
            fout.write(dataString.getBytes());
            fout.flush();
            LOGGER.info(" * Substituting operations finished successfully");
        } finally {
            //IOUtils.closeQuietly closes and swallows IOException, matching the empty catch blocks it replaces
            IOUtils.closeQuietly(din);
            IOUtils.closeQuietly(fout);
        }
    }
    LOGGER.info("Directory {} processed successfully", pDirectory);
}

From source file:org.ado.biblio.desktop.update.UpdatePresenter.java

private FileSizeConverter.SizeUnit getSizeUnit(long artifactSize) {
    if (artifactSize > FileUtils.ONE_MB) {
        return FileSizeConverter.SizeUnit.MB;
    } else {
        return FileSizeConverter.SizeUnit.KB;
    }
}

From source file:org.alfresco.repo.content.caching.cleanup.CachedContentCleaner.java

public double getSizeFilesDeletedMB() {
    return (double) getSizeFilesDeleted() / FileUtils.ONE_MB;
}