Example usage for java.util.logging Logger severe

Introduction

On this page you can find example usage of the java.util.logging Logger.severe method.

Prototype

public void severe(Supplier<String> msgSupplier) 

Document

Log a SEVERE message, which is only to be constructed if the logging level is such that the message will actually be logged.
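Note that the usage examples below pass plain strings to severe(String); none call the Supplier overload directly. Because SEVERE is the highest standard level, the Supplier variant mainly pays off when a logger is disabled (for example, set to Level.OFF) or filtered: the lambda is evaluated only if the record would actually be published. A minimal sketch, with an illustrative class name and message:

import java.util.logging.Level;
import java.util.logging.Logger;

public class SevereSupplierExample {
    private static final Logger LOGGER = Logger.getLogger(SevereSupplierExample.class.getName());

    public static void main(String[] args) {
        int failedCount = 3; // illustrative value

        // The Supplier is invoked only if a SEVERE record would actually be
        // published, so an expensive message is never built for a disabled logger.
        LOGGER.severe(() -> "Export failed for " + failedCount + " studies");

        // Equivalent manual guard without the Supplier overload:
        if (LOGGER.isLoggable(Level.SEVERE)) {
            LOGGER.severe("Export failed for " + failedCount + " studies");
        }
    }
}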

Usage

From source file: edu.harvard.iq.dvn.core.study.StudyServiceBean.java

This example exports recently updated studies to a legacy VDC system and uses severe to log the exception message and a manually assembled stack trace when the export fails.

public void exportStudyFilesToLegacySystem(String lastUpdateTime, String authority) {
    // Get the list of studies updated since lastUpdateTime (yesterday by
    // default) and export them to the legacy VDC system.

    Logger logger = null;

    String exportLogDirStr = System.getProperty("vdc.export.log.dir");
    if (exportLogDirStr == null) {
        System.out.println("Missing system property: vdc.export.log.dir.  Please add to JVM options");
        return;
    }
    File exportLogDir = new File(exportLogDirStr);
    if (!exportLogDir.exists()) {
        exportLogDir.mkdir();
    }

    logger = Logger.getLogger("edu.harvard.iq.dvn.core.web.servlet.VDCExportServlet");

    // Every time export runs, we want to write to a separate log file
    // (handler), so if export has run previously, remove the previous
    // handlers. Iterate over a snapshot of the handler array; re-reading
    // getHandlers() while removing would skip entries as the array shrinks.
    for (Handler h : logger.getHandlers()) {
        logger.removeHandler(h);
    }

    SimpleDateFormat formatter = new SimpleDateFormat("yyyy_MM_dd");
    FileHandler handler = null;
    try {
        handler = new FileHandler(
                exportLogDirStr + File.separator + "export_" + formatter.format(new Date()) + ".log");
    } catch (IOException e) {
        throw new EJBException(e);
    }

    // Add handler to the desired logger
    logger.addHandler(handler);

    logger.info("Begin Exporting Studies");
    int studyCount = 0;
    int deletedStudyCount = 0;
    try {

        /* THIS IS LEGACY CODE AND SHOULD BE DELETED
        // For all studies that have been deleted in the dataverse since last export, remove study directory in VDC
                
        String query = "SELECT s from DeletedStudy s where s.authority = '" + authority + "' ";
        List deletedStudies = em.createQuery(query).getResultList();
        for (Iterator it = deletedStudies.iterator(); it.hasNext();) {
        DeletedStudy deletedStudy = (DeletedStudy) it.next();
                
        logger.info("Deleting study " + deletedStudy.getGlobalId());
        Study study = em.find(Study.class, deletedStudy.getId());
        File legacyStudyDir = new File(FileUtil.getLegacyFileDir() + File.separatorChar + study.getAuthority() + File.separatorChar + study.getStudyId());
                
        // Remove files in the directory, then delete the directory.
        File[] studyFiles = legacyStudyDir.listFiles();
        if (studyFiles != null) {
            for (int i = 0; i < studyFiles.length; i++) {
                studyFiles[i].delete();
            }
        }
        legacyStudyDir.delete();
        deletedStudyCount++;
                
        em.remove(deletedStudy);
        }
        */

        // Export all studies updated at "lastUpdateTime"

        if (authority == null) {
            authority = vdcNetworkService.find().getAuthority();
        }
        String beginTime = null;
        String endTime = null;
        if (lastUpdateTime == null) {
            Calendar cal = Calendar.getInstance();
            cal.add(Calendar.DAY_OF_YEAR, -1);
            beginTime = new SimpleDateFormat("yyyy-MM-dd").format(cal.getTime()); // Use yesterday as default value
            cal.add(Calendar.DAY_OF_YEAR, 1);
            endTime = new SimpleDateFormat("yyyy-MM-dd").format(cal.getTime());
        } else {
            beginTime = lastUpdateTime;
            Date date = new SimpleDateFormat("yyyy-MM-dd").parse(lastUpdateTime);
            Calendar cal = Calendar.getInstance();
            cal.setTime(date);
            cal.add(Calendar.DAY_OF_YEAR, 1);
            endTime = new SimpleDateFormat("yyyy-MM-dd").format(cal.getTime());
        }
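        // Note: assembling the JPQL query by string concatenation is open to
        // injection if authority or lastUpdateTime ever contains a quote;
        // a parameterized query would be safer.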
        String query = "SELECT s from Study s where s.authority = '" + authority + "' ";
        query += " and s.lastUpdateTime >'" + beginTime + "'";
        //    query+=" and s.lastUpdateTime <'" +endTime+"'";
        query += " order by s.studyId";
        List updatedStudies = em.createQuery(query).getResultList();

        for (Iterator it = updatedStudies.iterator(); it.hasNext();) {
            Study study = (Study) it.next();
            logger.info("Exporting study " + study.getStudyId());

            exportStudyToLegacySystem(study, authority);
            studyCount++;

        }
    } catch (Exception e) {
        logger.severe(e.getMessage());

        logger.severe("Exception caused by: " + e + "\n");

        // Assemble the full stack trace into a single SEVERE record.
        StringBuilder stackTrace = new StringBuilder("StackTrace: \n");
        for (StackTraceElement element : e.getStackTrace()) {
            stackTrace.append(element).append("\n");
        }
        logger.severe(stackTrace.toString());
    }

    logger.info("End export, " + studyCount + " studies successfully exported, " + deletedStudyCount
            + " studies deleted.");
}
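As an aside, java.util.logging can attach the Throwable to the log record directly, which makes the manual stack-trace assembly in the catch block above unnecessary; the handler's formatter renders the trace. A sketch of an equivalent catch block (not the original author's code):

    } catch (Exception e) {
        // log(Level.SEVERE, message, thrown) records the message and the full
        // stack trace in a single record, replacing the manual loop above.
        logger.log(Level.SEVERE, "Study export failed", e);
    }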

From source file: es.upm.dit.gsi.barmas.dataset.utils.DatasetSplitter.java

This example splits a dataset into per-fold test, central, and per-agent training files, and uses severe to report the failure message before exiting if any step throws.

/**
 * Splits the original dataset into test, central, and per-agent training
 * datasets for each fold.
 *
 * @param folds the number of cross-validation folds
 * @param minAgents the minimum number of agents to generate datasets for
 * @param maxAgents the maximum number of agents to generate datasets for
 * @param originalDatasetPath path to the original dataset in CSV format
 * @param outputDir directory where the split datasets are written
 * @param scenario the scenario name used in the experiment info file
 * @param logger the logger used to report progress and failures
 */
public void splitDataset(int folds, int minAgents, int maxAgents, String originalDatasetPath, String outputDir,
        String scenario, Logger logger) {

    // Test ratio per fold, truncated to two decimal places
    // (e.g. folds = 3 -> 0.33).
    int ratioint = (int) ((1 / (double) folds) * 100);
    double roundedratio = ((double) ratioint) / 100;

    // Look for essentials
    List<String[]> essentials = this.getEssentials(originalDatasetPath, logger);

    for (int fold = 0; fold < folds; fold++) {
        String outputDirWithRatio = outputDir + "/" + roundedratio + "testRatio/iteration-" + fold;
        File dir = new File(outputDirWithRatio);
        if (!dir.exists() || !dir.isDirectory()) {
            dir.mkdirs();
        }

        logger.finer("--> splitDataset()");
        logger.fine("Creating experiment.info...");

        try {

            Instances originalData = this.getDataFromCSV(originalDatasetPath);

            originalData.randomize(new Random());
            originalData.stratify(folds);

            // TestDataSet
            Instances testData = originalData.testCV(folds, fold);
            CSVSaver saver = new CSVSaver();
            ArffSaver arffsaver = new ArffSaver();
            File file = new File(outputDirWithRatio + File.separator + "test-dataset.csv");
            if (!file.exists()) {
                saver.resetOptions();
                saver.setInstances(testData);
                saver.setFile(file);
                saver.writeBatch();
            }

            file = new File(outputDirWithRatio + File.separator + "test-dataset.arff");
            if (!file.exists()) {
                arffsaver.resetOptions();
                arffsaver.setInstances(testData);
                arffsaver.setFile(file);
                arffsaver.writeBatch();
            }

            // BayesCentralDataset
            Instances trainData = originalData.trainCV(folds, fold);
            file = new File(outputDirWithRatio + File.separator + "bayes-central-dataset.csv");
            if (!file.exists()) {
                saver.resetOptions();
                saver.setInstances(trainData);
                saver.setFile(file);
                saver.writeBatch();
                this.copyFileUsingApacheCommonsIO(file,
                        new File(
                                outputDirWithRatio + File.separator + "bayes-central-dataset-noEssentials.csv"),
                        logger);
                CsvWriter w = new CsvWriter(new FileWriter(file, true), ',');
                for (String[] essential : essentials) {
                    w.writeRecord(essential);
                }
                w.close();
            }
            file = new File(outputDirWithRatio + File.separator + "bayes-central-dataset.arff");
            if (!file.exists()) {
                arffsaver.resetOptions();
                arffsaver.setInstances(trainData);
                arffsaver.setFile(file);
                arffsaver.writeBatch();
                this.copyFileUsingApacheCommonsIO(file, new File(
                        outputDirWithRatio + File.separator + "bayes-central-dataset-noEssentials.arff"),
                        logger);
                CsvWriter w = new CsvWriter(new FileWriter(file, true), ',');
                for (String[] essential : essentials) {
                    w.writeRecord(essential);
                }
                w.close();
            }

            // Agent datasets
            CsvReader csvreader = new CsvReader(new FileReader(new File(originalDatasetPath)));
            csvreader.readHeaders();
            String[] headers = csvreader.getHeaders();
            csvreader.close();

            for (int agents = minAgents; agents <= maxAgents; agents++) {
                this.createExperimentInfoFile(folds, agents, originalDatasetPath, outputDirWithRatio, scenario,
                        logger);
                HashMap<String, CsvWriter> writers = new HashMap<String, CsvWriter>();
                String agentsDatasetsDir = outputDirWithRatio + File.separator + agents + "agents";
                HashMap<String, CsvWriter> arffWriters = new HashMap<String, CsvWriter>();
                File f = new File(agentsDatasetsDir);
                if (!f.isDirectory()) {
                    f.mkdirs();
                }
                Instances copy = new Instances(trainData);
                copy.delete();
                for (int i = 0; i < agents; i++) {
                    String fileName = agentsDatasetsDir + File.separator + "agent-" + i + "-dataset.csv";
                    file = new File(fileName);
                    if (!file.exists()) {
                        CsvWriter writer = new CsvWriter(new FileWriter(fileName), ',');
                        writer.writeRecord(headers);
                        writers.put("AGENT" + i, writer);
                    }
                    fileName = agentsDatasetsDir + File.separator + "agent-" + i + "-dataset.arff";
                    file = new File(fileName);
                    if (!file.exists()) {
                        arffsaver.resetOptions();
                        arffsaver.setInstances(copy);
                        arffsaver.setFile(new File(fileName));
                        arffsaver.writeBatch();
                        CsvWriter arffwriter = new CsvWriter(new FileWriter(fileName, true), ',');
                        arffWriters.put("AGENT" + i, arffwriter);
                    }

                    logger.fine("AGENT" + i + " dataset created in csv and arff formats.");
                }
                // Append essentials to all
                for (String[] essential : essentials) {
                    for (CsvWriter wr : writers.values()) {
                        wr.writeRecord(essential);
                    }
                    for (CsvWriter arffwr : arffWriters.values()) {
                        arffwr.writeRecord(essential);
                    }
                }

                int agentCounter = 0;
                for (int j = 0; j < trainData.numInstances(); j++) {
                    Instance instance = trainData.instance(j);
                    CsvWriter writer = writers.get("AGENT" + agentCounter);
                    CsvWriter arffwriter = arffWriters.get("AGENT" + agentCounter);
                    String[] row = new String[instance.numAttributes()];
                    for (int a = 0; a < instance.numAttributes(); a++) {
                        row[a] = instance.stringValue(a);
                    }
                    if (writer != null) {
                        writer.writeRecord(row);
                    }
                    if (arffwriter != null) {
                        arffwriter.writeRecord(row);
                    }
                    agentCounter++;
                    if (agentCounter == agents) {
                        agentCounter = 0;
                    }
                }

                for (CsvWriter wr : writers.values()) {
                    wr.close();
                }
                for (CsvWriter arffwr : arffWriters.values()) {
                    arffwr.close();
                }
            }

        } catch (Exception e) {
            logger.severe("Exception while splitting dataset. ->");
            logger.severe(e.getMessage());
            System.exit(1);
        }

        logger.finest("Dataset for fold " + fold + " created.");
    }

    logger.finer("<-- splitDataset()");

}

From source file: edu.harvard.iq.dvn.core.web.admin.OptionsPage.java

This example batch-imports studies from a directory and uses severe to record each study import or file upload failure in a per-batch log file.

public String importBatch_action() {
    FileHandler logFileHandler = null;
    Logger importLogger = null;

    if (importBatchDir == null || importBatchDir.equals(""))
        return null;
    try {
        int importFailureCount = 0;
        int fileFailureCount = 0;
        List<Long> studiesToIndex = new ArrayList<Long>();
        //sessionId =  ((HttpSession) FacesContext.getCurrentInstance().getExternalContext().getSession(false)).getId();
        sessionId = "batchimportsession";

        File batchDir = new File(importBatchDir);
        if (batchDir.exists() && batchDir.isDirectory()) {

            // create Logger
            String logTimestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss").format(new Date());
            String dvAlias = vdcService.find(importDVId).getAlias();
            importLogger = Logger.getLogger(
                    "edu.harvard.iq.dvn.core.web.networkAdmin.UtilitiesPage." + dvAlias + "_" + logTimestamp);
            String logFileName = FileUtil.getImportFileDir() + File.separator + "batch_" + dvAlias + "_"
                    + logTimestamp + ".log";
            logFileHandler = new FileHandler(logFileName);
            importLogger.addHandler(logFileHandler);

            importLogger
                    .info("BEGIN BATCH IMPORT (dvId = " + importDVId + ") from directory: " + importBatchDir);

            // Take a single snapshot of the directory listing instead of
            // calling listFiles() on every iteration.
            for (File studyDir : batchDir.listFiles()) {
                if (studyDir.isDirectory()) { // one directory per study
                    importLogger.info("Found study directory: " + studyDir.getName());

                    File xmlFile = null;
                    Map<File, String> filesToUpload = new HashMap<>();

                    for (File file : studyDir.listFiles()) {
                        if ("study.xml".equals(file.getName())) {
                            xmlFile = file;
                        } else {
                            addFile(file, "", filesToUpload, importLogger);
                        }
                    }

                    if (xmlFile != null) {
                        try {
                            importLogger.info("Found study.xml and " + filesToUpload.size() + " other "
                                    + (filesToUpload.size() == 1 ? "file." : "files."));
                            // TODO: we need to incorporate the add files step into the same transaction of the import!!!
                            Study study = studyService.importStudy(xmlFile, importFileFormat, importDVId,
                                    getVDCSessionBean().getLoginBean().getUser().getId());
                            study.getLatestVersion().setVersionNote("Study imported via batch import.");
                            importLogger.info("Import of study.xml succeeded: study id = " + study.getId());
                            studiesToIndex.add(study.getId());

                            if (!filesToUpload.isEmpty()) {

                                List<StudyFileEditBean> fileBeans = new ArrayList<>();
                                for (File file : filesToUpload.keySet()) {
                                    StudyFileEditBean fileBean = new StudyFileEditBean(file,
                                            studyService.generateFileSystemNameSequence(), study);
                                    fileBean.getFileMetadata().setCategory(filesToUpload.get(file));
                                    fileBeans.add(fileBean);
                                }

                                try {
                                    studyFileService.addFiles(study.getLatestVersion(), fileBeans,
                                            getVDCSessionBean().getLoginBean().getUser());
                                    importLogger.info("File upload succeeded.");
                                } catch (Exception e) {
                                    fileFailureCount++;
                                    importLogger.severe("File Upload failed (dir = " + studyDir.getName()
                                            + "): exception message = " + e.getMessage());
                                    logException(e, importLogger);
                                }
                            }

                        } catch (Exception e) {
                            importFailureCount++;
                            importLogger.severe("Import failed (dir = " + studyDir.getName()
                                    + "): exception message = " + e.getMessage());
                            logException(e, importLogger);
                        }

                    } else { // no study.xml found in studyDir
                        importLogger.warning("No study.xml file was found in study directory. Skipping... ");
                    }
                } else {
                    importLogger.warning("Found non directory at top level. Skipping... (filename = "
                            + studyDir.getName() + ")");
                }
            }

            // generate status message
            String statusMessage = studiesToIndex.size() + (studiesToIndex.size() == 1 ? " study" : " studies")
                    + " successfully imported";
            statusMessage += (fileFailureCount == 0 ? ""
                    : " (" + fileFailureCount + " of which failed file upload)");
            statusMessage += (importFailureCount == 0 ? "."
                    : "; " + importFailureCount + (importFailureCount == 1 ? " study" : " studies")
                            + " failed import.");

            importLogger.info("COMPLETED BATCH IMPORT: " + statusMessage);

            // now index all studies
            importLogger.info("POST BATCH IMPORT, start calls to index.");
            indexService.updateIndexList(studiesToIndex);
            importLogger.info("POST BATCH IMPORT, calls to index finished.");

            addMessage("importMessage", "Batch Import request completed.");
            addMessage("importMessage", statusMessage);
            addMessage("importMessage", "For more detail see log file at: " + logFileName);

        } else {
            addMessage("importMessage",
                    "Batch Import failed: " + importBatchDir + " does not exist or is not a directory.");
        }
    } catch (Exception e) {
        e.printStackTrace();
        addMessage("importMessage", "Batch Import failed: An unexpected error occurred during processing.");
        addMessage("importMessage", "Exception message: " + e.getMessage());
    } finally {
        if (logFileHandler != null) {
            logFileHandler.close();
            importLogger.removeHandler(logFileHandler);
        }
        //   importBatchDir = "";
    }

    return null;
}
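The logException helper called in this example is not part of the listing. A hypothetical sketch, assuming it simply records the exception at SEVERE with its stack trace (the actual implementation in OptionsPage.java may differ):

// Hypothetical helper; the name matches the call sites above, but the
// body is an assumption, not the original implementation.
private void logException(Throwable e, Logger logger) {
    // Attaching the Throwable lets the handler's formatter render the
    // full stack trace alongside the message.
    logger.log(Level.SEVERE, e.getMessage(), e);
}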