Usage examples for java.util.logging.Logger.warning
public void warning(String msg)
public void warning(Supplier<String> msgSupplier)

All three examples below use the String overload; the Supplier overload (Java 8+) defers building the message until the level check passes.
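Before the longer real-world examples, a minimal self-contained sketch of both overloads (the class name WarningDemo is invented for illustration):

import java.util.logging.Logger;

public class WarningDemo {
    private static final Logger LOGGER = Logger.getLogger(WarningDemo.class.getName());

    public static void main(String[] args) {
        // Eager overload: the message string is built whether or not
        // WARNING is loggable for this logger.
        LOGGER.warning("disk usage at " + 91 + "%");

        // Lazy overload (Java 8+): the Supplier is invoked only if WARNING
        // is loggable, so the concatenation is skipped when the level
        // filters the message out.
        LOGGER.warning(() -> "disk usage at " + 91 + "%");
    }
}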
From source file:org.archive.io.arc.ARCRecord.java
/**
 * Read http header if present. Technique borrowed from HttpClient HttpParse
 * class. Set errors when found.
 *
 * @return ByteArrayInputStream with the http header in it or null if no
 * http header.
 * @throws IOException
 */
private InputStream readHttpHeader() throws IOException {
    // This can be helpful when simply iterating over records,
    // looking for problems.
    Logger logger = Logger.getLogger(this.getClass().getName());
    ArchiveRecordHeader h = this.getHeader();

    // If judged a record that doesn't have an http header, return
    // immediately.
    String url = getHeader().getUrl();
    if (!url.startsWith("http") || getHeader().getLength() <= MIN_HTTP_HEADER_LENGTH) {
        return null;
    }

    String statusLine;
    byte[] statusBytes;
    int eolCharCount = 0;
    int errOffset = 0;

    // Read status line, skipping any errant http headers found before it.
    // This allows a larger number of 'corrupt' arcs -- where headers were
    // accidentally inserted before the status line -- to be readable.
    while (true) {
        statusBytes = LaxHttpParser.readRawLine(getIn());
        eolCharCount = getEolCharsCount(statusBytes);
        if (eolCharCount <= 0) {
            throw new RecoverableIOException("Failed to read http status where one was expected: "
                    + ((statusBytes == null) ? "" : new String(statusBytes)));
        }
        statusLine = EncodingUtil.getString(statusBytes, 0, statusBytes.length - eolCharCount,
                ARCConstants.DEFAULT_ENCODING);

        // If null or DELETED, break immediately.
        if ((statusLine == null) || statusLine.startsWith("DELETED")) {
            break;
        }

        // If it's actually the status line, break; otherwise continue
        // skipping any previous header values.
        if (!statusLine.contains(":") && StatusLine.startsWithHTTP(statusLine)) {
            break;
        }

        // Add bytes read to error "offset" to add to position.
        errOffset += statusBytes.length;
    }

    if (errOffset > 0) {
        this.incrementPosition(errOffset);
    }

    if ((statusLine == null) || !StatusLine.startsWithHTTP(statusLine)) {
        if (statusLine.startsWith("DELETED")) {
            // Some old ARCs have deleted records like the following:
            // http://vireo.gatech.edu:80/ebt-bin/nph-dweb/dynaweb/SGI_Developer/SGITCL_PG/@Generic__BookTocView/11108%3Btd%3D2 130.207.168.42 19991010131803 text/html 29202
            // DELETED_TIME=20000425001133_DELETER=Kurt_REASON=alexalist
            // (follows ~29K spaces)
            // For now, throw a RecoverableIOException so if iterating over
            // records, we keep going. TODO: Later make a legitimate
            // ARCRecord from the deleted record rather than throw an
            // exception.
            throw new DeletedARCRecordIOException(statusLine);
        } else {
            this.errors.add(ArcRecordErrors.HTTP_STATUS_LINE_INVALID);
        }
    }

    try {
        this.httpStatus = new StatusLine(statusLine);
    } catch (IOException e) {
        logger.warning(e.getMessage() + " at offset: " + h.getOffset());
        this.errors.add(ArcRecordErrors.HTTP_STATUS_LINE_EXCEPTION);
    }

    // Save off all bytes read. Keep them as bytes rather than convert to
    // strings so we don't have to worry about encodings, though this
    // should never be a problem doing http headers since it's all
    // supposed to be ascii.
    ByteArrayOutputStream baos = new ByteArrayOutputStream(statusBytes.length + 4 * 1024);
    baos.write(statusBytes);

    // Now read the rest of the header lines looking for the separation
    // between header and body.
    for (byte[] lineBytes = null; true;) {
        lineBytes = LaxHttpParser.readRawLine(getIn());
        eolCharCount = getEolCharsCount(lineBytes);
        if (eolCharCount <= 0) {
            if (getIn().available() == 0) {
                httpHeaderBytesRead += statusBytes.length;
                logger.warning("HTTP header truncated at offset: " + h.getOffset());
                this.errors.add(ArcRecordErrors.HTTP_HEADER_TRUNCATED);
                this.setEor(true);
                break;
            } else {
                throw new IOException("Failed reading http headers: "
                        + ((lineBytes != null) ? new String(lineBytes) : null));
            }
        } else {
            httpHeaderBytesRead += lineBytes.length;
        }
        // Save the bytes read.
        baos.write(lineBytes);
        if ((lineBytes.length - eolCharCount) <= 0) {
            // We've finished reading the http header.
            break;
        }
    }

    byte[] headerBytes = baos.toByteArray();
    // Save off where body starts.
    this.getMetaData().setContentBegin(headerBytes.length);
    ByteArrayInputStream bais = new ByteArrayInputStream(headerBytes);
    if (!bais.markSupported()) {
        throw new IOException("ByteArrayInputStream does not support mark");
    }
    bais.mark(headerBytes.length);
    // Read the status line. Don't let it into the parseHeaders function;
    // it doesn't know what to do with it.
    bais.read(statusBytes, 0, statusBytes.length);
    this.httpHeaders = LaxHttpParser.parseHeaders(bais, ARCConstants.DEFAULT_ENCODING);
    this.getMetaData().setStatusCode(Integer.toString(getStatusCode()));
    bais.reset();
    return bais;
}
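The example above shows a common pattern for tolerant parsers: on a recoverable problem, log a warning that includes positional context (the record offset) and record an error flag, rather than aborting the scan. A condensed, hypothetical sketch of that pattern (RecordScanner and the error string are invented for illustration, not part of the ARCRecord source):

import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;

class RecordScanner {
    private static final Logger LOGGER = Logger.getLogger(RecordScanner.class.getName());
    private final List<String> errors = new ArrayList<>();

    // Log a warning with the offset for later diagnosis, record the error,
    // and let the caller keep iterating over records.
    void checkStatusLine(String statusLine, long offset) {
        if (statusLine == null || !statusLine.startsWith("HTTP")) {
            LOGGER.warning("Invalid status line at offset: " + offset);
            errors.add("HTTP_STATUS_LINE_INVALID");
        }
    }
}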
From source file:edu.harvard.iq.dvn.core.web.networkAdmin.UtilitiesPage.java
public String importBatch_action() {
    FileHandler logFileHandler = null;
    Logger importLogger = null;
    if (importBatchDir == null || importBatchDir.equals(""))
        return null;
    try {
        int importFailureCount = 0;
        int fileFailureCount = 0;
        List<Long> studiesToIndex = new ArrayList<Long>();
        //sessionId = ((HttpSession) FacesContext.getCurrentInstance().getExternalContext().getSession(false)).getId();
        File batchDir = new File(importBatchDir);
        if (batchDir.exists() && batchDir.isDirectory()) {
            // create Logger
            String logTimestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss").format(new Date());
            String dvAlias = vdcService.find(importDVId).getAlias();
            importLogger = Logger.getLogger(
                    "edu.harvard.iq.dvn.core.web.networkAdmin.UtilitiesPage." + dvAlias + "_" + logTimestamp);
            String logFileName = FileUtil.getImportFileDir() + File.separator + "batch_" + dvAlias + "_"
                    + logTimestamp + ".log";
            logFileHandler = new FileHandler(logFileName);
            importLogger.addHandler(logFileHandler);
            importLogger.info("BEGIN BATCH IMPORT (dvId = " + importDVId + ") from directory: " + importBatchDir);

            for (int i = 0; i < batchDir.listFiles().length; i++) {
                File studyDir = batchDir.listFiles()[i];
                if (studyDir.isDirectory()) { // one directory per study
                    importLogger.info("Found study directory: " + studyDir.getName());
                    File xmlFile = null;
                    Map<File, String> filesToUpload = new HashMap();
                    for (int j = 0; j < studyDir.listFiles().length; j++) {
                        File file = studyDir.listFiles()[j];
                        if ("study.xml".equals(file.getName())) {
                            xmlFile = file;
                        } else {
                            addFile(file, "", filesToUpload);
                        }
                    }
                    if (xmlFile != null) {
                        try {
                            importLogger.info("Found study.xml and " + filesToUpload.size() + " other "
                                    + (filesToUpload.size() == 1 ? "file." : "files."));
                            // TODO: we need to incorporate the add files step into the same transaction of the import!!!
                            Study study = studyService.importStudy(xmlFile, importFileFormat, importDVId,
                                    getVDCSessionBean().getLoginBean().getUser().getId());
                            study.getLatestVersion().setVersionNote("Study imported via batch import.");
                            importLogger.info("Import of study.xml succeeded: study id = " + study.getId());
                            studiesToIndex.add(study.getId());
                            if (!filesToUpload.isEmpty()) {
                                List<StudyFileEditBean> fileBeans = new ArrayList();
                                for (File file : filesToUpload.keySet()) {
                                    StudyFileEditBean fileBean = new StudyFileEditBean(file,
                                            studyService.generateFileSystemNameSequence(), study);
                                    fileBean.getFileMetadata().setCategory(filesToUpload.get(file));
                                    fileBeans.add(fileBean);
                                }
                                try {
                                    studyFileService.addFiles(study.getLatestVersion(), fileBeans,
                                            getVDCSessionBean().getLoginBean().getUser());
                                    importLogger.info("File upload succeeded.");
                                } catch (Exception e) {
                                    fileFailureCount++;
                                    importLogger.severe("File Upload failed (dir = " + studyDir.getName()
                                            + "): exception message = " + e.getMessage());
                                    logException(e, importLogger);
                                }
                            }
                        } catch (Exception e) {
                            importFailureCount++;
                            importLogger.severe("Import failed (dir = " + studyDir.getName()
                                    + "): exception message = " + e.getMessage());
                            logException(e, importLogger);
                        }
                    } else { // no study.xml found in studyDir
                        importLogger.warning("No study.xml file was found in study directory. Skipping... ");
                    }
                } else {
                    importLogger.warning("Found non directory at top level. Skipping... (filename = "
                            + studyDir.getName() + ")");
                }
            }

            // generate status message
            String statusMessage = studiesToIndex.size() + (studiesToIndex.size() == 1 ? " study" : " studies")
                    + " successfully imported";
            statusMessage += (fileFailureCount == 0 ? ""
                    : " (" + fileFailureCount + " of which failed file upload)");
            statusMessage += (importFailureCount == 0 ? "."
                    : "; " + importFailureCount + (importFailureCount == 1 ? " study" : " studies")
                            + " failed import.");
            importLogger.info("COMPLETED BATCH IMPORT: " + statusMessage);

            // now index all studies
            importLogger.info("POST BATCH IMPORT, start calls to index.");
            indexService.updateIndexList(studiesToIndex);
            importLogger.info("POST BATCH IMPORT, calls to index finished.");

            addMessage("importMessage", "Batch Import request completed.");
            addMessage("importMessage", statusMessage);
            addMessage("importMessage", "For more detail see log file at: " + logFileName);
        } else {
            addMessage("importMessage",
                    "Batch Import failed: " + importBatchDir + " does not exist or is not a directory.");
        }
    } catch (Exception e) {
        e.printStackTrace();
        addMessage("importMessage", "Batch Import failed: An unexpected error occurred during processing.");
        addMessage("importMessage", "Exception message: " + e.getMessage());
    } finally {
        if (logFileHandler != null) {
            logFileHandler.close();
            importLogger.removeHandler(logFileHandler);
        }
        // importBatchDir = "";
    }
    return null;
}
From source file:edu.harvard.iq.dvn.core.web.admin.OptionsPage.java
public String importBatch_action() {
    FileHandler logFileHandler = null;
    Logger importLogger = null;
    if (importBatchDir == null || importBatchDir.equals(""))
        return null;
    try {
        int importFailureCount = 0;
        int fileFailureCount = 0;
        List<Long> studiesToIndex = new ArrayList<Long>();
        //sessionId = ((HttpSession) FacesContext.getCurrentInstance().getExternalContext().getSession(false)).getId();
        sessionId = "batchimportsession";
        File batchDir = new File(importBatchDir);
        if (batchDir.exists() && batchDir.isDirectory()) {
            // create Logger
            String logTimestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss").format(new Date());
            String dvAlias = vdcService.find(importDVId).getAlias();
            importLogger = Logger.getLogger(
                    "edu.harvard.iq.dvn.core.web.networkAdmin.UtilitiesPage." + dvAlias + "_" + logTimestamp);
            String logFileName = FileUtil.getImportFileDir() + File.separator + "batch_" + dvAlias + "_"
                    + logTimestamp + ".log";
            logFileHandler = new FileHandler(logFileName);
            importLogger.addHandler(logFileHandler);
            importLogger.info("BEGIN BATCH IMPORT (dvId = " + importDVId + ") from directory: " + importBatchDir);

            for (int i = 0; i < batchDir.listFiles().length; i++) {
                File studyDir = batchDir.listFiles()[i];
                if (studyDir.isDirectory()) { // one directory per study
                    importLogger.info("Found study directory: " + studyDir.getName());
                    File xmlFile = null;
                    Map<File, String> filesToUpload = new HashMap();
                    for (int j = 0; j < studyDir.listFiles().length; j++) {
                        File file = studyDir.listFiles()[j];
                        if ("study.xml".equals(file.getName())) {
                            xmlFile = file;
                        } else {
                            addFile(file, "", filesToUpload, importLogger);
                        }
                    }
                    if (xmlFile != null) {
                        try {
                            importLogger.info("Found study.xml and " + filesToUpload.size() + " other "
                                    + (filesToUpload.size() == 1 ? "file." : "files."));
                            // TODO: we need to incorporate the add files step into the same transaction of the import!!!
                            Study study = studyService.importStudy(xmlFile, importFileFormat, importDVId,
                                    getVDCSessionBean().getLoginBean().getUser().getId());
                            study.getLatestVersion().setVersionNote("Study imported via batch import.");
                            importLogger.info("Import of study.xml succeeded: study id = " + study.getId());
                            studiesToIndex.add(study.getId());
                            if (!filesToUpload.isEmpty()) {
                                List<StudyFileEditBean> fileBeans = new ArrayList();
                                for (File file : filesToUpload.keySet()) {
                                    StudyFileEditBean fileBean = new StudyFileEditBean(file,
                                            studyService.generateFileSystemNameSequence(), study);
                                    fileBean.getFileMetadata().setCategory(filesToUpload.get(file));
                                    fileBeans.add(fileBean);
                                }
                                try {
                                    studyFileService.addFiles(study.getLatestVersion(), fileBeans,
                                            getVDCSessionBean().getLoginBean().getUser());
                                    importLogger.info("File upload succeeded.");
                                } catch (Exception e) {
                                    fileFailureCount++;
                                    importLogger.severe("File Upload failed (dir = " + studyDir.getName()
                                            + "): exception message = " + e.getMessage());
                                    logException(e, importLogger);
                                }
                            }
                        } catch (Exception e) {
                            importFailureCount++;
                            importLogger.severe("Import failed (dir = " + studyDir.getName()
                                    + "): exception message = " + e.getMessage());
                            logException(e, importLogger);
                        }
                    } else { // no study.xml found in studyDir
                        importLogger.warning("No study.xml file was found in study directory. Skipping... ");
                    }
                } else {
                    importLogger.warning("Found non directory at top level. Skipping... (filename = "
                            + studyDir.getName() + ")");
                }
            }

            // generate status message
            String statusMessage = studiesToIndex.size() + (studiesToIndex.size() == 1 ? " study" : " studies")
                    + " successfully imported";
            statusMessage += (fileFailureCount == 0 ? ""
                    : " (" + fileFailureCount + " of which failed file upload)");
            statusMessage += (importFailureCount == 0 ? "."
                    : "; " + importFailureCount + (importFailureCount == 1 ? " study" : " studies")
                            + " failed import.");
            importLogger.info("COMPLETED BATCH IMPORT: " + statusMessage);

            // now index all studies
            importLogger.info("POST BATCH IMPORT, start calls to index.");
            indexService.updateIndexList(studiesToIndex);
            importLogger.info("POST BATCH IMPORT, calls to index finished.");

            addMessage("importMessage", "Batch Import request completed.");
            addMessage("importMessage", statusMessage);
            addMessage("importMessage", "For more detail see log file at: " + logFileName);
        } else {
            addMessage("importMessage",
                    "Batch Import failed: " + importBatchDir + " does not exist or is not a directory.");
        }
    } catch (Exception e) {
        e.printStackTrace();
        addMessage("importMessage", "Batch Import failed: An unexpected error occurred during processing.");
        addMessage("importMessage", "Exception message: " + e.getMessage());
    } finally {
        if (logFileHandler != null) {
            logFileHandler.close();
            importLogger.removeHandler(logFileHandler);
        }
        // importBatchDir = "";
    }
    return null;
}