Usage examples for java.nio.file.Files.move
Signature: public static Path move(Path source, Path target, CopyOption... options) throws IOException
From source file: com.stimulus.archiva.store.MessageStore.java
public byte[] writeEmail(Email message, File file, boolean compress, boolean encrypt) throws MessageStoreException { logger.debug("writeEmail"); OutputStream fos = null;/*from ww w . j a v a 2 s. com*/ try { MessageDigest sha = MessageDigest.getInstance("SHA-1"); fos = getRawMessageOutputStream(file, compress, encrypt); DigestOutputStream dos = new DigestOutputStream(fos, sha); message.writeTo(dos); byte[] digest = sha.digest(); if (digest == null) { throw new MessageStoreException("failed to generate email digest. digest is null.", logger, ChainedException.Level.DEBUG); } return digest; } catch (Exception e) { if (file.exists()) { boolean deleted = file.delete(); if (!deleted) { try { //Mod Start Seolhwa.kim 2017-04-13 //file.renameTo(File.createTempFile("ma", "tmp")); File tmpfile = File.createTempFile("ma", "tmp"); Files.move(Paths.get(file.getAbsolutePath()), Paths.get(tmpfile.getAbsolutePath()), StandardCopyOption.REPLACE_EXISTING); //Mod End Seolhwa.kim 2017-04-13 Config.getFileSystem().getTempFiles().markForDeletion(file); } catch (Exception e3) { } } } throw new MessageStoreException("failed to write email {filename='" + file.getAbsolutePath() + "'", e, logger); } finally { try { if (fos != null) fos.close(); } catch (Exception e) { logger.error("failed to close email file:" + e.getMessage()); } } /* try { //System.out.println("WRITEMAIL:"+message.getContent()+"XXXXXXXXXXXXXXXXXXXXXX"); FileOutputStream fos2 = new FileOutputStream("c:\\test.eml"); message.writeTo(fos2); fos2.close(); } catch (Exception e) { e.printStackTrace(); logger.error(e); }*/ }
From source file: org.tellervo.desktop.bulkdataentry.command.PopulateFromODKCommand.java
/** * Rename a file, ensuring the new file is unique. If a file with the suggested new name already exists then an index is added to the end of the filename. * /*from w w w . j av a 2 s . com*/ * @param fileToRename * @param newname * @return * @throws IOException */ private Path renameFile(File fileToRename, String newname) throws IOException { File newFile = getUniqueFilename(new File(fileToRename.getParent(), newname)); log.debug("Renaming file from '" + fileToRename.toString() + "' to '" + newFile + "'"); return Files.move(fileToRename.toPath(), newFile.toPath(), StandardCopyOption.ATOMIC_MOVE); }
From source file: misc.FileHandler.java
/** * Writes the given synchronization version to the version file which * belongs to the given file name. The version file is created, if it does * not exist yet. Returns whether the version file was successfully written. * /* ww w . j a va2 s. co m*/ * @param version * the version number up to which all actions have been executed. * Must be at least <code>0</code>. * @param clientRoot * the complete path to the client's root directory. Must exist. * @param syncRoot * the complete path to the synchronization root directory. Must * exist. * @param pathLocal * the local path to synchronize containing an access bundle. * Must be relative to <code>clientRoot</code>. May not be * <code>null</code>. * @return <code>true</code>, if the version file was successfully written. * Otherwise, <code>false</code>. */ public static boolean writeVersion(int version, Path clientRoot, Path syncRoot, Path pathLocal) { if (version < 0) { throw new IllegalArgumentException("version must be at least 0!"); } if ((clientRoot == null) || !Files.isDirectory(clientRoot)) { throw new IllegalArgumentException("clientRoot must be an existing directory!"); } if ((syncRoot == null) || !Files.isDirectory(syncRoot)) { throw new IllegalArgumentException("syncRoot must be an existing directory!"); } if (pathLocal == null) { throw new NullPointerException("pathLocal may not be null!"); } boolean success = false; Path arbitraryFileName = Paths.get(pathLocal.toString(), "arbitrary"); Path accessBundleDirectory = FileHandler.getAccessBundleDirectory(clientRoot, arbitraryFileName); if (accessBundleDirectory != null) { Path versionFile = Paths.get(syncRoot.toString(), accessBundleDirectory.toString(), SynchronizationExecutor.VERSION_FILE); FileHandler.makeParentDirs(versionFile); /* * Write the new version into a temporary file and rename it to the * version file. 
*/ Path tempFile = FileHandler.getTempFile(versionFile); if (tempFile != null) { try (BufferedWriter writer = Files.newBufferedWriter(tempFile, Coder.CHARSET);) { writer.write(String.valueOf(version)); writer.write('\n'); writer.flush(); Files.move(tempFile, versionFile, StandardCopyOption.REPLACE_EXISTING); success = true; } catch (IOException e) { Logger.logError(e); } finally { if (tempFile != null) { try { Files.deleteIfExists(tempFile); } catch (IOException e) { Logger.logError(e); } } } } } return success; }
From source file: org.apache.storm.localizer.AsyncLocalizer.java
private LocalizedResource downloadBlob(Map<String, Object> conf, String key, File localFile, String user, boolean uncompress, boolean isUpdate) throws AuthorizationException, KeyNotFoundException, IOException { ClientBlobStore blobstore = null;// w ww . java2 s . c o m try { blobstore = getClientBlobStore(); long nimbusBlobVersion = ServerUtils.nimbusVersionOfBlob(key, blobstore); long oldVersion = ServerUtils.localVersionOfBlob(localFile.toString()); FileOutputStream out = null; PrintWriter writer = null; int numTries = 0; String localizedPath = localFile.toString(); String localFileWithVersion = ServerUtils.constructBlobWithVersionFileName(localFile.toString(), nimbusBlobVersion); String localVersionFile = ServerUtils.constructVersionFileName(localFile.toString()); String downloadFile = localFileWithVersion; if (uncompress) { // we need to download to temp file and then unpack into the one requested downloadFile = new File(localFile.getParent(), TO_UNCOMPRESS + localFile.getName()).toString(); } while (numTries < blobDownloadRetries) { out = new FileOutputStream(downloadFile); numTries++; try { if (!ServerUtils.canUserReadBlob(blobstore.getBlobMeta(key), user, conf)) { throw new AuthorizationException(user + " does not have READ access to " + key); } InputStreamWithMeta in = blobstore.getBlob(key); byte[] buffer = new byte[1024]; int len; while ((len = in.read(buffer)) >= 0) { out.write(buffer, 0, len); } out.close(); in.close(); if (uncompress) { ServerUtils.unpack(new File(downloadFile), new File(localFileWithVersion)); LOG.debug("uncompressed " + downloadFile + " to: " + localFileWithVersion); } // Next write the version. 
LOG.info("Blob: " + key + " updated with new Nimbus-provided version: " + nimbusBlobVersion + " local version was: " + oldVersion); // The false parameter ensures overwriting the version file, not appending writer = new PrintWriter(new BufferedWriter(new FileWriter(localVersionFile, false))); writer.println(nimbusBlobVersion); writer.close(); try { setBlobPermissions(conf, user, localFileWithVersion); setBlobPermissions(conf, user, localVersionFile); // Update the key.current symlink. First create tmp symlink and do // move of tmp to current so that the operation is atomic. String tmp_uuid_local = java.util.UUID.randomUUID().toString(); LOG.debug("Creating a symlink @" + localFile + "." + tmp_uuid_local + " , " + "linking to: " + localFile + "." + nimbusBlobVersion); File uuid_symlink = new File(localFile + "." + tmp_uuid_local); Files.createSymbolicLink(uuid_symlink.toPath(), Paths.get(ServerUtils .constructBlobWithVersionFileName(localFile.toString(), nimbusBlobVersion))); File current_symlink = new File( ServerUtils.constructBlobCurrentSymlinkName(localFile.toString())); Files.move(uuid_symlink.toPath(), current_symlink.toPath(), ATOMIC_MOVE); } catch (IOException e) { // if we fail after writing the version file but before we move current link we need to // restore the old version to the file try { PrintWriter restoreWriter = new PrintWriter( new BufferedWriter(new FileWriter(localVersionFile, false))); restoreWriter.println(oldVersion); restoreWriter.close(); } catch (IOException ignore) { } throw e; } String oldBlobFile = localFile + "." + oldVersion; try { // Remove the old version. Note that if a number of processes have that file open, // the OS will keep the old blob file around until they all close the handle and only // then deletes it. No new process will open the old blob, since the users will open the // blob through the "blob.current" symlink, which always points to the latest version of // a blob. 
Remove the old version after the current symlink is updated as to not affect // anyone trying to read it. if ((oldVersion != -1) && (oldVersion != nimbusBlobVersion)) { LOG.info("Removing an old blob file:" + oldBlobFile); Files.delete(Paths.get(oldBlobFile)); } } catch (IOException e) { // At this point we have downloaded everything and moved symlinks. If the remove of // old fails just log an error LOG.error("Exception removing old blob version: " + oldBlobFile); } break; } catch (AuthorizationException ae) { // we consider this non-retriable exceptions if (out != null) { out.close(); } new File(downloadFile).delete(); throw ae; } catch (IOException | KeyNotFoundException e) { if (out != null) { out.close(); } if (writer != null) { writer.close(); } new File(downloadFile).delete(); if (uncompress) { try { FileUtils.deleteDirectory(new File(localFileWithVersion)); } catch (IOException ignore) { } } if (!isUpdate) { // don't want to remove existing version file if its an update new File(localVersionFile).delete(); } if (numTries < blobDownloadRetries) { LOG.error("Failed to download blob, retrying", e); } else { throw e; } } } return new LocalizedResource(key, localizedPath, uncompress); } finally { if (blobstore != null) { blobstore.shutdown(); } } }
From source file: org.apache.solr.handler.IndexFetcher.java
/** * The tlog files are moved from the tmp dir to the tlog dir as an atomic filesystem operation. * A backup of the old directory is maintained. If the directory move fails, it will try to revert back the original * tlog directory.//from w w w. j ava2 s.com */ private boolean copyTmpTlogFiles2Tlog(File tmpTlogDir) { Path tlogDir = FileSystems.getDefault().getPath(solrCore.getUpdateHandler().getUpdateLog().getLogDir()); Path backupTlogDir = FileSystems.getDefault().getPath(tlogDir.getParent().toAbsolutePath().toString(), tmpTlogDir.getName()); try { Files.move(tlogDir, backupTlogDir, StandardCopyOption.ATOMIC_MOVE); } catch (IOException e) { SolrException.log(LOG, "Unable to rename: " + tlogDir + " to: " + backupTlogDir, e); return false; } Path src = FileSystems.getDefault().getPath(backupTlogDir.toAbsolutePath().toString(), tmpTlogDir.getName()); try { Files.move(src, tlogDir, StandardCopyOption.ATOMIC_MOVE); } catch (IOException e) { SolrException.log(LOG, "Unable to rename: " + src + " to: " + tlogDir, e); // In case of error, try to revert back the original tlog directory try { Files.move(backupTlogDir, tlogDir, StandardCopyOption.ATOMIC_MOVE); } catch (IOException e2) { // bad, we were not able to revert back the original tlog directory throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to rename: " + backupTlogDir + " to: " + tlogDir); } return false; } return true; }
From source file: io.minio.MinioClient.java
/** * Gets object's data in the given bucket and stores it to given file name. * * </p><b>Example:</b><br> * <pre>{@code minioClient.getObject("my-bucketname", "my-objectname", "photo.jpg"); }</pre> * * @param bucketName Bucket name.//from w w w . ja va 2s. co m * @param objectName Object name in the bucket. * @param fileName file name. * * @throws InvalidBucketNameException upon invalid bucket name is given * @throws NoResponseException upon no response from server * @throws IOException upon connection error * @throws XmlPullParserException upon parsing response xml * @throws ErrorResponseException upon unsuccessful execution * @throws InternalException upon internal library error */ public void getObject(String bucketName, String objectName, String fileName) throws InvalidBucketNameException, NoSuchAlgorithmException, InsufficientDataException, IOException, InvalidKeyException, NoResponseException, XmlPullParserException, ErrorResponseException, InternalException, InvalidArgumentException { Path filePath = Paths.get(fileName); boolean fileExists = Files.exists(filePath); if (fileExists && !Files.isRegularFile(filePath)) { throw new InvalidArgumentException(fileName + ": not a regular file"); } ObjectStat objectStat = statObject(bucketName, objectName); long length = objectStat.length(); String etag = objectStat.etag(); String tempFileName = fileName + "." + etag + ".part.minio"; Path tempFilePath = Paths.get(tempFileName); boolean tempFileExists = Files.exists(tempFilePath); if (tempFileExists && !Files.isRegularFile(tempFilePath)) { throw new IOException(tempFileName + ": not a regular file"); } long tempFileSize = 0; if (tempFileExists) { tempFileSize = Files.size(tempFilePath); if (tempFileSize > length) { Files.delete(tempFilePath); tempFileExists = false; tempFileSize = 0; } } if (fileExists) { long fileSize = Files.size(filePath); if (fileSize == length) { // already downloaded. 
nothing to do return; } else if (fileSize > length) { throw new InvalidArgumentException( "'" + fileName + "': object size " + length + " is smaller than file size " + fileSize); } else if (!tempFileExists) { // before resuming the download, copy filename to tempfilename Files.copy(filePath, tempFilePath); tempFileSize = fileSize; tempFileExists = true; } } InputStream is = null; OutputStream os = null; try { is = getObject(bucketName, objectName, tempFileSize); os = Files.newOutputStream(tempFilePath, StandardOpenOption.CREATE, StandardOpenOption.APPEND); long bytesWritten = ByteStreams.copy(is, os); is.close(); os.close(); if (bytesWritten != length - tempFileSize) { throw new IOException(tempFileName + ": unexpected data written. expected = " + (length - tempFileSize) + ", written = " + bytesWritten); } Files.move(tempFilePath, filePath, StandardCopyOption.REPLACE_EXISTING); } finally { if (is != null) { is.close(); } if (os != null) { os.close(); } } }
From source file: org.dcm4chee.proxy.forward.ForwardFiles.java
private Collection<ForwardTask> scanFiles(ProxyAEExtension proxyAEE, String calledAET, File[] files) { HashMap<String, ForwardTask> map = new HashMap<String, ForwardTask>(4); for (File file : files) { try {//from w ww . j a va2 s .c om if (lock.tryLock(100, TimeUnit.MILLISECONDS)) { try { if (file.exists()) { String prevFilePath = file.getPath(); File snd = new File(prevFilePath + ".snd"); try { Files.move(file.toPath(), snd.toPath(), StandardCopyOption.REPLACE_EXISTING); LOG.debug("Successfully renamed {} to {}", prevFilePath, snd.getPath()); LOG.debug("Adding file {} to forward tasks ", snd.getPath()); addFileToFwdTaskMap(proxyAEE, calledAET, snd, map); LOG.debug( "Successfully added file {} to forward tasks , proceeding with scheduled send", snd.getPath()); } catch (Exception e) { LOG.error( "Error moving {} to {}. Skip file for now and try again on next scheduler run. - {}", prevFilePath, snd.getPath(), e); if (!file.exists() && snd.exists()) if (snd.renameTo(file)) LOG.debug("Rename {} to {}", snd.getPath(), file.getPath()); else LOG.debug("Error renaming {} to {}", snd.getPath(), file.getPath()); else if (snd.exists() && file.exists()) try { Files.delete(snd.toPath()); } catch (Exception e1) { LOG.error( "Unable to delete file {} after failed rename from {} to {} - {}", snd, prevFilePath, snd.getPath(), e1); } } } } finally { lock.unlock(); } } } catch (InterruptedException e) { LOG.error("Error acquiring lock for file scan and rename {}", e); } } return map.values(); }
From source file: com.ut.healthelink.service.impl.transactionInManagerImpl.java
@Override public boolean loadBatch(Integer batchId) { Integer batchStatusId = 38;/* w ww .j ava 2 s.co m*/ List<Integer> errorStatusIds = Arrays.asList(11, 13, 14, 16); String processFolderPath = "/bowlink/loadFiles/"; try { /** insert log**/ try { //log user activity UserActivity ua = new UserActivity(); ua.setUserId(0); ua.setFeatureId(0); ua.setAccessMethod("System"); ua.setActivity("System Loaded Batch"); ua.setBatchUploadId(batchId); usermanager.insertUserLog(ua); } catch (Exception ex) { ex.printStackTrace(); System.err.println("loadBatch - insert user log" + ex.toString()); } //first thing we do is get details, then we set it to 4 batchUploads batch = getBatchDetails(batchId); // set batch to SBL - 38 updateBatchStatus(batchId, batchStatusId, "startDateTime"); // let's clear all tables first as we are starting over Integer sysErrors = clearTransactionTables(batchId, false); String errorMessage = "Load errors, please contact admin to review logs"; // loading batch will take it all the way to loaded (9) status for if (sysErrors > 0) { insertProcessingError(5, null, batchId, null, null, null, null, false, false, "Error cleaning out transaction tables. Batch cannot be loaded."); updateBatchStatus(batchId, 39, "endDateTime"); return false; } String loadTableName = "uploadTable_" + batch.getId(); //make sure old table is dropped if exists Integer sysError = dropLoadTable(loadTableName); sysError = sysError + createLoadTable(loadTableName); //we need to index loadTable sysError = sysError + indexLoadTable(loadTableName); fileSystem dir = new fileSystem(); dir.setDirByName("/"); //2. 
we load data with my sql String actualFileName = null; String newfilename = null; /** * decoded files will always be in loadFiles folder with UTBatchName * */ // all files are Base64 encoded at this point String encodedFilePath = dir.setPath(batch.getFileLocation()); String encodedFileName = batch.getoriginalFileName(); File encodedFile = new File(encodedFilePath + encodedFileName); String decodedFilePath = dir.setPath(processFolderPath); String decodedFileName = batch.getutBatchName(); String decodedFileExt = batch.getoriginalFileName() .substring(batch.getoriginalFileName().lastIndexOf(".")); String strDecode = ""; try { strDecode = filemanager.decodeFileToBase64Binary(encodedFile); } catch (Exception ex) { ex.printStackTrace(); strDecode = ""; sysErrors = 1; processingSysErrorId = 17; } if (!strDecode.equalsIgnoreCase("")) { //we write and decode file filemanager.writeFile((decodedFilePath + decodedFileName + decodedFileExt), strDecode); actualFileName = (decodedFilePath + decodedFileName + decodedFileExt); /* If batch is set up for CCD input then we need to translate it to a pipe-delimited text file. 
*/ /** * here we need to check if we should change file to xml or hr for org * sometimes org will send hl7 files over or .out or some other extension, they all need to be .hr * all ccd file will need to end in xml * */ //so we check decodedFileName and change it to the proper extension if need be String chagneToExtension = ""; String processFileName = batch.getoriginalFileName(); /** * For configId of 0, we need to check to see if org has hr or ccd * if configId is not 0, we pull up the extension type and rename file * if we find more than one file extension set up for org we reject them them * file extension will be 4 (hr) or 9 (ccd) * info we have from batchUpload - transportMethodId, configId, orgId * */ if (batch.getConfigId() != 0) { configurationTransport ct = configurationtransportmanager .getTransportDetails(batch.getConfigId()); if (ct.getfileType() == 9) { chagneToExtension = "xml"; } else if (ct.getfileType() == 4) { chagneToExtension = "hr"; } } else if (batch.getConfigId() == 0) { //should restrict this to only 4/9 //see if the users has any 4/9 fileType, we don't need to worry about changing extension if org doesn't List<configurationTransport> ctList = configurationtransportmanager .getConfigurationTransportFileExtByFileType(batch.getOrgId(), batch.gettransportMethodId(), null, Arrays.asList(1), true, false); if (ctList.size() > 1) { //it is ok to have multiple if they are not using file type 4/9, so we check again List<configurationTransport> ctList2 = configurationtransportmanager .getConfigurationTransportFileExtByFileType(batch.getOrgId(), batch.gettransportMethodId(), Arrays.asList(4, 9), Arrays.asList(1), true, false); if (ctList2.size() != 0) { //they have multiple file types defined along with hr or ccd, we fail them //clean up File tempLoadFile = new File(actualFileName); if (tempLoadFile.exists()) { tempLoadFile.delete(); } //log updateBatchStatus(batchId, 7, "endDateTime"); insertProcessingError(18, null, batchId, null, null, null, null, 
false, false, "Multiple file types were found for transport method."); //get out of loop return false; } } else if (ctList.size() == 1) { if (ctList.get(0).getfileType() == 9) { chagneToExtension = "xml"; } else if (ctList.get(0).getfileType() == 4) { chagneToExtension = "hr"; } } } if (chagneToExtension != "") { processFileName = batch.getutBatchName() + "." + chagneToExtension; //we overwrite file //old file is here actualFileName; //new file is the same name with diff extension File actualFile = new File(actualFileName); File fileWithNewExtension = new File(decodedFilePath + processFileName); Path fileWithOldExtension = actualFile.toPath(); Path renamedFile = fileWithNewExtension.toPath(); Files.move(fileWithOldExtension, renamedFile, REPLACE_EXISTING); } if (processFileName.endsWith(".xml")) { newfilename = ccdtotxt.TranslateCCDtoTxt(decodedFilePath, decodedFileName, batch.getOrgId()); actualFileName = (decodedFilePath + newfilename); //we remove temp load file File tempLoadFile = new File(decodedFilePath + processFileName); if (tempLoadFile.exists()) { tempLoadFile.delete(); } /* if the original file name is a HL7 file (".hr") then we are going to translate it to a pipe-delimited text file. 
*/ } else if (processFileName.endsWith(".hr")) { newfilename = hl7toTxt.TranslateHl7toTxt(decodedFilePath, decodedFileName, batch.getOrgId()); actualFileName = (decodedFilePath + newfilename); //we remove temp load file File tempLoadFile = new File(decodedFilePath + processFileName); if (tempLoadFile.exists()) { tempLoadFile.delete(); } } //at this point, hl7 and hr are in unencoded plain text if (actualFileName.endsWith(".txt") || actualFileName.endsWith(".csv")) { sysError = sysError + insertLoadData(batch.getId(), batch.getDelimChar(), actualFileName, loadTableName, batch.isContainsHeaderRow()); File actualFile = new File(actualFileName); //we are archiving it File archiveFile = new File(dir.setPath(archivePath) + batch.getutBatchName() + "_dec" + actualFileName.substring(actualFileName.lastIndexOf("."))); Path archive = archiveFile.toPath(); Path actual = actualFile.toPath(); //we keep original file in archive folder Files.move(actual, archive, REPLACE_EXISTING); } //3. we update batchId, loadRecordId sysError = sysError + updateLoadTable(loadTableName, batch.getId()); // 4. we insert into transactionIn - status of invalid (11), batchId, loadRecordId sysError = sysError + loadTransactionIn(loadTableName, batch.getId()); //5. we insert into transactionInRecords - we select transactionIn batchId, transactionInId sysError = sysError + loadTransactionInRecords(batch.getId()); //6. we match loadRecordId and update transactionInRecords's F1-F255 data sysError = sysError + loadTransactionInRecordsData(loadTableName); //7. we delete loadTable sysError = sysError + dropLoadTable(loadTableName); //8. we see how if the file only has one upload type so we don't need to parse every line // if we only have one, we update the entire table if (batch.getConfigId() != null && batch.getConfigId() != 0) { // we update entire transactionIN with configId sysError = sysError + updateConfigIdForBatch(batch.getId(), batch.getConfigId()); } else { //1. 
we get all configs for user - user might not have permission to submit but someone else in org does List<configurationMessageSpecs> configurationMessageSpecs = configurationtransportmanager .getConfigurationMessageSpecsForOrgTransport(batch.getOrgId(), batch.gettransportMethodId(), false); //2. we get all rows for batch List<transactionInRecords> tInRecords = getTransactionInRecordsForBatch(batch.getId()); if (tInRecords == null || tInRecords.size() == 0) { updateBatchStatus(batchId, 7, "endDateTime"); insertProcessingError(7, null, batchId, null, null, null, null, false, false, "No valid transactions were found for batch."); return false; } if (configurationMessageSpecs == null || configurationMessageSpecs.size() == 0) { insertProcessingError(6, null, batchId, null, null, null, null, false, false, "No valid configurations were found for loading batch."); // update all transactions to invalid updateBatchStatus(batchId, 7, "endDateTime"); updateTransactionStatus(batchId, 0, 0, 11); return false; } //if we only have one and it is set to 0,we can default, else we loop through if (configurationMessageSpecs.size() == 1 && configurationMessageSpecs.get(0).getmessageTypeCol() == 0) { sysError = sysError + updateConfigIdForBatch(batch.getId(), configurationMessageSpecs.get(0).getconfigId()); } else { //3 loop through each config and mass update by config for (configurationMessageSpecs cms : configurationMessageSpecs) { //we update by config if (updateConfigIdForCMS(batchId, cms) != 0) { sysError++; insertProcessingError(processingSysErrorId, null, batch.getId(), null, null, null, null, false, false, "System error while checking configuration"); //system error - break break; } } // now we looped through config, we flag the invalid records. 
sysError = flagInvalidConfig(batchId); //we also need to flag and error the ones that a user is not supposed to upload for sysError = flagNoPermissionConfig(batch); } } //we populate transactionTranslatedIn sysError = sysError + loadTransactionTranslatedIn(batchId); //update data in transactionTranslatedIn resetTransactionTranslatedIn(batchId, true); int transactionId = 0; // we trim all values trimFieldValues(batchId, false, transactionId, true); //now that we have our config, we will apply pre-processing cw and macros to manipulate our data //1. find all configs for batch, loop and process List<Integer> configIds = getConfigIdsForBatch(batchId, false, transactionId); for (Integer configId : configIds) { //we need to run all checks before insert regardless * /** * we are reordering 1. cw/macro, 2. required and 3. validate * */ // 1. grab the configurationDataTranslations and run cw/macros List<configurationDataTranslations> dataTranslations = configurationManager .getDataTranslationsWithFieldNo(configId, 2); //pre processing for (configurationDataTranslations cdt : dataTranslations) { if (cdt.getCrosswalkId() != 0) { sysError = sysError + processCrosswalk(configId, batchId, cdt, false, transactionId); } else if (cdt.getMacroId() != 0) { sysError = sysError + processMacro(configId, batchId, cdt, false, transactionId); } } } } if (sysErrors > 0) { insertProcessingError(processingSysErrorId, null, batchId, null, null, null, null, false, false, errorMessage); updateBatchStatus(batchId, 39, "endDateTime"); return false; } //we check handling here for rejecting entire batch List<configurationTransport> batchHandling = getHandlingDetailsByBatch(batchId); // if entire batch failed and have no configIds, there will be no error handling found if (getRecordCounts(batchId, Arrays.asList(11), false) == getRecordCounts(batchId, new ArrayList<Integer>(), false)) { //entire batch failed, we reject entire batch updateRecordCounts(batchId, errorStatusIds, false, 
"errorRecordCount"); updateRecordCounts(batchId, new ArrayList<Integer>(), false, "totalRecordCount"); updateBatchStatus(batchId, 7, "endDateTime"); return false; } else if (batchHandling.size() != 1) { //TODO email admin to fix problem insertProcessingError(8, null, batchId, null, null, null, null, false, false, "Multiple or no file handling found, please check auto-release and error handling configurations"); updateRecordCounts(batchId, new ArrayList<Integer>(), false, "totalRecordCount"); // do we count pass records as errors? updateRecordCounts(batchId, errorStatusIds, false, "errorRecordCount"); updateBatchStatus(batchId, 39, "endDateTime"); return false; } if (batchHandling.size() == 1) { //reject submission on error if (batchHandling.get(0).geterrorHandling() == 3) { // at this point we will only have invalid records if (getRecordCounts(batchId, errorStatusIds, false) > 0) { updateBatchStatus(batchId, 7, "endDateTime"); updateRecordCounts(batchId, errorStatusIds, false, "errorRecordCount"); //update loaded to rejected updateTransactionStatus(batchId, 0, 9, 13); return false; } } } updateRecordCounts(batchId, errorStatusIds, false, "errorRecordCount"); updateRecordCounts(batchId, new ArrayList<Integer>(), false, "totalRecordCount"); //at the end of loaded, we update to PR updateTransactionStatus(batchId, 0, 9, 10); updateTransactionTargetStatus(batchId, 0, 9, 10); batchStatusId = 36; //loaded without targets } catch (Exception ex) { insertProcessingError(processingSysErrorId, null, batchId, null, null, null, null, false, false, ("loadBatch error " + ex.getLocalizedMessage())); batchStatusId = 39; } try { updateBatchStatus(batchId, batchStatusId, "endDateTime"); updateRecordCounts(batchId, new ArrayList<Integer>(), false, "totalRecordCount"); // do we count pass records as errors? 
updateRecordCounts(batchId, errorStatusIds, false, "errorRecordCount"); } catch (Exception ex1) { Logger.getLogger(this.getClass().getName()).log(Level.SEVERE, null, ("loadBatch error at updating batch status - " + ex1)); return false; } return true; }
From source file: com.ut.healthelink.service.impl.transactionOutManagerImpl.java
/** * The 'RhapsodyTargetFile' function will get the Rhapsody details and move the file to the * output folder defined in /*from ww w .j av a 2 s. co m*/ * * @param batchId The id of the batch to move to Rhapsody folder */ private void RhapsodyTargetFile(int batchId, configurationTransport transportDetails) { try { /* Update the status of the batch to locked */ transactionOutDAO.updateBatchStatus(batchId, 22); List<transactionTarget> targets = transactionOutDAO.getTransactionsByBatchDLId(batchId); if (!targets.isEmpty()) { for (transactionTarget target : targets) { /* Need to update the uploaded batch status */ transactionInManager.updateBatchStatus(target.getbatchUploadId(), 22, ""); /* Need to update the uploaded batch transaction status */ transactionInManager.updateTransactionStatus(target.getbatchUploadId(), target.gettransactionInId(), 0, 37); /* Update the downloaded batch transaction status */ transactionOutDAO.updateTargetTransasctionStatus(target.getbatchDLId(), 37); } } /* get the batch details */ batchDownloads batchDetails = transactionOutDAO.getBatchDetails(batchId); /* Get the Rhapsody Details */ configurationRhapsodyFields rhapsodyDetails = configurationTransportManager .getTransRhapsodyDetailsPush(transportDetails.getId()); // the file is in output folder already, we need to rebuild path and move it fileSystem dir = new fileSystem(); String filelocation = transportDetails.getfileLocation(); filelocation = filelocation.replace("/bowlink/", ""); dir.setDirByName(filelocation); File sourceFile = new File(dir.getDir() + batchDetails.getoutputFIleName()); File targetFile = new File( dir.setPathFromRoot(rhapsodyDetails.getDirectory()) + batchDetails.getoutputFIleName()); //move the file over and update the status to complete Files.move(sourceFile.toPath(), targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING); transactionOutDAO.updateBatchStatus(batchId, 23); for (transactionTarget target : targets) { /* Need to update the uploaded batch status */ 
transactionInManager.updateBatchStatus(target.getbatchUploadId(), 23, ""); /* Need to update the uploaded batch transaction status */ transactionInManager.updateTransactionStatus(target.getbatchUploadId(), target.gettransactionInId(), 0, 20); /* Update the downloaded batch transaction status */ transactionOutDAO.updateTargetTransasctionStatus(target.getbatchDLId(), 20); } } catch (Exception e) { e.printStackTrace(); System.err.println( "RhapsodyTargetFile - Error occurred trying to move a batch target. batchId: " + batchId); } }