Example usage for java.nio.channels FileChannel close

List of usage examples for java.nio.channels FileChannel close

Introduction

On this page you can find example usages of java.nio.channels.FileChannel.close.

Prototype

public final void close() throws IOException 

Document

Closes this channel.
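
Invoking close() on a channel that is already closed has no effect, and because FileChannel implements AutoCloseable it is usually closed implicitly with a try-with-resources statement (Java 7+) rather than an explicit call. A minimal sketch, with an illustrative file name:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class FileChannelCloseExample {
    public static void main(String[] args) throws IOException {
        // close() is invoked automatically when the try block exits,
        // even if read() throws an IOException.
        try (FileChannel channel = FileChannel.open(Paths.get("example.txt"), StandardOpenOption.READ)) {
            ByteBuffer buffer = ByteBuffer.allocate((int) channel.size());
            while (channel.read(buffer) > 0) {
                // keep reading until end of file
            }
        } // channel.close() has already run at this point
    }
}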

Usage

From source file:com.android.mms.transaction.RetrieveTransaction.java

public int checkPduResult() {
    if (!mPduFile.exists()) {
        Log.e(MmsApp.TXN_TAG, "checkPduResult MMS Fail, no pduFile = " + mPduFile);
        return SmsManager.MMS_ERROR_UNSPECIFIED;
    }
    FileChannel channel = null;
    FileInputStream fs = null;
    RetrieveConf retrieveConf;
    try {
        fs = new FileInputStream(mPduFile);
        channel = fs.getChannel();
        ByteBuffer byteBuffer = ByteBuffer.allocate((int) channel.size());
        while ((channel.read(byteBuffer)) > 0) {
            // do nothing
            // System.out.println("reading");
        }
        final GenericPdu pdu = (new PduParser(byteBuffer.array(),
                PduParserUtil.shouldParseContentDisposition(mSubId))).parse();
        if (pdu == null || !(pdu instanceof RetrieveConf)) {
            Log.e(MmsApp.TXN_TAG, "checkPduResult: invalid parsed PDU");
            return SmsManager.MMS_ERROR_UNSPECIFIED;
        }
        retrieveConf = (RetrieveConf) pdu;
        byte[] messageId = retrieveConf.getMessageId();
        MmsLog.d(MmsApp.TXN_TAG, "checkPduResult retrieveConf.messageId = " + new String(messageId));

        // Store the downloaded message
        PduPersister persister = PduPersister.getPduPersister(mContext);
        Uri messageUri = persister.persist(pdu, Telephony.Mms.Inbox.CONTENT_URI, true/*createThreadId*/,
                true/*groupMmsEnabled*/, null/*preOpenedFiles*/);
        if (messageUri == null) {
            Log.e(MmsApp.TXN_TAG, "checkPduResult: can not persist message");
            return SmsManager.MMS_ERROR_UNSPECIFIED;
        }
        mMessageUri = messageUri.toString();
        // Update some of the properties of the message
        final ContentValues values = new ContentValues();
        values.put(Telephony.Mms.DATE, System.currentTimeMillis() / 1000L);
        values.put(Telephony.Mms.READ, 0);
        values.put(Telephony.Mms.SEEN, 0);
        String creator = ActivityThread.currentPackageName();
        if (!TextUtils.isEmpty(creator)) {
            values.put(Telephony.Mms.CREATOR, creator);
        }
        values.put(Telephony.Mms.SUBSCRIPTION_ID, mSubId);
        if (SqliteWrapper.update(mContext, mContext.getContentResolver(), messageUri, values, null/*where*/,
                null/*selectionArg*/) != 1) {
            Log.e(MmsApp.TXN_TAG, "persistIfRequired: can not update message");
        }
        // Delete the corresponding NotificationInd
        SqliteWrapper.delete(mContext, mContext.getContentResolver(), Telephony.Mms.CONTENT_URI,
                LOCATION_SELECTION,
                new String[] { Integer.toString(PduHeaders.MESSAGE_TYPE_NOTIFICATION_IND), mContentLocation });
        return Activity.RESULT_OK;
    } catch (IOException e) {
        e.printStackTrace();
        return SmsManager.MMS_ERROR_UNSPECIFIED;
    } catch (MmsException e) {
        e.printStackTrace();
        return SmsManager.MMS_ERROR_UNSPECIFIED;
    } catch (SQLiteException e) {
        e.printStackTrace();
        return SmsManager.MMS_ERROR_UNSPECIFIED;
    } catch (RuntimeException e) {
        e.printStackTrace();
        return SmsManager.MMS_ERROR_UNSPECIFIED;
    } finally {
        if (mPduFile != null) {
            mPduFile.delete();
        }
        try {
            if (channel != null) {
                channel.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        try {
            if (fs != null) {
                fs.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
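
Two notes on the cleanup above: close() may safely be called on an already-closed channel, and closing the FileInputStream also closes the channel obtained from it, so the two try blocks in the finally clause overlap in effect. A minimal try-with-resources sketch of the same read-and-close pattern (readPduBytes and its parameter are hypothetical stand-ins for the fields used above):

// Hypothetical helper mirroring the read portion of checkPduResult().
static byte[] readPduBytes(File pduFile) throws IOException {
    try (FileInputStream fs = new FileInputStream(pduFile);
            FileChannel channel = fs.getChannel()) {
        ByteBuffer buffer = ByteBuffer.allocate((int) channel.size());
        while (channel.read(buffer) > 0) {
            // drain the whole file into the buffer
        }
        return buffer.array();
    } // channel and fs are closed automatically, in reverse declaration order
}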

From source file:com.qubole.rubix.core.RemoteReadRequestChain.java

public Integer call() throws IOException {
    Thread.currentThread().setName(threadName);
    checkState(isLocked, "Trying to execute Chain without locking");

    if (readRequests.size() == 0) {
        return 0;
    }

    RandomAccessFile localFile = null;
    FileChannel fc = null;

    try {
        localFile = new RandomAccessFile(localFilename, "rw");
        fc = localFile.getChannel();
        for (ReadRequest readRequest : readRequests) {
            log.debug(String.format("Executing ReadRequest: [%d, %d, %d, %d, %d]",
                    readRequest.getBackendReadStart(), readRequest.getBackendReadEnd(),
                    readRequest.getActualReadStart(), readRequest.getActualReadEnd(),
                    readRequest.getDestBufferOffset()));
            inputStream.seek(readRequest.backendReadStart);
            MappedByteBuffer mbuf = fc.map(FileChannel.MapMode.READ_WRITE, readRequest.backendReadStart,
                    readRequest.getBackendReadLength());
            log.debug(String.format("Mapped file from %d till length %d", readRequest.backendReadStart,
                    readRequest.getBackendReadLength()));
            /*
             * MappedByteBuffer does not provide backing byte array, so cannot write directly to it via FSDataOutputStream.read
             * Instead, download to normal destination buffer (+offset buffer to get block boundaries) and then copy to MappedByteBuffer
             */

            int prefixBufferLength = (int) (readRequest.getActualReadStart()
                    - readRequest.getBackendReadStart());
            int suffixBufferLength = (int) (readRequest.getBackendReadEnd() - readRequest.getActualReadEnd());
            log.debug(
                    String.format("PrefixLength: %d SuffixLength: %d", prefixBufferLength, suffixBufferLength));

            // TODO: use single byte buffer for all three streams
            /* TODO: GC cost could also be lowered with a small shared buffer pool
             * (see IOUtils.copyLarge); a single 4 kB byte buffer could be used to copy the whole file.
             */
            if (prefixBufferLength > 0) {
                byte[] prefixBuffer = new byte[prefixBufferLength];
                log.debug(String.format("Trying to Read %d bytes into prefix buffer", prefixBufferLength));
                totalPrefixRead += readAndCopy(prefixBuffer, 0, mbuf, prefixBufferLength);
                log.debug(String.format("Read %d bytes into prefix buffer", prefixBufferLength));
            }
            log.debug(String.format("Trying to Read %d bytes into destination buffer",
                    readRequest.getActualReadLength()));
            int readBytes = readAndCopy(readRequest.getDestBuffer(), readRequest.destBufferOffset, mbuf,
                    readRequest.getActualReadLength());
            totalRequestedRead += readBytes;
            log.debug(String.format("Read %d bytes into destination buffer", readBytes));
            if (suffixBufferLength > 0) {
                // If we hit EOF while reading the actually required data, there should not have been a suffix request
                checkState(readBytes == readRequest.getActualReadLength(),
                        "Acutal read less than required, still requested for suffix");
                byte[] suffixBuffer = new byte[suffixBufferLength];
                log.debug(String.format("Trying to Read %d bytes into suffix buffer", suffixBufferLength));
                totalSuffixRead += readAndCopy(suffixBuffer, 0, mbuf, suffixBufferLength);
                log.debug(String.format("Read %d bytes into suffix buffer", suffixBufferLength));
            }
        }
    } finally {
        if (fc != null) {
            fc.close();
        }
        if (localFile != null) {
            localFile.close();
        }
    }
    log.info(String.format("Read %d bytes from remote file, added %d to destination buffer",
            totalPrefixRead + totalRequestedRead + totalSuffixRead, totalRequestedRead));
    return totalRequestedRead;
}

From source file:MyZone.Settings.java

public byte[] readXML(String filename) {
    byte[] readIn = null;
    FileChannel channel = null;
    FileLock lock = null;
    FileInputStream fis = null;
    ByteArrayOutputStream baos = null;
    try {
        File file = new File(filename);
        if (!file.exists()) {
            return null;
        }
        fis = new FileInputStream(file);
        channel = fis.getChannel();
        while ((lock = channel.tryLock(0L, Long.MAX_VALUE, true)) == null) {
            Thread.yield();
        }
        baos = new ByteArrayOutputStream();
        byte[] b = new byte[1024];
        ByteBuffer buf = ByteBuffer.wrap(b);
        int count = 0;
        long fileLength = file.length();
        while (fileLength > 0) {
            count = channel.read(buf);
            if (count >= 0) {
                fileLength -= count;
                baos.write(b, 0, count);
                buf.rewind();
            }
        }
        readIn = baos.toByteArray();
    } catch (Exception e) {
        if (DEBUG) {
            e.printStackTrace();
        }
        readIn = null;
    } finally {
        try {
            if (lock != null) {
                lock.release();
            }
            if (channel != null) {
                channel.close();
            }
            if (fis != null) {
                fis.close();
            }
            if (baos != null) {
                baos.close();
            }
        } catch (Exception e) {
            if (DEBUG) {
                e.printStackTrace();
            }
            readIn = null;
        }
    }
    return readIn;
}
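
One detail the finally block above relies on is ordering: the lock is released before the channel is closed, although by the FileLock contract closing the channel would release the lock as well. A minimal try-with-resources sketch of the same shared-lock read (readAllWithSharedLock is a hypothetical name; the DEBUG handling is omitted):

// Hypothetical variant of readXML(); the shared lock is released no later than channel close.
static byte[] readAllWithSharedLock(File file) throws IOException {
    try (FileInputStream fis = new FileInputStream(file);
            FileChannel channel = fis.getChannel();
            ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
        while (channel.tryLock(0L, Long.MAX_VALUE, true) == null) {
            Thread.yield(); // wait for the shared lock, as in the listing above
        }
        ByteBuffer buf = ByteBuffer.allocate(1024);
        int count;
        while ((count = channel.read(buf)) > 0) {
            baos.write(buf.array(), 0, count);
            buf.clear();
        }
        return baos.toByteArray();
    } // closing the channel also releases the lock if it is still held
}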

From source file:com.linkedin.databus.core.DbusEventBuffer.java

public static ByteBuffer allocateByteBuffer(int size, ByteOrder byteOrder, AllocationPolicy allocationPolicy,
        boolean restoreBuffers, File mmapSessionDir, File mmapFile) {
    ByteBuffer buffer = null;

    switch (allocationPolicy) {
    case HEAP_MEMORY:
        buffer = ByteBuffer.allocate(size).order(byteOrder);
        break;
    case DIRECT_MEMORY:
        buffer = ByteBuffer.allocateDirect(size).order(byteOrder);
        break;
    case MMAPPED_MEMORY:
    default:
        // expect that dirs are already created and initialized
        if (!mmapSessionDir.exists()) {
            throw new RuntimeException(mmapSessionDir.getAbsolutePath() + " doesn't exist");
        }

        if (restoreBuffers) {
            if (!mmapFile.exists()) {
                LOG.warn("restoreBuffers is true, but file " + mmapFile + " doesn't exist");
            } else {
                LOG.info("restoring buffer from " + mmapFile);
            }
        } else {
            if (mmapFile.exists()) {
                // this path should never happen (only if the generated session ID accidentally matches a previous one)
                LOG.info("restoreBuffers is false; deleting existing mmap file " + mmapFile);
                if (!mmapFile.delete()) {
                    throw new RuntimeException("deletion of file failed: " + mmapFile.getAbsolutePath());
                }
            }
            LOG.info("restoreBuffers is false => will delete new mmap file " + mmapFile + " on exit");
            mmapFile.deleteOnExit(); // in case we don't need files later.
        }

        try {
            FileChannel rwChannel = new RandomAccessFile(mmapFile, "rw").getChannel();
            buffer = rwChannel.map(FileChannel.MapMode.READ_WRITE, 0, size).order(byteOrder);
            rwChannel.close();
        } catch (FileNotFoundException e) {
            throw new RuntimeException(
                    "[should never happen!] can't find mmap file/dir " + mmapFile.getAbsolutePath(), e);
        } catch (IOException e) {
            throw new RuntimeException("unable to initialize mmap file " + mmapFile, e);
        }
    }
    return buffer;
}
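
A detail the MMAPPED_MEMORY branch relies on: a mapping created with FileChannel.map() remains valid after the channel that created it is closed, so calling rwChannel.close() immediately after map() is safe. A minimal sketch of that idiom (the method name is hypothetical):

// The mapping outlives the channel; closing the channel does not invalidate the buffer.
static MappedByteBuffer mapAndClose(File mmapFile, int size) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(mmapFile, "rw");
            FileChannel rwChannel = raf.getChannel()) {
        return rwChannel.map(FileChannel.MapMode.READ_WRITE, 0, size);
    } // rwChannel.close() runs here; the returned MappedByteBuffer stays usable
}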

From source file:com.clustercontrol.agent.log.LogfileMonitor.java

/**
 * Opens the monitored log file and restores the channel to the last read position.
 */
private boolean openFile() {
    m_log.info("openFile : filename=" + status.rsFilePath.getName());

    closeFile();

    FileChannel fc = null;

    try {
        if (checkPrefix())
            status.rotate();

        fc = FileChannel.open(Paths.get(getFilePath()), StandardOpenOption.READ);

        long filesize = fc.size();
        if (filesize > LogfileMonitorConfig.fileMaxSize) {
            // The file size exceeded the configured upper bound, so notify the manager
            // (uses the message.log.agent.1, message.log.agent.3 and message.log.agent.5 resources).
            String[] args1 = { getFilePath() };
            String[] args2 = { String.valueOf(filesize) };
            sendMessage(PriorityConstant.TYPE_INFO, MessageConstant.AGENT.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE_SIZE_EXCEEDED_UPPER_BOUND.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE.getMessage(args1) + ", "
                            + MessageConstant.MESSAGE_LOG_FILE_SIZE_BYTE.getMessage(args2));
        }

        // Restore the read position (the behavior differs between the first open,
        // init=true, and subsequent opens, init=false).
        fc.position(status.position);

        fileChannel = fc;

        return true;
    } catch (FileNotFoundException e) {
        m_log.info("openFile : " + e.getMessage());
        if (m_initFlag) {
            // The log file was not found, so notify the manager
            // (uses the message.log.agent.1 and message.log.agent.2 resources).
            String[] args = { getFilePath() };
            sendMessage(PriorityConstant.TYPE_INFO, MessageConstant.AGENT.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE_NOT_FOUND.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE.getMessage(args));
        }

        return false;
    } catch (SecurityException e) {
        m_log.info("openFile : " + e.getMessage());
        if (m_initFlag) {
            // Failed to read the log file, so notify the manager
            // (uses the message.log.agent.1 and message.log.agent.4 resources).
            String[] args = { getFilePath() };
            sendMessage(PriorityConstant.TYPE_WARNING, MessageConstant.AGENT.getMessage(),
                    MessageConstant.MESSAGE_LOG_FAILED_TO_READ_FILE.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE.getMessage(args) + "\n" + e.getMessage());
        }
        return false;
    } catch (IOException e) {
        m_log.info("openFile : " + e.getMessage());
        if (m_initFlag) {
            // Failed to read the log file, so notify the manager
            // (uses the message.log.agent.1 and message.log.agent.4 resources).
            String[] args = { getFilePath() };
            sendMessage(PriorityConstant.TYPE_INFO, MessageConstant.AGENT.getMessage(),
                    MessageConstant.MESSAGE_LOG_FAILED_TO_READ_FILE.getMessage(),
                    MessageConstant.MESSAGE_LOG_FILE.getMessage(args));
        }
        return false;
    } finally {
        // If the channel was opened but never handed off to fileChannel
        // (i.e. an error occurred above), close it here to avoid a leak.
        if (fc != null && fileChannel == null) {
            try {
                fc.close();
            } catch (IOException e) {
                m_log.warn(e.getMessage(), e);
            }
        }
        m_initFlag = false;
    }
}

From source file:MyZone.Settings.java

private boolean saveXML(String filename, Document dom) {
    File file = null;
    FileChannel channel = null;
    FileLock lock = null;
    FileOutputStream toWrite = null;
    try {
        if (!new File(filename).exists()) {
            String dirName = filename.substring(0, filename.lastIndexOf("/"));
            boolean success = (new File(dirName)).mkdirs();
            if (!success && !(new File(dirName)).exists()) {
                return false;
            }
            OutputFormat format = new OutputFormat(dom);
            format.setIndenting(true);
            file = new File(filename);
            toWrite = new FileOutputStream(file, false);
            XMLSerializer serializer = new XMLSerializer(toWrite, format);
            serializer.serialize(dom);
        } else {
            file = new File(filename);
            toWrite = new FileOutputStream(file, false);
            channel = toWrite.getChannel();
            while ((lock = channel.tryLock()) == null) {
                Thread.yield();
            }
            OutputFormat format = new OutputFormat(dom);
            format.setIndenting(true);
            XMLSerializer serializer = new XMLSerializer(toWrite, format);
            serializer.serialize(dom);
            return true;
        }
    } catch (Exception e) {
        if (DEBUG) {
            e.printStackTrace();
        }
        return false;
    } finally {
        try {
            if (lock != null) {
                lock.release();
            }
            if (channel != null) {
                channel.close();
            }
            if (toWrite != null) {
                toWrite.flush();
                toWrite.close();
            }
        } catch (Exception e) {
            if (DEBUG) {
                e.printStackTrace();
            }
            return false;
        }
    }
    return false;
}

From source file:org.apache.hadoop.hdfs.server.datanode.FSDataset.java

/**
 * Copies a file as fast as possible. Tries to do a hardlink instead of a copy
 * if the hardlink parameter is specified.
 *
 * @param src
 *          the source file for copying
 * @param dst
 *          the destination file for copying
 * @param hardlink
 *          whether or not to attempt a hardlink
 * @throws IOException
 */
public void copyFile(File src, File dst, boolean hardlink) throws IOException {

    if (src == null || dst == null) {
        throw new IOException("src/dst file is null");
    }

    try {
        if (hardlink && shouldHardLinkBlockCopy) {
            // Remove destination before hard linking, since this file might already
            // exist and a hardlink would fail as a result.
            if (dst.exists()) {
                if (!dst.delete()) {
                    throw new IOException("Deletion of file : " + dst + " failed");
                }
            }
            NativeIO.link(src, dst);
            DataNode.LOG.info("Hard Link Created from : " + src + " to " + dst);
            return;
        }
    } catch (IOException e) {
        DataNode.LOG
                .warn("Hard link failed from : " + src + " to " + dst + " continuing with regular file copy");
    }

    FileChannel input = null;
    FileChannel output = null;
    try {
        // This improves copying performance a lot, it uses native buffers
        // for copying.
        input = new FileInputStream(src).getChannel();
        output = new FileOutputStream(dst).getChannel();
        if (input == null || output == null) {
            throw new IOException("Could not create file channels for src : " + src + " dst : " + dst);
        }
        long bytesLeft = input.size();
        long position = 0;
        while (bytesLeft > 0) {
            long bytesWritten = output.transferFrom(input, position, bytesLeft);
            bytesLeft -= bytesWritten;
            position += bytesWritten;
        }
        if (datanode.syncOnClose) {
            output.force(true);
        }
    } finally {
        if (input != null) {
            input.close();
        }
        if (output != null) {
            output.close();
        }
    }
}
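
Because transferFrom() can fail part-way through the loop, the finally block above has to close both channels itself; the same copy can be written with try-with-resources so that close() is implicit. A minimal sketch (copyFileNio is a hypothetical name; the hardlink and syncOnClose logic are left out):

// Hypothetical channel-to-channel copy; both channels are closed by try-with-resources.
static void copyFileNio(File src, File dst) throws IOException {
    try (FileChannel input = new FileInputStream(src).getChannel();
            FileChannel output = new FileOutputStream(dst).getChannel()) {
        long bytesLeft = input.size();
        long position = 0;
        while (bytesLeft > 0) {
            long bytesWritten = output.transferFrom(input, position, bytesLeft);
            bytesLeft -= bytesWritten;
            position += bytesWritten;
        }
    } // output and input are closed here, in reverse declaration order
}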

From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java

private void commitBucketTableToDisk() throws BucketTableManagerException {
    File currentFile = null;
    FileChannel fileChannel = null;
    ByteBuffer headerBuffer = null;
    try {
        logger.warn("Start commit bucket table...");
        if (bucketTable.getRequestedCheckPoint() == null || bucketTable.getRequestedCheckPoint().isEmpty())
            throw new BucketTableManagerException("commit requested while there is no requested checkpoint");
        currentFile = getLatestCommitedFile();
        File nextFile = getNextFile(getLatestCommitedFile());
        fileChannel = (new RandomAccessFile(nextFile, "rw")).getChannel();
        // Write header with empty checkpoint 
        headerBuffer = ByteBuffer.allocate(HEADERSIZE);
        fileChannel.position(0L);
        headerBuffer.putInt(MAGICSTART);
        headerBuffer.putLong(mapSize);
        // NeedlePointer lastCheckPoint = bucketTable.getLastCheckPoint() ; // Reset checkpoint to no checkpoint done
        NeedlePointer lastCheckPoint = new NeedlePointer(); // Empty needle
        lastCheckPoint.putNeedlePointerToBuffer(headerBuffer);
        headerBuffer.putInt(MAGICEND);
        headerBuffer.flip(); // truncate buffer
        fileChannel.write(headerBuffer);
        // Now writes buffers
        for (int i = 0; i < nbBuffers; i++) {
            bucketTable.prepareBufferForWriting(i);
            int written = fileChannel.write(bucketTable.getBuffer(i));
            if (written < bucketTable.getBuffer(i).limit())
                throw new BucketTableManagerException("Incomplete write for bucket table file "
                        + nextFile.getName() + ", expected " + (mapSize + HEADERSIZE));
            // else
            // logger.info("Bucket table commit: written "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            try {
                Thread.sleep(10);
            } catch (Throwable th) {

            }
        }
        // Writes second magic number
        ByteBuffer buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
        buffer.rewind();
        buffer.limit(INTSIZE);
        buffer.putInt(MAGICSTART);
        buffer.rewind();
        fileChannel.write(buffer);
        // Write Needle Log Info
        Iterator<NeedleLogInfo> it = logInfoPerLogNumber.values().iterator();
        while (it.hasNext()) {
            buffer.rewind();
            buffer.limit(NeedleLogInfo.INFOSIZE);
            NeedleLogInfo nli = it.next();
            nli.putNeedleLogInfo(buffer, true);
            int written = fileChannel.write(buffer);
            if (written < NeedleLogInfo.INFOSIZE)
                throw new BucketTableManagerException(
                        "Incomplete write for bucket table file, writing log infos " + nextFile.getName());
        }
        // Writes checkpoint
        headerBuffer = ByteBuffer.allocate(NeedlePointer.POINTERSIZE);
        headerBuffer.rewind();
        headerBuffer.limit(NeedlePointer.POINTERSIZE);
        // System.out.println("Writing checkpoint in index "+bucketTable.getRequestedCheckPoint()) ;
        bucketTable.getRequestedCheckPoint().putNeedlePointerToBuffer(headerBuffer, true); // Flip buffer after write
        headerBuffer.rewind();
        // fileChannel.force(false) ;
        if (fileChannel.write(headerBuffer, CHECKPOINTOFFSET) < NeedlePointer.POINTERSIZE) {
            throw new BucketTableManagerException("Could not write checkpoint to " + nextFile.getName());
        }
        fileChannel.force(true);
        fileChannel.close();
        if (!nextFile.renameTo(getCommittedFile(nextFile)))
            throw new BucketTableManagerException(
                    "Could not rename " + nextFile.getName() + " to " + getCommittedFile(nextFile).getName());

        logger.warn("Committed bucket table.");
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed writting bucket table", ie);
    } finally {
        headerBuffer = null; //May ease garbage collection
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (Exception ex) {
                throw new BucketTableManagerException("Failed to close file channel", ex);
            }
        }
    }
    try {
        if (currentFile != null) {
            if (!currentFile.delete())
                logger.error("Failed deleting previous bucket table" + currentFile.getName());
        }
    } catch (Throwable th) {
        logger.error("Failed deleting previous bucket table" + currentFile.getName(), th);
    }
}
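
Note the ordering above: fileChannel.force(true) runs before close(), because close() on its own does not guarantee that written bytes have reached the storage device. A minimal sketch of that write-force-close sequence (writeDurably is a hypothetical name):

// Hypothetical durable write: force(true) flushes data and metadata before the channel is closed.
static void writeDurably(File file, ByteBuffer payload) throws IOException {
    try (FileChannel fc = new RandomAccessFile(file, "rw").getChannel()) {
        while (payload.hasRemaining()) {
            fc.write(payload);
        }
        fc.force(true); // flush to the storage device
    } // fc.close() runs here
}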

From source file:com.MainFiles.Functions.java

public int createStan() {
    int x = 0;

    String filename = COUNT_FILE;
    File inwrite = new File(filename);

    // Get a file channel for the file
    try {
        FileChannel channel = new RandomAccessFile(inwrite, "rw").getChannel();
        // Use the file channel to create a lock on the file.
        // This method blocks until it can retrieve the lock.
        FileLock lock = channel.lock();
        //  if(!inwrite.exists()) {
        String s = "";
        try {
            int fileSize = (int) channel.size();
            //    System.out.println("int is" + fileSize);
            ByteBuffer bafa = ByteBuffer.allocate(fileSize);
            int numRead = 0;
            while (numRead >= 0) {
                numRead = channel.read(bafa);
                bafa.rewind();
                for (int i = 0; i < numRead; i++) {
                    int b = (int) bafa.get();
                    char c = (char) b;
                    s = s + c;
                }
            }

            x = Integer.parseInt(s);
            if (x > 999999) {
                x = 100000;
            } else if (x < 100000) {
                x = 100000;
            }
            x = ++x;
            String xx = String.valueOf(x);
            byte[] yy = xx.getBytes();
            channel.truncate(0);
            channel.write(ByteBuffer.wrap(yy));
            // channel.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        lock.release();
        // Close the file
        channel.close();
    } catch (FileNotFoundException e) {
        String message = "The file " + inwrite.getName() + " does not exist. So no input can be written on it";
        System.out.println(message);
        e.printStackTrace();
        //log to error file
    } catch (IOException e) {
        System.out.println("Problem writing to the logfile " + inwrite.getName());

    }

    filename = "";
    return x;
}
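
Since closing the channel releases any lock acquired through it, the explicit release()/close() pair above can also be folded into a single try-with-resources block. A minimal sketch of the counter update (incrementCounter is a hypothetical name; it assumes the counter file already contains a number, and the error handling from createStan() is omitted):

// Hypothetical sketch of the STAN counter update; the lock is released no later than channel.close().
static int incrementCounter(File counterFile) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(counterFile, "rw");
            FileChannel channel = raf.getChannel()) {
        channel.lock(); // exclusive lock, released when the channel is closed
        ByteBuffer in = ByteBuffer.allocate((int) channel.size());
        while (channel.read(in) > 0) {
            // read the whole counter file
        }
        int value = Integer.parseInt(new String(in.array(), 0, in.position()).trim());
        if (value > 999999 || value < 100000) {
            value = 100000;
        }
        value++;
        channel.truncate(0);
        channel.write(ByteBuffer.wrap(String.valueOf(value).getBytes()));
        return value;
    }
}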

From source file:edu.harvard.iq.dataverse.ingest.IngestServiceBean.java

public void addFiles(DatasetVersion version, List<DataFile> newFiles) {
    if (newFiles != null && newFiles.size() > 0) {
        // final check for duplicate file names; 
        // we tried to make the file names unique on upload, but then 
        // the user may have edited them on the "add files" page, and 
        // renamed FOOBAR-1.txt back to FOOBAR.txt...

        checkForDuplicateFileNamesFinal(version, newFiles);

        Dataset dataset = version.getDataset();

        try {
            if (dataset.getFileSystemDirectory() != null && !Files.exists(dataset.getFileSystemDirectory())) {
                /* Note that "createDirectories()" must be used - not 
                 * "createDirectory()", to make sure all the parent 
                 * directories that may not yet exist are created as well. 
                 */

                Files.createDirectories(dataset.getFileSystemDirectory());
            }
        } catch (IOException dirEx) {
            logger.severe("Failed to create study directory " + dataset.getFileSystemDirectory().toString());
            return;
            // TODO:
            // Decide how we are communicating failure information back to 
            // the page, and what the page should be doing to communicate
            // it to the user - if anything. 
            // -- L.A. 
        }

        if (dataset.getFileSystemDirectory() != null && Files.exists(dataset.getFileSystemDirectory())) {
            for (DataFile dataFile : newFiles) {
                String tempFileLocation = getFilesTempDirectory() + "/" + dataFile.getStorageIdentifier();

                FileMetadata fileMetadata = dataFile.getFileMetadatas().get(0);
                String fileName = fileMetadata.getLabel();

                // temp dbug line
                System.out.println("ADDING FILE: " + fileName + "; for dataset: " + dataset.getGlobalId());

                // These are all brand new files, so they should all have 
                // one filemetadata total. -- L.A. 
                boolean metadataExtracted = false;

                if (ingestableAsTabular(dataFile)) {
                    /*
                     * Note that we don't try to ingest the file right away - 
                     * instead we mark it as "scheduled for ingest", then at 
                     * the end of the save process it will be queued for async. 
                     * ingest in the background. In the meantime, the file 
                     * will be ingested as a regular, non-tabular file, and 
                     * appear as such to the user, until the ingest job is
                     * finished with the Ingest Service.
                     */
                    dataFile.SetIngestScheduled();
                } else if (fileMetadataExtractable(dataFile)) {

                    try {
                        // FITS is the only type supported for metadata 
                        // extraction, as of now. -- L.A. 4.0 
                        dataFile.setContentType("application/fits");
                        metadataExtracted = extractMetadata(tempFileLocation, dataFile, version);
                    } catch (IOException mex) {
                        logger.severe("Caught exception trying to extract indexable metadata from file "
                                + fileName + ",  " + mex.getMessage());
                    }
                    if (metadataExtracted) {
                        logger.fine("Successfully extracted indexable metadata from file " + fileName);
                    } else {
                        logger.fine("Failed to extract indexable metadata from file " + fileName);
                    }
                }

                // Try to save the file in its permanent location: 

                String storageId = dataFile.getStorageIdentifier().replaceFirst("^tmp://", "");

                Path tempLocationPath = Paths.get(getFilesTempDirectory() + "/" + storageId);
                WritableByteChannel writeChannel = null;
                FileChannel readChannel = null;

                try {

                    DataFileIO dataAccess = dataFile.getAccessObject();

                    /* 
                     This commented-out code demonstrates how to copy bytes
                     from a local InputStream (or a readChannel) into the
                     writable byte channel of a Dataverse DataAccessIO object:
                    */
                    /*
                    dataAccess.open(DataAccessOption.WRITE_ACCESS);
                                                
                    writeChannel = dataAccess.getWriteChannel();
                    readChannel = new FileInputStream(tempLocationPath.toFile()).getChannel();
                                                
                    long bytesPerIteration = 16 * 1024; // 16K bytes
                    long start = 0;
                    while ( start < readChannel.size() ) {
                    readChannel.transferTo(start, bytesPerIteration, writeChannel);
                    start += bytesPerIteration;
                    }
                    */

                    /* 
                    But it's easier to use this convenience method from the
                    DataAccessIO: 
                            
                    (if the underlying storage method for this file is 
                    local filesystem, the DataAccessIO will simply copy 
                    the file using Files.copy, like this:
                            
                    Files.copy(tempLocationPath, dataAccess.getFileSystemLocation(), StandardCopyOption.REPLACE_EXISTING);
                    */

                    dataAccess.copyPath(tempLocationPath);

                    // Set filesize in bytes
                    // 
                    dataFile.setFilesize(dataAccess.getSize());

                } catch (IOException ioex) {
                    logger.warning("Failed to save the file, storage id " + dataFile.getStorageIdentifier());
                } finally {
                    if (readChannel != null) {
                        try {
                            readChannel.close();
                        } catch (IOException e) {
                        }
                    }
                    if (writeChannel != null) {
                        try {
                            writeChannel.close();
                        } catch (IOException e) {
                        }
                    }
                }

                // delete the temporary file: 
                try {
                    logger.fine("Will attempt to delete the temp file " + tempLocationPath.toString());
                    // also, delete a temporary thumbnail image file, if exists:
                    // (TODO: probably not a very good style, that the size of the thumbnail 
                    // is hard-coded here; it may change in the future...)
                    Path tempThumbnailPath = Paths.get(tempLocationPath.toString() + ".thumb64");
                    Files.delete(tempLocationPath);
                    if (tempThumbnailPath.toFile().exists()) {
                        Files.delete(tempThumbnailPath);
                    }
                } catch (IOException ex) {
                    // (non-fatal - it's just a temp file.)
                    logger.warning("Failed to delete temp file " + tempLocationPath.toString());
                }
                // Any necessary post-processing: 
                performPostProcessingTasks(dataFile);
            }
        }
    }
}