Example usage for java.io IOException initCause

List of usage examples for java.io IOException initCause

Introduction

On this page you can find usage examples for java.io.IOException.initCause.

Prototype

public synchronized Throwable initCause(Throwable cause) 

Document

Initializes the cause of this throwable to the specified value.
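
The cause can be set at most once; a second call throws IllegalStateException. Because initCause returns this throwable, the call can also be chained into a throw statement. Below is a minimal sketch of the common wrap-and-rethrow idiom seen throughout the examples on this page (the readConfig method and its path parameter are hypothetical, not taken from any of the sources below):

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class InitCauseExample {

    // Wraps any runtime failure in an IOException while preserving the
    // original stack trace via initCause.
    static Properties readConfig(String path) throws IOException {
        try (FileInputStream in = new FileInputStream(path)) {
            Properties props = new Properties();
            props.load(in);
            return props;
        } catch (RuntimeException e) {
            IOException ioe = new IOException("Unable to read config: " + path);
            ioe.initCause(e); // may only be called once per throwable
            throw ioe;
        }
    }
}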

Usage

From source file:org.apache.jackrabbit.core.query.lucene.SearchIndex.java

/**
 * @return an initialized {@link DirectoryManager}.
 * @throws IOException if the directory manager cannot be instantiated or
 *          an exception occurs while initializing the manager.
 */
protected DirectoryManager createDirectoryManager() throws IOException {
    try {
        Class clazz = Class.forName(directoryManagerClass);
        if (!DirectoryManager.class.isAssignableFrom(clazz)) {
            throw new IOException(directoryManagerClass + " is not a DirectoryManager implementation");
        }
        DirectoryManager df = (DirectoryManager) clazz.newInstance();
        df.init(this);
        return df;
    } catch (IOException e) {
        throw e;
    } catch (Exception e) {
        IOException ex = new IOException();
        ex.initCause(e);
        throw ex;
    }
}
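
Note: since Java 6, IOException also provides IOException(String, Throwable) and IOException(Throwable) constructors, so in new code the two-step new-then-initCause pattern above can usually be collapsed into a single constructor call. A minimal sketch of the equivalent (a hypothetical wrapper, not from the Jackrabbit source):

import java.io.IOException;

class CauseConstructorExample {
    static void rethrow(Exception e) throws IOException {
        // Pre-Java 6 idiom, as in the example above:
        //     IOException ex = new IOException();
        //     ex.initCause(e);
        //     throw ex;
        // Since Java 6, the single-call equivalent:
        throw new IOException("DirectoryManager initialization failed", e);
    }
}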

From source file:com.youzu.android.framework.http.HttpHandler.java

@SuppressWarnings("unchecked")
private ResponseInfo<T> sendRequest(HttpRequestBase request) throws HttpException {

    HttpRequestRetryHandler retryHandler = client.getHttpRequestRetryHandler();
    while (true) {

        if (autoResume && isDownloadingFile) {
            File downloadFile = new File(fileSavePath);
            long fileLen = 0;
            if (downloadFile.isFile() && downloadFile.exists()) {
                fileLen = downloadFile.length();
            }
            if (fileLen > 0) {
                request.setHeader("RANGE", "bytes=" + fileLen + "-");
            }
        }

        boolean retry = true;
        IOException exception = null;
        try {
            requestMethod = request.getMethod();
            if (HttpUtils.sHttpCache.isEnabled(requestMethod)) {
                String result = HttpUtils.sHttpCache.get(requestUrl);
                if (result != null) {
                    return new ResponseInfo<T>(null, (T) result, true);
                }
            }
            ResponseInfo<T> responseInfo = null;
            if (!isCancelled()) {
                HttpResponse response = client.execute(request, context);
                responseInfo = handleResponse(response);
            }
            return responseInfo;
        } catch (UnknownHostException e) {
            Log.e("APP", "HttpHandler sendRequest UnknownHostException:" + e.getMessage());

            exception = e;
            retry = retryHandler.retryRequest(exception, ++retriedCount, context);
        } catch (IOException e) {
            Log.e("APP", "HttpHandler sendRequest IOException: " + e.toString());

            exception = e;
            retry = retryHandler.retryRequest(exception, ++retriedCount, context);
        } catch (NullPointerException e) {
            Log.e("APP", "HttpHandler sendRequest NullPointerException:" + e.getMessage());

            exception = new IOException(e.getMessage());
            exception.initCause(e);
            retry = retryHandler.retryRequest(exception, ++retriedCount, context);
        } catch (HttpException e) {
            Log.e("APP", "HttpHandler sendRequest HttpException:" + e.getMessage());

            throw e;
        } catch (Throwable e) {
            Log.e("APP", "HttpHandler sendRequest Throwable:" + e.getMessage());

            exception = new IOException(e.getMessage());
            exception.initCause(e);
            retry = retryHandler.retryRequest(exception, ++retriedCount, context);
        }

        Log.e("APP", "retry:" + retry);

        if (!retry) {
            HttpException httpException = new HttpException(exception);
            Log.e("APP", "HttpHandler sendRequest HttpException:" + httpException.getMessage());
            throw httpException;
        }
    }
}
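
A design note on the example above: Apache HttpClient's HttpRequestRetryHandler.retryRequest accepts only an IOException, so the handler funnels NullPointerException and other Throwables into a synthetic IOException and keeps the original failure reachable through initCause. A minimal sketch of that funneling, separated from the retry loop (the class and method names here are hypothetical):

import java.io.IOException;

class RetryFunnelExample {

    // Adapts an arbitrary failure to an IOException-only API,
    // preserving the original throwable as the cause.
    static IOException asIOException(Throwable t) {
        if (t instanceof IOException) {
            return (IOException) t;
        }
        IOException ioe = new IOException(t.getMessage());
        ioe.initCause(t);
        return ioe;
    }
}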

From source file:org.apache.hadoop.hbase.wal.WALSplitter.java

/**
 * log splitting implementation, splits one log file.
 * @param logfile should be an actual log file.
 */
boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) throws IOException {
    Preconditions.checkState(status == null);
    Preconditions.checkArgument(logfile.isFile(),
            "passed in file status is for something other than a regular file.");
    boolean isCorrupted = false;
    boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", SPLIT_SKIP_ERRORS_DEFAULT);
    int interval = conf.getInt("hbase.splitlog.report.interval.loglines", 1024);
    Path logPath = logfile.getPath();
    boolean outputSinkStarted = false;
    boolean progress_failed = false;
    int editsCount = 0;
    int editsSkipped = 0;

    status = TaskMonitor.get()
            .createStatus("Splitting log file " + logfile.getPath() + " into a temporary staging area.");
    Reader in = null;
    try {
        long logLength = logfile.getLen();
        LOG.info("Splitting wal: " + logPath + ", length=" + logLength);
        LOG.info("DistributedLogReplay = " + this.distributedLogReplay);
        status.setStatus("Opening log file");
        if (reporter != null && !reporter.progress()) {
            progress_failed = true;
            return false;
        }
        try {
            in = getReader(logfile, skipErrors, reporter);
        } catch (CorruptedLogFileException e) {
            LOG.warn("Could not get reader, corrupted log file " + logPath, e);
            ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
            isCorrupted = true;
        }
        if (in == null) {
            LOG.warn("Nothing to split in log file " + logPath);
            return true;
        }
        int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
        int numOpenedFilesLastCheck = 0;
        outputSink.setReporter(reporter);
        outputSink.startWriterThreads();
        outputSinkStarted = true;
        Entry entry;
        Long lastFlushedSequenceId = -1L;
        ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(logPath);
        failedServerName = (serverName == null) ? "" : serverName.getServerName();
        while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
            byte[] region = entry.getKey().getEncodedRegionName();
            String encodedRegionNameAsStr = Bytes.toString(region);
            lastFlushedSequenceId = lastFlushedSequenceIds.get(encodedRegionNameAsStr);
            if (lastFlushedSequenceId == null) {
                if (this.distributedLogReplay) {
                    RegionStoreSequenceIds ids = csm.getSplitLogWorkerCoordination()
                            .getRegionFlushedSequenceId(failedServerName, encodedRegionNameAsStr);
                    if (ids != null) {
                        lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("DLR Last flushed sequenceid for " + encodedRegionNameAsStr + ": "
                                    + TextFormat.shortDebugString(ids));
                        }
                    }
                } else if (sequenceIdChecker != null) {
                    RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(region);
                    Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
                    for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) {
                        maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(),
                                storeSeqId.getSequenceId());
                    }
                    regionMaxSeqIdInStores.put(encodedRegionNameAsStr, maxSeqIdInStores);
                    lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("DLS Last flushed sequenceid for " + encodedRegionNameAsStr + ": "
                                + TextFormat.shortDebugString(ids));
                    }
                }
                if (lastFlushedSequenceId == null) {
                    lastFlushedSequenceId = -1L;
                }
                lastFlushedSequenceIds.put(encodedRegionNameAsStr, lastFlushedSequenceId);
            }
            if (lastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
                editsSkipped++;
                continue;
            }
            entryBuffers.appendEntry(entry);
            editsCount++;
            int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck;
            // If sufficient edits have passed, check if we should report progress.
            if (editsCount % interval == 0 || moreWritersFromLastCheck > numOpenedFilesBeforeReporting) {
                numOpenedFilesLastCheck = this.getNumOpenWriters();
                String countsStr = (editsCount - (editsSkipped + outputSink.getSkippedEdits()))
                        + " edits, skipped " + editsSkipped + " edits.";
                status.setStatus("Split " + countsStr);
                if (reporter != null && !reporter.progress()) {
                    progress_failed = true;
                    return false;
                }
            }
        }
    } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
    } catch (CorruptedLogFileException e) {
        LOG.warn("Could not parse, corrupted log file " + logPath, e);
        csm.getSplitLogWorkerCoordination().markCorrupted(rootDir, logfile.getPath().getName(), fs);
        isCorrupted = true;
    } catch (IOException e) {
        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
        throw e;
    } finally {
        LOG.debug("Finishing writing output logs and closing down.");
        try {
            if (null != in) {
                in.close();
            }
        } catch (IOException exception) {
            LOG.warn("Could not close wal reader: " + exception.getMessage());
            LOG.debug("exception details", exception);
        }
        try {
            if (outputSinkStarted) {
                // Set progress_failed to true; the immediately following statement resets it,
                // but if finishWritingAndClose() throws, progress_failed keeps the right value
                progress_failed = true;
                progress_failed = outputSink.finishWritingAndClose() == null;
            }
        } finally {
            String msg = "Processed " + editsCount + " edits across " + outputSink.getNumberOfRecoveredRegions()
                    + " regions; edits skipped=" + editsSkipped + "; log file=" + logPath + ", length="
                    + logfile.getLen() + // See if length got updated post lease recovery
                    ", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
            LOG.info(msg);
            status.markComplete(msg);
        }
    }
    return !progress_failed;
}
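
The InterruptedException handling above shows where initCause is still required even on current JDKs: InterruptedIOException declares only a no-argument and a message constructor, with no way to pass a cause at construction time. A minimal sketch of that conversion (restoring the interrupt flag is an addition here; the HBase code above does not do it):

import java.io.IOException;
import java.io.InterruptedIOException;

class InterruptConversionExample {

    // InterruptedIOException has no constructor that accepts a cause,
    // so the cause must be attached with initCause after construction.
    static IOException toInterruptedIOException(InterruptedException ie) {
        Thread.currentThread().interrupt(); // restore the interrupt flag
        InterruptedIOException iioe = new InterruptedIOException("operation interrupted");
        iioe.initCause(ie);
        return iioe;
    }
}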

From source file:org.apache.hadoop.hbase.regionserver.HStore.java

@Override
public ImmutableCollection<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
        // Clear so metrics doesn't find them.
        ImmutableCollection<StoreFile> result = storeEngine.getStoreFileManager().clearFiles();

        if (!result.isEmpty()) {
            // initialize the thread pool for closing store files in parallel.
            ThreadPoolExecutor storeFileCloserThreadPool = this.region
                    .getStoreFileOpenAndCloseThreadPool("StoreFileCloserThread-" + this.getColumnFamilyName());

            // close each store file in parallel
            CompletionService<Void> completionService = new ExecutorCompletionService<Void>(
                    storeFileCloserThreadPool);
            for (final StoreFile f : result) {
                completionService.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws IOException {
                        f.closeReader(true);
                        return null;
                    }
                });
            }

            IOException ioe = null;
            try {
                for (int i = 0; i < result.size(); i++) {
                    try {
                        Future<Void> future = completionService.take();
                        future.get();
                    } catch (InterruptedException e) {
                        if (ioe == null) {
                            ioe = new InterruptedIOException();
                            ioe.initCause(e);
                        }
                    } catch (ExecutionException e) {
                        if (ioe == null)
                            ioe = new IOException(e.getCause());
                    }
                }
            } finally {
                storeFileCloserThreadPool.shutdownNow();
            }
            if (ioe != null)
                throw ioe;
        }
        LOG.info("Closed " + this);
        return result;
    } finally {
        this.lock.writeLock().unlock();
    }
}

From source file:org.apache.axis2.addressing.EndpointReference.java

/**
 * Read the EPR from the specified ObjectInput.
 */
public void readExternal(java.io.ObjectInput inObject) throws IOException, ClassNotFoundException {
    SafeObjectInputStream in = SafeObjectInputStream.install(inObject);

    // revision ID
    int revID = in.readInt();

    // make sure the object data is in a revision level we can handle
    if (revID != REVISION_2) {
        throw new ClassNotFoundException(ExternalizeConstants.UNSUPPORTED_REVID);
    }

    // String object id
    logCorrelationIDString = (String) in.readObject();

    // Read xml content
    in.readUTF(); // read marker
    int numBytes = in.readInt();

    byte[] serBytes = new byte[numBytes];

    // read the data from the input stream

    int bytesRead = 0;
    int numberOfBytesLastRead;

    while (bytesRead < numBytes) {
        numberOfBytesLastRead = in.read(serBytes, bytesRead, numBytes - bytesRead);

        if (numberOfBytesLastRead == -1) {
            // TODO: What should we do if the reconstitution fails?
            // For now, log the event and throw an exception
            if (log.isDebugEnabled()) {
                log.debug("readObject(): EPR logCorrelationID [" + logCorrelationIDString + "] "
                        + " ***WARNING*** unexpected end to data:    data read from input stream [" + bytesRead
                        + "]    expected data size [" + numBytes + "]");
            }

            IOException ioe = new IOException(
                    "Unable to deserialize the EndpointReference with logCorrelationID ["
                            + logCorrelationIDString + "]"
                            + "  Cause: Unexpected end to data from input stream");

            throw ioe;
        }

        bytesRead += numberOfBytesLastRead;
    }

    if (bytesRead == 0) {
        IOException ioe = new IOException("Unable to deserialize the EndpointReference with logCorrelationID ["
                + logCorrelationIDString + "]" + "  Cause: No data from input stream");

        throw ioe;
    }
    in.readUTF(); // read marker

    ByteArrayInputStream bais = new ByteArrayInputStream(serBytes);

    if (log.isDebugEnabled()) {
        String content = new String(serBytes);

        log.debug("readObject(): EPR logCorrelationID [" + logCorrelationIDString + "] "
                + "    expected content size [" + numBytes + "]" + "    content size [" + content.length() + "]"
                + "    EPR buffered content [" + content + "]");
    }

    XMLStreamReader xmlReader = null;

    try {
        OMElement om = OMXMLBuilderFactory.createOMBuilder(bais).getDocumentElement();

        // expand the OM so we can close the stream reader
        om.build();

        // trace point
        if (log.isDebugEnabled()) {
            log.debug(myClassName + ":readObject():  " + " EPR [" + logCorrelationIDString + "]"
                    + " EPR OM content [" + om.toString() + "]");
        }

        EndpointReferenceHelper.fromOM(this, om, AddressingConstants.Final.WSA_NAMESPACE);

    } catch (Exception e) {
        IOException ioe = new IOException("Unable to deserialize the EndpointReference with logCorrelationID ["
                + logCorrelationIDString + "]");
        ioe.initCause(e);

        if (log.isDebugEnabled()) {
            log.debug("readObject(): Unable to deserialize the EPR with logCorrelationID ["
                    + logCorrelationIDString + "]   original error [" + e.getClass().getName() + "]  message ["
                    + e.getMessage() + "]", e);
        }

        throw ioe;

    } finally {
        // Make sure that the reader is properly closed
        // Note that closing a ByteArrayInputStream has no effect

        if (xmlReader != null) {
            try {
                xmlReader.close();
            } catch (Exception e2) {
                IOException ioe2 = new IOException(
                        "Unable to close the XMLStreamReader for the EndpointReference with logCorrelationID ["
                                + logCorrelationIDString + "]");
                ioe2.initCause(e2);

                if (log.isDebugEnabled()) {
                    log.debug(
                            "readObject(): Unable to close the XMLStreamReader for the EPR with logCorrelationID ["
                                    + logCorrelationIDString + "]   original error [" + e2.getClass().getName()
                                    + "]  message [" + e2.getMessage() + "]",
                            e2);
                }

                throw ioe2;
            }
        }
    }
}

From source file:org.alfresco.filesys.repo.ContentDiskDriver2.java

/**
 * Delete the specified file.
 * 
 * @param session Server session
 * @param tree Tree connection
 * @param rootNode Root node
 * @param path Path of the file to delete
 * @return NodeRef of the deleted file
 * @exception java.io.IOException if the delete fails
 */
public NodeRef deleteFile2(final SrvSession session, final TreeConnection tree, NodeRef rootNode, String path)
        throws IOException {
    // Get the device context

    final ContentContext ctx = (ContentContext) tree.getContext();

    if (logger.isDebugEnabled()) {
        logger.debug("deleteFile:" + path + ", session:" + session.getUniqueId());
    }

    try {
        if (session.isPseudoFilesEnabled() && ctx.isPseudoFilesEnabled()) {
            String[] paths = FileName.splitPath(path);
            // lookup parent directory
            NodeRef dirNodeRef = getNodeForPath(tree, paths[0]);

            // Check whether we are closing a pseudo file
            if (ctx.getPseudoFileOverlay().isPseudoFile(dirNodeRef, paths[1])) {
                // pseudo delete a pseudo file
                ctx.getPseudoFileOverlay().delete(dirNodeRef, paths[1]);
                return null;
            }
        }

        // Check if there is a quota manager enabled, if so then we need to save the current file size

        final QuotaManager quotaMgr = ctx.getQuotaManager();

        // Get the node and delete it
        final NodeRef nodeRef = getNodeForPath(tree, path);

        if (fileFolderService.exists(nodeRef)) {
            lockKeeper.removeLock(nodeRef);

            // Get the size of the file being deleted        
            final FileInfo fInfo = quotaMgr == null ? null : getFileInformation(session, tree, path);

            if (logger.isDebugEnabled()) {
                logger.debug("deleted file" + path);
            }
            fileFolderService.delete(nodeRef);

            //TODO Needs to be post-commit
            if (quotaMgr != null) {
                quotaMgr.releaseSpace(session, tree, fInfo.getFileId(), path, fInfo.getSize());
            }

            // Debug

            if (logger.isDebugEnabled()) {
                logger.debug("Deleted file: " + path + ", nodeRef=" + nodeRef);
            }

            // Return the node ref of the deleted file
            return nodeRef;
        }
    } catch (NodeLockedException ex) {
        if (logger.isDebugEnabled()) {
            logger.debug("Delete file - access denied (locked)", ex);
        }
        // Convert to a filesystem access denied status

        throw new AccessDeniedException("Unable to delete " + path);
    } catch (org.alfresco.repo.security.permissions.AccessDeniedException ex) {
        // Debug

        if (logger.isDebugEnabled()) {
            logger.debug("Delete file - access denied", ex);
        }

        // Convert to a filesystem access denied status
        throw new AccessDeniedException("Unable to delete " + path);
    } catch (IOException ex) {
        // Allow I/O Exceptions to pass through
        if (logger.isDebugEnabled()) {
            logger.debug("Delete file error - pass through IO Exception", ex);
        }
        throw ex;
    } catch (Exception ex) {
        // Debug

        if (logger.isDebugEnabled()) {
            logger.debug("Delete file error", ex);
        }

        // Convert to a general I/O exception
        IOException ioe = new IOException("Delete file " + path);
        ioe.initCause(ex);
        throw ioe;
    }
    return null;
}

From source file:org.exoplatform.services.jcr.impl.core.query.lucene.SearchIndex.java

/**
 * @return an initialized {@link DirectoryManager}.
 * @throws IOException
 *             if the directory manager cannot be instantiated or an
 *             exception occurs while initializing the manager.
 */
protected DirectoryManager createDirectoryManager() throws IOException {
    try {
        Class<?> clazz = ClassLoading.forName(directoryManagerClass, this);
        if (!DirectoryManager.class.isAssignableFrom(clazz)) {
            throw new IOException(directoryManagerClass + " is not a DirectoryManager implementation");
        }
        DirectoryManager df = (DirectoryManager) clazz.newInstance();
        df.init(this);
        return df;
    } catch (IOException e) {
        throw e;
    } catch (Exception e) {
        IOException ex = new IOException();
        ex.initCause(e);
        throw ex;
    }
}

From source file:org.apache.jackrabbit.spi2dav.RepositoryServiceImpl.java

/**
 * @see RepositoryService#getEvents(SessionInfo, EventFilter, long)
 */
public EventBundle getEvents(SessionInfo sessionInfo, EventFilter filter, long after)
        throws RepositoryException, UnsupportedRepositoryOperationException {
    // TODO: use filters remotely (JCR-3179)

    GetMethod method = null;
    String rootUri = uriResolver.getWorkspaceUri(sessionInfo.getWorkspaceName());
    rootUri += "?type=journal"; // TODO should have a way to discover URI template

    try {
        method = new GetMethod(rootUri);
        method.addRequestHeader("If-None-Match", "\"" + Long.toHexString(after) + "\""); // TODO
        initMethod(method, sessionInfo);

        getClient(sessionInfo).executeMethod(method);
        assert method.getStatusCode() == 200;

        InputStream in = method.getResponseBodyAsStream();
        Document doc = null;
        if (in != null) {
            // read response and try to build an XML document
            try {
                doc = DomUtil.parseDocument(in);
            } catch (ParserConfigurationException e) {
                IOException exception = new IOException("XML parser configuration error");
                exception.initCause(e);
                throw exception;
            } catch (SAXException e) {
                IOException exception = new IOException("XML parsing error");
                exception.initCause(e);
                throw exception;
            } finally {
                in.close();
            }
        }

        List<Event> events = new ArrayList<Event>();

        ElementIterator entries = DomUtil.getChildren(doc.getDocumentElement(), AtomFeedConstants.N_ENTRY);
        while (entries.hasNext()) {
            Element entryElem = entries.next();

            Element contentElem = DomUtil.getChildElement(entryElem, AtomFeedConstants.N_CONTENT);
            if (contentElem != null
                    && "application/vnd.apache.jackrabbit.event+xml".equals(contentElem.getAttribute("type"))) {
                List<Event> el = buildEventList(contentElem, (SessionInfoImpl) sessionInfo, rootUri);
                for (Event e : el) {
                    if (e.getDate() > after && (filter == null || filter.accept(e, false))) {
                        events.add(e);
                    }
                }
            }
        }

        return new EventBundleImpl(events, false);
    } catch (Exception ex) {
        log.error("extracting events from journal feed", ex);
        throw new RepositoryException(ex);
    }
}

From source file:org.alfresco.filesys.repo.ContentDiskDriver.java

/**
 * Delete the specified file.
 * 
 * @param sess Server session
 * @param tree Tree connection
 * @param name Name of the file to delete
 * @exception java.io.IOException if the delete fails
 */
public void deleteFile(final SrvSession sess, final TreeConnection tree, final String name) throws IOException {
    // Get the device context

    if (logger.isDebugEnabled()) {
        logger.debug("Delete file - " + name);
    }

    final ContentContext ctx = (ContentContext) tree.getContext();

    try {
        // Check if pseudo files are enabled

        if (hasPseudoFileInterface(ctx)) {
            // Check if the file name is a pseudo file name

            if (getPseudoFileInterface(ctx).isPseudoFile(sess, tree, name)) {

                // Make sure the parent folder has a file state, and the path exists

                String[] paths = FileName.splitPath(name);
                FileState fstate = ctx.getStateCache().findFileState(paths[0]);

                if (fstate != null) {

                    // Check if the path is to a pseudo file

                    PseudoFile pfile = getPseudoFileInterface(ctx).getPseudoFile(sess, tree, name);
                    if (pfile != null) {
                        // Delete the pseudo file

                        getPseudoFileInterface(ctx).deletePseudoFile(sess, tree, name);
                        return;
                    }
                }
            }
        }

        // Check if there is a quota manager enabled, if so then we need to save the current file size

        final QuotaManager quotaMgr = ctx.getQuotaManager();

        // Perform repository updates in a retryable write transaction
        Callable<Void> postTxn = doInWriteTransaction(sess, new CallableIO<Callable<Void>>() {
            public Callable<Void> call() throws IOException {
                // Get the node and delete it
                final NodeRef nodeRef = getNodeForPath(tree, name);

                Callable<Void> result = null;
                if (fileFolderService.exists(nodeRef)) {
                    // Get the size of the file being deleted

                    final FileInfo fInfo = quotaMgr == null ? null : getFileInformation(sess, tree, name);

                    // Check if the node is versionable

                    final boolean isVersionable = nodeService.hasAspect(nodeRef,
                            ContentModel.ASPECT_VERSIONABLE);

                    if (logger.isDebugEnabled()) {
                        logger.debug("deleted file" + name);
                    }
                    fileFolderService.delete(nodeRef);

                    // Return the operations to perform when the transaction succeeds

                    result = new Callable<Void>() {

                        public Void call() throws Exception {
                            // Remove the file state

                            if (ctx.hasStateCache()) {
                                // Check if the node is versionable, cache the node details for a short while

                                if (isVersionable) {

                                    // Make sure the file state is cached for a short while, a new file may be
                                    // renamed to the same name
                                    // in which case we can connect the file to the previous version history

                                    FileState delState = ctx.getStateCache().findFileState(name, true);

                                    if (logger.isDebugEnabled()) {
                                        logger.debug("set delete on close" + name);
                                    }
                                    delState.setExpiryTime(
                                            System.currentTimeMillis() + FileState.DeleteTimeout);
                                    delState.setFileStatus(DeleteOnClose);
                                    delState.setFilesystemObject(nodeRef);
                                } else {

                                    // Remove the file state

                                    ctx.getStateCache().removeFileState(name);
                                }

                                // Update, or create, a parent folder file state

                                String[] paths = FileName.splitPath(name);
                                if (paths[0] != null && paths[0].length() > 1) {
                                    // Get the file state for the parent folder

                                    FileState parentState = getStateForPath(tree, paths[0]);
                                    if (parentState == null && ctx.hasStateCache())
                                        parentState = ctx.getStateCache().findFileState(paths[0], true);

                                    // Update the modification timestamp

                                    parentState.updateModifyDateTime();
                                }
                            }

                            // Release the space back to the users quota

                            if (quotaMgr != null)
                                quotaMgr.releaseSpace(sess, tree, fInfo.getFileId(), name, fInfo.getSize());

                            return null;
                        }
                    };
                }

                // Debug

                if (logger.isDebugEnabled()
                        && (ctx.hasDebug(AlfrescoContext.DBG_FILE) || ctx.hasDebug(AlfrescoContext.DBG_RENAME)))
                    logger.debug("Deleted file: " + name + ", node=" + nodeRef);

                return result;
            }
        });

        // Perform state updates after the transaction succeeds
        postTxn.call();
    } catch (NodeLockedException ex) {
        // Debug

        if (logger.isDebugEnabled() && ctx.hasDebug(AlfrescoContext.DBG_FILE))
            logger.debug("Delete file - access denied (locked)");

        // Convert to a filesystem access denied status

        throw new AccessDeniedException("Delete " + name);
    } catch (org.alfresco.repo.security.permissions.AccessDeniedException ex) {
        // Debug

        if (logger.isDebugEnabled() && ctx.hasDebug(AlfrescoContext.DBG_FILE))
            logger.debug("Delete file - access denied");

        // Convert to a filesystem access denied status

        throw new AccessDeniedException("Delete " + name);
    } catch (IOException ex) {
        // Allow I/O Exceptions to pass through
        throw ex;
    } catch (Exception ex) {
        // Debug

        if (logger.isDebugEnabled() && ctx.hasDebug(AlfrescoContext.DBG_FILE))
            logger.debug("Delete file error", ex);

        // Convert to a general I/O exception

        IOException ioe = new IOException("Delete file " + name);
        ioe.initCause(ex);
        throw ioe;
    }
}

From source file:org.apache.lucene.index.IndexWriter.java

/** Just like {@link #forceMergeDeletes()}, except you can
 *  specify whether the call should block until the
 *  operation completes.  This is only meaningful with a
 *  {@link MergeScheduler} that is able to run merges in
 * background threads.
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer.  See <a
 * href="#OOME">above</a> for details.</p>
 *
 * <p><b>NOTE</b>: if you call {@link #close(boolean)}
 * with <tt>false</tt>, which aborts all running merges,
 * then any thread still running this method might hit a
 * {@link MergePolicy.MergeAbortedException}.
 */
public void forceMergeDeletes(boolean doWait) throws IOException {
    ensureOpen();

    flush(true, true);

    if (infoStream.isEnabled("IW")) {
        infoStream.message("IW", "forceMergeDeletes: index now " + segString());
    }

    MergePolicy.MergeSpecification spec;

    synchronized (this) {
        spec = mergePolicy.findForcedDeletesMerges(segmentInfos);
        if (spec != null) {
            final int numMerges = spec.merges.size();
            for (int i = 0; i < numMerges; i++)
                registerMerge(spec.merges.get(i));
        }
    }

    mergeScheduler.merge(this);

    if (spec != null && doWait) {
        final int numMerges = spec.merges.size();
        synchronized (this) {
            boolean running = true;
            while (running) {

                if (hitOOM) {
                    throw new IllegalStateException(
                            "this writer hit an OutOfMemoryError; cannot complete forceMergeDeletes");
                }

                // Check each merge that MergePolicy asked us to
                // do, to see if any of them are still running and
                // if any of them have hit an exception.
                running = false;
                for (int i = 0; i < numMerges; i++) {
                    final MergePolicy.OneMerge merge = spec.merges.get(i);
                    if (pendingMerges.contains(merge) || runningMerges.contains(merge)) {
                        running = true;
                    }
                    Throwable t = merge.getException();
                    if (t != null) {
                        IOException ioe = new IOException(
                                "background merge hit exception: " + merge.segString(directory));
                        ioe.initCause(t);
                        throw ioe;
                    }
                }

                // If any of our merges are still running, wait:
                if (running)
                    doWait();
            }
        }
    }

    // NOTE: in the ConcurrentMergeScheduler case, when
    // doWait is false, we can return immediately while
    // background threads accomplish the merging
}