Example usage for java.util Iterator wait

List of usage examples for java.util Iterator wait

Introduction

On this page you can find example usage of the wait() method called on a java.util.Iterator. Note that wait() is not declared by Iterator itself; it is inherited from java.lang.Object, so the iterator object is simply being used as a monitor.

Prototype

public final void wait() throws InterruptedException 

Source Link

Document

Causes the current thread to wait until it is awakened, typically by being notified or interrupted. The caller must hold the object's monitor (i.e., call wait() inside a synchronized block on that object), and because spurious wakeups are permitted, wait() should normally be invoked inside a loop that re-checks the condition being waited for.

Usage

From source file:org.apache.hadoop.hdfs.server.namenode.FSDirDeleteOp.java

/**
 * Removes a file/directory from the namespace.
 * <p>
 * For large directories, deletion is incremental. The blocks under
 * the directory are collected and deleted a small number at a time holding
 * the {@link FSNamesystem} lock.
 * <p>
 * For a small directory or a single file the deletion is done in one shot.
 *
 * @param fsn the active namesystem
 * @param srcArg the path to delete (may use reserved-path syntax; it is
 *        resolved before use)
 * @param recursive if {@code false}, only a single file or an empty
 *        directory may be deleted
 * @return {@code true} if the path was deleted (or a previous retry of the
 *         same operation already succeeded); {@code false} if the path does
 *         not exist or is the root
 * @throws IOException if the delete fails, the thread is interrupted while
 *         waiting for quota updates, or (when quota is enabled) this
 *         namenode is not the leader
 */
static boolean delete(final FSNamesystem fsn, String srcArg, final boolean recursive) throws IOException {
    final FSDirectory fsd = fsn.getFSDirectory();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(srcArg);
    final String src = fsd.resolvePath(srcArg, pathComponents);

    if (!recursive) {
        // It is safe to do this as it will only delete a single file or an empty directory
        return deleteTransaction(fsn, src, recursive);
    }

    PathInformation pathInfo = fsn.getPathExistingINodesFromDB(src, false, null, FsAction.WRITE, null, null);
    INode pathInode = pathInfo.getINodesInPath().getLastINode();

    if (pathInode == null) {
        NameNode.stateChangeLog.debug("Failed to remove " + src + " because it does not exist");
        return false;
    } else if (pathInode.isRoot()) {
        NameNode.stateChangeLog
                .warn("Failed to remove " + src + " because the root is not allowed to be deleted");
        return false;
    }

    INodeIdentifier subtreeRoot = null;
    if (pathInode.isFile()) {
        // A plain file never needs the subtree machinery below.
        return deleteTransaction(fsn, src, false);
    }

    RetryCache.CacheEntry cacheEntry = fsn.retryCacheWaitForCompletionTransactional();
    if (cacheEntry != null && cacheEntry.isSuccess()) {
        return true; // Return previous response
    }
    boolean ret = false;
    try {
        //if quota is enabled then only the leader namenode can delete the directory.
        //this is because before the deletion is done the quota manager has to apply all the outstanding
        //quota updates for the directory. The current design of the quota manager is not distributed.
        //HopsFS clients send the delete operations to the leader namenode if quota is enabled
        if (!fsn.isLeader()) {
            throw new QuotaUpdateException(
                    "Unable to delete the file " + src + " because Quota is enabled and I am not the leader");
        }

        //sub tree operation
        try {
            //once subtree is locked we still need to check all subAccess in AbstractFileTree.FileTree
            //permission check in Apache Hadoop: doCheckOwner:false, ancestorAccess:null, parentAccess:FsAction.WRITE,
            //access:null, subAccess:FsAction.ALL, ignoreEmptyDir:true
            subtreeRoot = fsn.lockSubtreeAndCheckOwnerAndParentPermission(src, false, FsAction.WRITE,
                    SubTreeOperation.Type.DELETE_STO);

            List<AclEntry> nearestDefaultsForSubtree = fsn.calculateNearestDefaultAclForSubtree(pathInfo);
            AbstractFileTree.FileTree fileTree = new AbstractFileTree.FileTree(fsn, subtreeRoot, FsAction.ALL,
                    true, nearestDefaultsForSubtree, subtreeRoot.getStoragePolicy());
            fileTree.buildUp(fsd.getBlockStoragePolicySuite());
            fsn.delayAfterBbuildingTree("Built tree for " + srcArg + " for delete op");

            if (fsd.isQuotaEnabled()) {
                // The iterator object is used as the monitor the QuotaUpdateManager
                // notifies once the prioritized quota updates have been applied.
                Iterator<Long> idIterator = fileTree.getAllINodesIds().iterator();
                synchronized (idIterator) {
                    fsn.getQuotaUpdateManager().addPrioritizedUpdates(idIterator);
                    try {
                        // NOTE(review): a bare wait() with no condition loop is vulnerable
                        // to spurious wakeups -- confirm QuotaUpdateManager exposes no
                        // re-checkable completion flag; if it does, wait in a loop on it.
                        idIterator.wait();
                    } catch (InterruptedException e) {
                        // Restore the interrupt status so callers up the stack can observe
                        // it, and keep the original exception as the cause.
                        Thread.currentThread().interrupt();
                        throw new IOException("Operation failed due to an Interrupt", e);
                    }
                }
            }

            // Delete leaves first: walk the tree bottom-up, one level per transaction batch.
            for (int i = fileTree.getHeight(); i > 0; i--) {
                if (!deleteTreeLevel(fsn, src, fileTree.getSubtreeRoot().getId(), fileTree, i)) {
                    ret = false;
                    return ret;
                }
            }
        } finally {
            // Always release the subtree lock, even when a level fails or an exception propagates.
            if (subtreeRoot != null) {
                fsn.unlockSubtree(src, subtreeRoot.getInodeId());
            }
        }
        ret = true;
        return ret;
    } finally {
        // Record success/failure in the retry cache so a retried RPC gets a consistent answer.
        fsn.retryCacheSetStateTransactional(cacheEntry, ret);
    }
}