Example usage for java.io InterruptedIOException initCause

Introduction

On this page you can find example usage for java.io InterruptedIOException initCause, collected from open-source projects.

Prototype

public synchronized Throwable initCause(Throwable cause) 

Document

Initializes the cause of this throwable to the specified value.
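
The cause may be set at most once; calling initCause again throws IllegalStateException. InterruptedIOException has no constructor that accepts a cause, which is why the examples below call initCause after construction. The sketch that follows is a minimal, self-contained illustration of the idiom that recurs throughout this page: catch InterruptedException, restore the interrupt flag, wrap the exception in an InterruptedIOException, and attach the original exception with initCause. Class and method names (InitCauseExample, blockingStep) are illustrative and not taken from any of the projects listed below.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InitCauseExample {

    /**
     * Stands in for a blocking I/O step. If the thread is interrupted while
     * waiting, the InterruptedException is wrapped in an InterruptedIOException
     * via initCause, so callers see an IOException subtype but can still reach
     * the original exception through getCause().
     */
    static void blockingStep() throws IOException {
        try {
            Thread.sleep(1000); // stand-in for a blocking wait
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            InterruptedIOException iioe = new InterruptedIOException(ie.getMessage());
            iioe.initCause(ie); // preserve the original exception as the cause
            throw iioe;
        }
    }

    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            try {
                blockingStep();
            } catch (IOException e) {
                System.out.println("Caught: " + e + ", cause: " + e.getCause());
            }
        });
        worker.start();
        worker.interrupt(); // triggers the InterruptedException inside blockingStep
        worker.join();
    }
}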

Usage

From source file:org.apache.hadoop.hbase.MetaTableAccessor.java

/**
 * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
 * @param connection connection we're using
 * @param mutations Puts and Deletes to execute on hbase:meta
 * @throws IOException
 */
public static void mutateMetaTable(final Connection connection, final List<Mutation> mutations)
        throws IOException {
    Table t = getMetaHTable(connection);
    try {
        if (METALOG.isDebugEnabled()) {
            METALOG.debug(mutationsToString(mutations));
        }
        t.batch(mutations, null);
    } catch (InterruptedException e) {
        InterruptedIOException ie = new InterruptedIOException(e.getMessage());
        ie.initCause(e);
        throw ie;
    } finally {
        t.close();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.HRegion.java

/**
 * Tries to acquire a lock on the given row.
 * @param waitForLock if true, will block until the lock is available.
 *        Otherwise, just tries to obtain the lock and returns
 *        false if unavailable.
 * @return the row lock if acquired,
 *   null if waitForLock was false and the lock was not acquired
 * @throws IOException if waitForLock was true and the lock could not be acquired after waiting
 */
public RowLock getRowLock(byte[] row, boolean waitForLock) throws IOException {
    checkRow(row, "row lock");
    startRegionOperation();
    try {
        HashedBytes rowKey = new HashedBytes(row);
        RowLockContext rowLockContext = new RowLockContext(rowKey);

        // loop until we acquire the row lock (unless !waitForLock)
        while (true) {
            RowLockContext existingContext = lockedRows.putIfAbsent(rowKey, rowLockContext);
            if (existingContext == null) {
                // Row is not already locked by any thread, use newly created context.
                break;
            } else if (existingContext.ownedByCurrentThread()) {
                // Row is already locked by current thread, reuse existing context instead.
                rowLockContext = existingContext;
                break;
            } else {
                // Row is already locked by some other thread, give up or wait for it
                if (!waitForLock) {
                    return null;
                }
                try {
                    if (!existingContext.latch.await(this.rowLockWaitDuration, TimeUnit.MILLISECONDS)) {
                        throw new IOException("Timed out waiting for lock for row: " + rowKey);
                    }
                } catch (InterruptedException ie) {
                    LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
                    InterruptedIOException iie = new InterruptedIOException();
                    iie.initCause(ie);
                    throw iie;
                }
            }
        }

        // allocate new lock for this thread
        return rowLockContext.newLock();
    } finally {
        closeRegionOperation();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.HRegion.java

/**
 * Try to acquire a lock.  Throw RegionTooBusyException
 * if failed to get the lock in time. Throw InterruptedIOException
 * if interrupted while waiting for the lock.
 */
private void lock(final Lock lock, final int multiplier) throws RegionTooBusyException, InterruptedIOException {
    try {
        final long waitTime = Math.min(maxBusyWaitDuration,
                busyWaitDuration * Math.min(multiplier, maxBusyWaitMultiplier));
        if (!lock.tryLock(waitTime, TimeUnit.MILLISECONDS)) {
            throw new RegionTooBusyException("failed to get a lock in " + waitTime + " ms. " + "regionName="
                    + (this.getRegionInfo() == null ? "unknown" : this.getRegionInfo().getRegionNameAsString())
                    + ", server=" + (this.getRegionServerServices() == null ? "unknown"
                            : this.getRegionServerServices().getServerName()));
        }
    } catch (InterruptedException ie) {
        LOG.info("Interrupted while waiting for a lock");
        InterruptedIOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.HLogFactory.java

public static HLog.Reader createReader(final FileSystem fs, final Path path, Configuration conf,
        CancelableProgressable reporter, boolean allowCustom) throws IOException {
    if (allowCustom && (logReaderClass == null)) {
        logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
                Reader.class);
    }
    Class<? extends Reader> lrClass = allowCustom ? logReaderClass : ProtobufLogReader.class;

    try {
        // A hlog file could be under recovery, so it may take several
        // tries to get it open. Instead of claiming it is corrupted, retry
        // to open it up to 5 minutes by default.
        long startWaiting = EnvironmentEdgeManager.currentTimeMillis();
        long openTimeout = conf.getInt("hbase.hlog.open.timeout", 300000) + startWaiting;
        int nbAttempt = 0;
        while (true) {
            try {
                if (lrClass != ProtobufLogReader.class) {
                    // User is overriding the WAL reader, let them.
                    HLog.Reader reader = lrClass.newInstance();
                    reader.init(fs, path, conf, null);
                    return reader;
                } else {
                    FSDataInputStream stream = fs.open(path);
                    // Note that zero-length file will fail to read PB magic, and attempt to create
                    // a non-PB reader and fail the same way existing code expects it to. If we get
                    // rid of the old reader entirely, we need to handle 0-size files differently from
                    // merely non-PB files.
                    byte[] magic = new byte[ProtobufLogReader.PB_WAL_MAGIC.length];
                    boolean isPbWal = (stream.read(magic) == magic.length)
                            && Arrays.equals(magic, ProtobufLogReader.PB_WAL_MAGIC);
                    HLog.Reader reader = isPbWal ? new ProtobufLogReader() : new SequenceFileLogReader();
                    reader.init(fs, path, conf, stream);
                    return reader;
                }
            } catch (IOException e) {
                String msg = e.getMessage();
                if (msg != null && (msg.contains("Cannot obtain block length")
                        || msg.contains("Could not obtain the last block")
                        || msg.matches("Blocklist for [^ ]* has changed.*"))) {
                    if (++nbAttempt == 1) {
                        LOG.warn("Lease should have recovered. This is not expected. Will retry", e);
                    }
                    if (reporter != null && !reporter.progress()) {
                        throw new InterruptedIOException("Operation is cancelled");
                    }
                    if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTimeMillis()) {
                        LOG.error("Can't open after " + nbAttempt + " attempts and "
                                + (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + "ms " + " for "
                                + path);
                    } else {
                        try {
                            Thread.sleep(nbAttempt < 3 ? 500 : 1000);
                            continue; // retry
                        } catch (InterruptedException ie) {
                            InterruptedIOException iioe = new InterruptedIOException();
                            iioe.initCause(ie);
                            throw iioe;
                        }
                    }
                }
                throw e;
            }
        }
    } catch (IOException ie) {
        throw ie;
    } catch (Exception e) {
        throw new IOException("Cannot get log reader", e);
    }
}

From source file:org.apache.hadoop.hbase.replication.regionserver.HFileReplicator.java

private Map<String, Path> copyHFilesToStagingDir() throws IOException {
    Map<String, Path> mapOfCopiedHFiles = new HashMap<String, Path>();
    Pair<byte[], List<String>> familyHFilePathsPair;
    List<String> hfilePaths;
    byte[] family;
    Path familyStagingDir;
    int familyHFilePathsPairsListSize;
    int totalNoOfHFiles;
    List<Pair<byte[], List<String>>> familyHFilePathsPairsList;
    FileSystem sourceFs = null;

    try {
        Path sourceClusterPath = new Path(sourceBaseNamespaceDirPath);
        /*
         * Path#getFileSystem will by default get the FS from cache. If both source and sink clusters
         * have the same FS name service, it will return the peer cluster FS. To avoid this we explicitly
         * disable the loading of FS from cache, so that a new FS is created with source cluster
         * configuration.
         */
        String sourceScheme = sourceClusterPath.toUri().getScheme();
        String disableCacheName = String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme });
        sourceClusterConf.setBoolean(disableCacheName, true);

        sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf);

        User user = userProvider.getCurrent();
        // For each table name in the map
        for (Entry<String, List<Pair<byte[], List<String>>>> tableEntry : bulkLoadHFileMap.entrySet()) {
            String tableName = tableEntry.getKey();

            // Create staging directory for each table
            Path stagingDir = createStagingDir(new Path(hbaseStagingDir), user, TableName.valueOf(tableName));

            familyHFilePathsPairsList = tableEntry.getValue();
            familyHFilePathsPairsListSize = familyHFilePathsPairsList.size();

            // For each list of family hfile paths pair in the table
            for (int i = 0; i < familyHFilePathsPairsListSize; i++) {
                familyHFilePathsPair = familyHFilePathsPairsList.get(i);

                family = familyHFilePathsPair.getFirst();
                hfilePaths = familyHFilePathsPair.getSecond();

                familyStagingDir = new Path(stagingDir, Bytes.toString(family));
                totalNoOfHFiles = hfilePaths.size();

                // For each list of hfile paths for the family
                List<Future<Void>> futures = new ArrayList<Future<Void>>();
                Callable<Void> c;
                Future<Void> future;
                int currentCopied = 0;
                // Copy the hfiles in parallel
                while (totalNoOfHFiles > currentCopied + this.copiesPerThread) {
                    c = new Copier(sourceFs, familyStagingDir,
                            hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread));
                    future = exec.submit(c);
                    futures.add(future);
                    currentCopied += this.copiesPerThread;
                }

                int remaining = totalNoOfHFiles - currentCopied;
                if (remaining > 0) {
                    c = new Copier(sourceFs, familyStagingDir,
                            hfilePaths.subList(currentCopied, currentCopied + remaining));
                    future = exec.submit(c);
                    futures.add(future);
                }

                for (Future<Void> f : futures) {
                    try {
                        f.get();
                    } catch (InterruptedException e) {
                        InterruptedIOException iioe = new InterruptedIOException(
                                "Failed to copy HFiles to local file system. This will be retried again "
                                        + "by the source cluster.");
                        iioe.initCause(e);
                        throw iioe;
                    } catch (ExecutionException e) {
                        throw new IOException("Failed to copy HFiles to local file system. This will "
                                + "be retried again by the source cluster.", e);
                    }
                }
            }
            // Add the staging directory to this table. Staging directory contains all the hfiles
            // belonging to this table
            mapOfCopiedHFiles.put(tableName, stagingDir);
        }
        return mapOfCopiedHFiles;
    } finally {
        if (sourceFs != null) {
            sourceFs.close();
        }
        if (exec != null) {
            exec.shutdown();
        }
    }
}

From source file:org.apache.hadoop.hbase.util.FSHDFSUtils.java

boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p, final Configuration conf,
        final CancelableProgressable reporter) throws IOException {
    LOG.info("Recovering lease on dfs file " + p);
    long startWaiting = EnvironmentEdgeManager.currentTimeMillis();
    // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS
    // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
    // beyond that limit 'to be safe'.
    long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
    // This setting should be a little bit above what the cluster dfs heartbeat is set to.
    long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000);
    // This should be set to how long it'll take for us to timeout against primary datanode if it
    // is dead.  We set it to 61 seconds, 1 second more than the default READ_TIMEOUT in HDFS, the
    // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY.
    long subsequentPause = conf.getInt("hbase.lease.recovery.dfs.timeout", 61 * 1000);

    Method isFileClosedMeth = null;
    // whether we need to look for isFileClosed method
    boolean findIsFileClosedMeth = true;
    boolean recovered = false;
    // We break the loop if we succeed in lease recovery, time out, or throw an exception.
    for (int nbAttempt = 0; !recovered; nbAttempt++) {
        recovered = recoverLease(dfs, nbAttempt, p, startWaiting);
        if (recovered)
            break;
        checkIfCancelled(reporter);
        if (checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting))
            break;
        try {
            // On the first time through wait the short 'firstPause'.
            if (nbAttempt == 0) {
                Thread.sleep(firstPause);
            } else {
                // Cycle here until subsequentPause elapses.  While spinning, check isFileClosed if
                // available (should be in hadoop 2.0.5... not in hadoop 1 though).
                long localStartWaiting = EnvironmentEdgeManager.currentTimeMillis();
                while ((EnvironmentEdgeManager.currentTimeMillis() - localStartWaiting) < subsequentPause) {
                    Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
                    if (findIsFileClosedMeth) {
                        try {
                            isFileClosedMeth = dfs.getClass().getMethod("isFileClosed",
                                    new Class[] { Path.class });
                        } catch (NoSuchMethodException nsme) {
                            LOG.debug("isFileClosed not available");
                        } finally {
                            findIsFileClosedMeth = false;
                        }
                    }
                    if (isFileClosedMeth != null && isFileClosed(dfs, isFileClosedMeth, p)) {
                        recovered = true;
                        break;
                    }
                    checkIfCancelled(reporter);
                }
            }
        } catch (InterruptedException ie) {
            InterruptedIOException iioe = new InterruptedIOException();
            iioe.initCause(ie);
            throw iioe;
        }
    }
    return recovered;
}

From source file:org.apache.hadoop.hbase.util.ModifyRegionUtils.java

/**
 * Triggers a bulk assignment of the specified regions
 *
 * @param assignmentManager the Assignment Manger
 * @param regionInfos the list of regions to assign
 * @throws IOException if an error occurred during the assignment
 */
public static void assignRegions(final AssignmentManager assignmentManager, final List<HRegionInfo> regionInfos)
        throws IOException {
    try {
        assignmentManager.getRegionStates().createRegionStates(regionInfos);
        assignmentManager.assign(regionInfos);
    } catch (InterruptedException e) {
        LOG.error("Caught " + e + " during round-robin assignment");
        InterruptedIOException ie = new InterruptedIOException(e.getMessage());
        ie.initCause(e);
        throw ie;
    }
}

From source file:org.apache.hadoop.hbase.wal.WALFactory.java

public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter,
        boolean allowCustom) throws IOException {
    Class<? extends DefaultWALProvider.Reader> lrClass = allowCustom ? logReaderClass : ProtobufLogReader.class;

    try {
        // A wal file could be under recovery, so it may take several
        // tries to get it open. Instead of claiming it is corrupted, retry
        // to open it up to 5 minutes by default.
        long startWaiting = EnvironmentEdgeManager.currentTime();
        long openTimeout = timeoutMillis + startWaiting;
        int nbAttempt = 0;
        FSDataInputStream stream = null;
        while (true) {
            try {
                if (lrClass != ProtobufLogReader.class) {
                    // User is overriding the WAL reader, let them.
                    DefaultWALProvider.Reader reader = lrClass.newInstance();
                    reader.init(fs, path, conf, null);
                    return reader;
                } else {
                    stream = fs.open(path);
                    // Note that zero-length file will fail to read PB magic, and attempt to create
                    // a non-PB reader and fail the same way existing code expects it to. If we get
                    // rid of the old reader entirely, we need to handle 0-size files differently from
                    // merely non-PB files.
                    byte[] magic = new byte[ProtobufLogReader.PB_WAL_MAGIC.length];
                    boolean isPbWal = (stream.read(magic) == magic.length)
                            && Arrays.equals(magic, ProtobufLogReader.PB_WAL_MAGIC);
                    DefaultWALProvider.Reader reader = isPbWal ? new ProtobufLogReader()
                            : new SequenceFileLogReader();
                    reader.init(fs, path, conf, stream);
                    return reader;
                }
            } catch (IOException e) {
                try {
                    if (stream != null) {
                        stream.close();
                    }
                } catch (IOException exception) {
                    LOG.warn("Could not close FSDataInputStream" + exception.getMessage());
                    LOG.debug("exception details", exception);
                }
                String msg = e.getMessage();
                if (msg != null && (msg.contains("Cannot obtain block length")
                        || msg.contains("Could not obtain the last block")
                        || msg.matches("Blocklist for [^ ]* has changed.*"))) {
                    if (++nbAttempt == 1) {
                        LOG.warn("Lease should have recovered. This is not expected. Will retry", e);
                    }
                    if (reporter != null && !reporter.progress()) {
                        throw new InterruptedIOException("Operation is cancelled");
                    }
                    if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) {
                        LOG.error("Can't open after " + nbAttempt + " attempts and "
                                + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for "
                                + path);
                    } else {
                        try {
                            Thread.sleep(nbAttempt < 3 ? 500 : 1000);
                            continue; // retry
                        } catch (InterruptedException ie) {
                            InterruptedIOException iioe = new InterruptedIOException();
                            iioe.initCause(ie);
                            throw iioe;
                        }
                    }
                }
                throw e;
            }
        }
    } catch (IOException ie) {
        throw ie;
    } catch (Exception e) {
        throw new IOException("Cannot get log reader", e);
    }
}

From source file:org.apache.hadoop.hbase.wal.WALKey.java

/**
 * Wait for sequence number to be assigned &amp; return the assigned value.
 * @param maxWaitForSeqId maximum time to wait in milliseconds for sequenceid
 * @return long the new assigned sequence number
 * @throws IOException
 */
public long getSequenceId(final long maxWaitForSeqId) throws IOException {
    // TODO: This implementation waiting on a latch is problematic because if a higher level
    // determines we should stop or abort, there is no global list of all these blocked WALKeys
    // waiting on a sequence id; they can't be cancelled... interrupted. See getNextSequenceId.
    //
    // UPDATE: I think we can remove the timeout now we are stamping all walkeys with sequenceid,
    // even those that have failed (previously we were not... so they would just hang out...).
    // St.Ack 20150910
    try {
        if (maxWaitForSeqId < 0) {
            this.seqNumAssignedLatch.await();
        } else if (!this.seqNumAssignedLatch.await(maxWaitForSeqId, TimeUnit.MILLISECONDS)) {
            throw new TimeoutIOException("Failed to get sequenceid after " + maxWaitForSeqId
                    + "ms; WAL system stuck or has gone away?");
        }
    } catch (InterruptedException ie) {
        LOG.warn("Thread interrupted waiting for next log sequence number");
        InterruptedIOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
    }
    return this.logSeqNum;
}

From source file:org.apache.hadoop.hdfs.DFSOutputStream.java

/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets.*/
protected DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock, long seqno,
        boolean lastPacketInBlock) throws InterruptedIOException {
    final byte[] buf;
    final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

    try {
        buf = byteArrayManager.newByteArray(bufferSize);
    } catch (InterruptedException ie) {
        final InterruptedIOException iioe = new InterruptedIOException("seqno=" + seqno);
        iioe.initCause(ie);
        throw iioe;
    }

    return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno, checksum.getChecksumSize(),
            lastPacketInBlock);
}