Example usage for the java.io.InterruptedIOException() constructor

Introduction

On this page you can find example usages of the java.io.InterruptedIOException() constructor, collected from the source files listed below.

Prototype

public InterruptedIOException() 

Document

Constructs an InterruptedIOException with null as its error detail message.
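
Most of the examples below follow the same pattern: catch an InterruptedException, restore the thread's interrupt flag, and wrap the exception in an InterruptedIOException so it can propagate through a method whose throws clause only allows IOException. Here is a minimal sketch of that pattern; the class name InterruptExample and the method blockingWork are hypothetical stand-ins.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptExample {
    // Hypothetical helper: converts thread interruption into an IOException subtype.
    static void blockingWork() throws IOException {
        try {
            Thread.sleep(1000); // stand-in for any interruptible blocking call
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            InterruptedIOException iioe = new InterruptedIOException();
            iioe.initCause(ie); // the no-arg constructor leaves the detail message null
            throw iioe;
        }
    }
}

Because Throwable.initCause returns Throwable, some of the examples below cast its result instead of using a local variable, e.g. throw (IOException) new InterruptedIOException().initCause(ie); the cast is safe since InterruptedIOException extends IOException.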

Usage

From source file:org.marketcetera.util.except.ExceptUtilsTest.java

@Test
public void interruptException() {
    assertFalse(ExceptUtils.isInterruptException(new CloneNotSupportedException()));
    assertTrue(ExceptUtils.isInterruptException(new InterruptedException()));
    assertTrue(ExceptUtils.isInterruptException(new InterruptedIOException()));
    assertTrue(ExceptUtils.isInterruptException(new ClosedByInterruptException()));
    assertTrue(ExceptUtils.isInterruptException(new FileLockInterruptionException()));
    assertTrue(ExceptUtils.isInterruptException(new InterruptedNamingException()));
    assertTrue(ExceptUtils.isInterruptException(new I18NInterruptedException()));
    assertTrue(ExceptUtils.isInterruptException(new I18NInterruptedRuntimeException()));
}

From source file:org.hypertable.DfsBroker.hadoop.FSHDFSUtils.java

boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p, final Configuration conf)
        throws IOException {
    LOG.info("Recovering lease on dfs file " + p);
    long startWaiting = System.currentTimeMillis();
    // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS
    // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
    // beyond that limit 'to be safe'.
    long recoveryTimeout = 900000 + startWaiting;
    // This setting should be what the cluster dfs heartbeat is set to.
    long firstPause = 3000;
    // This should be set to how long it will take us to time out against the primary datanode
    // if it is dead. We set it to 61 seconds, 1 second more than the default READ_TIMEOUT in
    // HDFS, which is the default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY.
    long subsequentPause = 61000;

    Method isFileClosedMeth = null;
    // whether we need to look for isFileClosed method
    boolean findIsFileClosedMeth = true;
    boolean recovered = false;
    // We leave the loop if lease recovery succeeds, we time out, or an exception is thrown.
    for (int nbAttempt = 0; !recovered; nbAttempt++) {
        recovered = recoverLease(dfs, nbAttempt, p, startWaiting);
        if (recovered)
            break;
        if (checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting))
            break;
        try {
            // On the first time through, wait the short 'firstPause'.
            if (nbAttempt == 0) {
                Thread.sleep(firstPause);
            } else {
                // Cycle here until subsequentPause elapses. While spinning, check isFileClosed
                // if available (it should be in Hadoop 2.0.5, but is not in Hadoop 1).
                long localStartWaiting = System.currentTimeMillis();
                while ((System.currentTimeMillis() - localStartWaiting) < subsequentPause) {
                    Thread.sleep(1000);
                    if (findIsFileClosedMeth) {
                        try {
                            isFileClosedMeth = dfs.getClass().getMethod("isFileClosed",
                                    new Class[] { Path.class });
                        } catch (NoSuchMethodException nsme) {
                            LOG.debug("isFileClosed not available");
                        } finally {
                            findIsFileClosedMeth = false;
                        }
                    }
                    if (isFileClosedMeth != null && isFileClosed(dfs, isFileClosedMeth, p)) {
                        recovered = true;
                        break;
                    }
                }
            }
        } catch (InterruptedException ie) {
            InterruptedIOException iioe = new InterruptedIOException();
            iioe.initCause(ie);
            throw iioe;
        }
    }
    return recovered;
}

From source file:org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler.java

@Override
public void process() throws IOException {
    boolean regionAHasMergeQualifier = !catalogJanitor.cleanMergeQualifier(region_a);
    if (regionAHasMergeQualifier || !catalogJanitor.cleanMergeQualifier(region_b)) {
        LOG.info("Skip merging regions " + region_a.getRegionNameAsString() + ", "
                + region_b.getRegionNameAsString() + ", because region "
                + (regionAHasMergeQualifier ? region_a.getEncodedName() : region_b.getEncodedName())
                + " has merge qualifier");
        return;
    }

    RegionStates regionStates = masterServices.getAssignmentManager().getRegionStates();
    ServerName region_a_location = regionStates.getRegionServerOfRegion(region_a);
    ServerName region_b_location = regionStates.getRegionServerOfRegion(region_b);
    if (region_a_location == null || region_b_location == null) {
        LOG.info("Skip merging regions " + region_a.getRegionNameAsString() + ", "
                + region_b.getRegionNameAsString() + ", because region "
                + (region_a_location == null ? region_a.getEncodedName() : region_b.getEncodedName())
                + " is not online now");
        return;
    }
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    boolean onSameRS = region_a_location.equals(region_b_location);

    // Make sure the regions are on the same regionserver before sending the
    // merge regions request to the regionserver
    if (!onSameRS) {
        // Move region_b to region_a's location; swap region_a and region_b if
        // region_a's load is lower than region_b's, so that we always move the
        // lower-load region
        RegionLoad loadOfRegionA = getRegionLoad(region_a_location, region_a);
        RegionLoad loadOfRegionB = getRegionLoad(region_b_location, region_b);
        if (loadOfRegionA != null && loadOfRegionB != null
                && loadOfRegionA.getRequestsCount() < loadOfRegionB.getRequestsCount()) {
            // switch region_a and region_b
            HRegionInfo tmpRegion = this.region_a;
            this.region_a = this.region_b;
            this.region_b = tmpRegion;
            ServerName tmpLocation = region_a_location;
            region_a_location = region_b_location;
            region_b_location = tmpLocation;
        }

        RegionPlan regionPlan = new RegionPlan(region_b, region_b_location, region_a_location);
        LOG.info("Moving regions to same server for merge: " + regionPlan.toString());
        masterServices.getAssignmentManager().balance(regionPlan);
        while (!masterServices.isStopped()) {
            try {
                Thread.sleep(20);
                // Make sure to check RIT first, then get the region location; otherwise
                // we could get a wrong result if the region comes online between getting
                // the region location and checking RIT
                boolean isRIT = regionStates.isRegionInTransition(region_b);
                region_b_location = masterServices.getAssignmentManager().getRegionStates()
                        .getRegionServerOfRegion(region_b);
                onSameRS = region_a_location.equals(region_b_location);
                if (onSameRS || !isRIT) {
                    // Regions are on the same RS, or region_b is not in
                    // RegionInTransition any more
                    break;
                }
                if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout)
                    break;
            } catch (InterruptedException e) {
                InterruptedIOException iioe = new InterruptedIOException();
                iioe.initCause(e);
                throw iioe;
            }
        }
    }

    if (onSameRS) {
        startTime = EnvironmentEdgeManager.currentTimeMillis();
        while (!masterServices.isStopped()) {
            try {
                masterServices.getServerManager().sendRegionsMerge(region_a_location, region_a, region_b,
                        forcible);
                LOG.info(
                        "Sent merge to server " + region_a_location + " for region " + region_a.getEncodedName()
                                + "," + region_b.getEncodedName() + ", focible=" + forcible);
                break;
            } catch (RegionOpeningException roe) {
                if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) {
                    LOG.warn("Failed sending merge to " + region_a_location + " after " + timeout + "ms", roe);
                    break;
                }
                // Retry, since the region should be online on the RS immediately
            } catch (IOException ie) {
                LOG.warn("Failed sending merge to " + region_a_location + " for region "
                        + region_a.getEncodedName() + "," + region_b.getEncodedName() + ", forcible=" + forcible,
                        ie);
                break;
            }
        }
    } else {
        LOG.info("Cancel merging regions " + region_a.getRegionNameAsString() + ", "
                + region_b.getRegionNameAsString() + ", because they could not be moved together after "
                + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms");
    }
}

From source file:org.hyperic.hq.bizapp.agent.server.SSLConnectionListener.java

public AgentServerConnection getNewConnection() throws AgentConnectionException, InterruptedIOException {
    AgentServerConnection res;
    SSLSocket inConn = null;
    boolean success = false;
    try {
        inConn = (SSLSocket) this.listenSock.accept();
        inConn.setSoTimeout(READ_TIMEOUT);
    } catch (InterruptedIOException exc) {
        throw exc;
    } catch (IOException exc) {
        throw new AgentConnectionException(exc.getMessage(), exc);
    }
    try {
        res = handleNewConn(inConn);
        success = true;
    } catch (SocketTimeoutException e) {
        InterruptedIOException toThrow = new InterruptedIOException();
        toThrow.initCause(e);
        log.warn("socket timed out while handling a command from the server: " + e);
        log.debug(e, e);
        throw toThrow;
    } finally {
        if (!success) {
            close(inConn);
        }
    }
    return res;
}

From source file:com.alibaba.wasp.master.handler.TableEventHandler.java

/**
 * Table modifications are processed asynchronously, but this method provides
 * an API for you to query their status.
 * 
 * @throws java.io.IOException
 */
public synchronized void waitForPersist() throws IOException {
    if (!persistedToZk) {
        try {
            wait();
        } catch (InterruptedException ie) {
            throw (IOException) new InterruptedIOException().initCause(ie);
        }
        assert persistedToZk;
    }
}

From source file:org.cryptomator.frontend.webdav.mount.WindowsWebDavMounter.java

private void addProxyOverrides(URI uri) throws IOException, CommandFailedException {
    try {
        // get the existing value for the ProxyOverride key from the registry:
        ProcessBuilder query = new ProcessBuilder("reg", "query",
                "\"HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\"", "/v",
                "ProxyOverride");
        Process queryCmd = query.start();
        String queryStdOut = IOUtils.toString(queryCmd.getInputStream(), StandardCharsets.UTF_8);
        int queryResult = queryCmd.waitFor();

        // determine new value for ProxyOverride key:
        Set<String> overrides = new HashSet<>();
        Matcher matcher = REG_QUERY_PROXY_OVERRIDES_PATTERN.matcher(queryStdOut);
        if (queryResult == 0 && matcher.find()) {
            String[] existingOverrides = StringUtils.split(matcher.group(1), ';');
            overrides.addAll(Arrays.asList(existingOverrides));
        }
        overrides.removeIf(s -> s.startsWith(uri.getHost() + ":"));
        overrides.add("<local>");
        overrides.add(uri.getHost());
        overrides.add(uri.getHost() + ":" + uri.getPort());

        // set new value:
        String overridesStr = StringUtils.join(overrides, ';');
        ProcessBuilder add = new ProcessBuilder("reg", "add",
                "\"HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\"", "/v",
                "ProxyOverride", "/d", "\"" + overridesStr + "\"", "/f");
        LOG.debug("Invoking command: " + StringUtils.join(add.command(), ' '));
        Process addCmd = add.start();
        int addResult = addCmd.waitFor();
        if (addResult != 0) {
            String addStdErr = IOUtils.toString(addCmd.getErrorStream(), StandardCharsets.UTF_8);
            throw new CommandFailedException(addStdErr);
        }
    } catch (IOException | CommandFailedException e) {
        LOG.info("Failed to add proxy overrides", e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        InterruptedIOException ioException = new InterruptedIOException();
        ioException.initCause(e);
        throw ioException;
    }
}

From source file:org.apache.hadoop.hbase.wal.LogRecoveredEditsOutputSink.java

/**
 * Close all of the output streams.
 * @return the list of paths written.
 */
List<Path> close() throws IOException {
    Preconditions.checkState(!closeAndCleanCompleted);

    final List<Path> paths = new ArrayList<>();
    final List<IOException> thrown = Lists.newArrayList();
    ThreadPoolExecutor closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L, TimeUnit.SECONDS,
            new ThreadFactory() {
                private int count = 1;

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(r, "split-log-closeStream-" + count++);
                    return t;
                }
            });
    CompletionService<Void> completionService = new ExecutorCompletionService<>(closeThreadPool);
    boolean progress_failed;
    try {
        progress_failed = executeCloseTask(completionService, thrown, paths);
    } catch (InterruptedException e) {
        IOException iie = new InterruptedIOException();
        iie.initCause(e);
        throw iie;
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    } finally {
        closeThreadPool.shutdownNow();
    }
    if (!thrown.isEmpty()) {
        throw MultipleIOException.createIOException(thrown);
    }
    writersClosed = true;
    closeAndCleanCompleted = true;
    if (progress_failed) {
        return null;
    }
    return paths;
}

From source file:org.marketcetera.util.except.ExceptUtilsTest.java

@Test
public void interrupt() {
    interruptHelper(new CloneNotSupportedException(), false);
    interruptHelper(new InterruptedException(), true);
    interruptHelper(new InterruptedIOException(), true);
    interruptHelper(new ClosedByInterruptException(), true);
    interruptHelper(new FileLockInterruptionException(), true);
    interruptHelper(new InterruptedNamingException(), true);
    interruptHelper(new I18NInterruptedException(), true);
    interruptHelper(new I18NInterruptedRuntimeException(), true);
}

From source file:org.marketcetera.util.except.ExceptUtilsTest.java

@Test
public void swallow() {
    swallowHelper(new CloneNotSupportedException(), false);
    swallowHelper(new InterruptedException(), true);
    swallowHelper(new InterruptedIOException(), true);
    swallowHelper(new ClosedByInterruptException(), true);
    swallowHelper(new FileLockInterruptionException(), true);
    swallowHelper(new InterruptedNamingException(), true);
    swallowHelper(new I18NInterruptedException(), true);
    swallowHelper(new I18NInterruptedRuntimeException(), true);
}

From source file:org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler.java

/**
 * Retries verifyAndAssignMeta, aborting shutdown processing after repeated failures.
 * @throws IOException
 */
private void verifyAndAssignMetaWithRetries() throws IOException {
    int iTimes = this.server.getConfiguration().getInt("hbase.catalog.verification.retries", 10);

    long waitTime = this.server.getConfiguration().getLong("hbase.catalog.verification.timeout", 1000);

    int iFlag = 0;
    while (true) {
        try {
            verifyAndAssignMeta();
            break;
        } catch (KeeperException e) {
            this.server.abort("In server shutdown processing, assigning meta", e);
            throw new IOException("Aborting", e);
        } catch (Exception e) {
            if (iFlag >= iTimes) {
                this.server.abort("verifyAndAssignMeta failed after" + iTimes + " times retries, aborting", e);
                throw new IOException("Aborting", e);
            }
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e1) {
                LOG.warn("Interrupted when is the thread sleep", e1);
                Thread.currentThread().interrupt();
                throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
            }
            iFlag++;
        }
    }
}