Example usage for java.io IOException getCause

List of usage examples for java.io IOException getCause

Introduction

On this page you can find usage examples for java.io.IOException.getCause().

Prototype

public synchronized Throwable getCause() 

Documentation

Returns the cause of this throwable or null if the cause is nonexistent or unknown.
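
Quick Example

A minimal, self-contained sketch of attaching a cause to an IOException and reading it back with getCause(). The class name and messages are illustrative only; they do not come from any of the source files below.

import java.io.IOException;

public class GetCauseSketch {
    public static void main(String[] args) {
        try {
            try {
                throw new NullPointerException("underlying failure");
            } catch (NullPointerException npe) {
                // Wrap the low-level failure; the NPE becomes the cause of the IOException.
                throw new IOException("could not open reader", npe);
            }
        } catch (IOException ioe) {
            // getCause() returns the wrapped throwable, or null if no cause was set.
            if (ioe.getCause() instanceof NullPointerException) {
                System.out.println("Cause: " + ioe.getCause());
            }
        }
    }
}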

Usage

From source file:com.ngdata.sep.impl.fork.ForkedReplicationSource.java

/**
 * Open a reader on the current path
 *
 * @param sleepMultiplier by how many times the default sleeping time is augmented
 * @return true if we should continue with that file, false if we are over with it
 */
protected boolean openReader(int sleepMultiplier) {
    try {
        LOG.debug("Opening log for replication " + this.currentPath.getName() + " at "
                + this.repLogReader.getPosition());
        try {
            this.reader = repLogReader.openReader(this.currentPath);
        } catch (FileNotFoundException fnfe) {
            if (this.queueRecovered) {
                // We didn't find the log in the archive directory, look if it still
                // exists in the dead RS folder (there could be a chain of failures
                // to look at)
                LOG.info("NB dead servers : " + deadRegionServers.size());
                for (String curDeadServerName : deadRegionServers) {
                    Path deadRsDirectory = new Path(manager.getLogDir().getParent(), curDeadServerName);
                    Path[] locs = new Path[] { new Path(deadRsDirectory, currentPath.getName()),
                            new Path(deadRsDirectory.suffix(HLog.SPLITTING_EXT), currentPath.getName()), };
                    for (Path possibleLogLocation : locs) {
                        LOG.info("Possible location " + possibleLogLocation.toUri().toString());
                        if (this.manager.getFs().exists(possibleLogLocation)) {
                            // We found the right new location
                            LOG.info("Log " + this.currentPath + " still exists at " + possibleLogLocation);
                            // Breaking here will make us sleep since reader is null
                            return true;
                        }
                    }
                }
                // TODO What happens if the log was missing from every single location?
                // Although we need to check a couple of times as the log could have
                // been moved by the master between the checks
                // It can also happen if a recovered queue wasn't properly cleaned,
                // such that the znode pointing to a log exists but the log was
                // deleted a long time ago.
                // For the moment, we'll throw the IO and processEndOfFile
                throw new IOException("File from recovered queue is nowhere to be found", fnfe);
            } else {
                // If the log was archived, continue reading from there
                Path archivedLogLocation = new Path(manager.getOldLogDir(), currentPath.getName());
                if (this.manager.getFs().exists(archivedLogLocation)) {
                    currentPath = archivedLogLocation;
                    LOG.info("Log " + this.currentPath + " was moved to " + archivedLogLocation);
                    // Open the log at the new location
                    this.openReader(sleepMultiplier);
                }
                // TODO What happens if the log is missing in both places?
            }
        }
    } catch (IOException ioe) {
        if (ioe instanceof EOFException && isCurrentLogEmpty())
            return true;
        LOG.warn(peerClusterZnode + " Got: ", ioe);
        this.reader = null;
        if (ioe.getCause() instanceof NullPointerException) {
            // Workaround for race condition in HDFS-4380
            // which throws a NPE if we open a file before any data node has the most recent block
            // Just sleep and retry.  Will require re-reading compressed HLogs for compressionContext.
            LOG.warn("Got NPE opening reader, will retry.");
        } else if (sleepMultiplier == this.maxRetriesMultiplier) {
            // TODO Need a better way to determine if a file is really gone but
            // TODO without scanning all logs dir
            LOG.warn("Waited too long for this file, considering dumping");
            return !processEndOfFile();
        }
    }
    return true;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer.java

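// Verifies that a token renew() failure during application submission surfaces as an
// IOException whose cause carries the original "boom" error.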
@Test(timeout = 20000)
public void testDTRonAppSubmission() throws IOException, InterruptedException, BrokenBarrierException {
    final Credentials credsx = new Credentials();
    final Token<DelegationTokenIdentifier> tokenx = mock(Token.class);
    when(tokenx.getKind()).thenReturn(KIND);
    DelegationTokenIdentifier dtId1 = new DelegationTokenIdentifier(new Text("user1"), new Text("renewer"),
            new Text("user1"));
    when(tokenx.decodeIdentifier()).thenReturn(dtId1);
    credsx.addToken(new Text("token"), tokenx);
    doReturn(true).when(tokenx).isManaged();
    doThrow(new IOException("boom")).when(tokenx).renew(any(Configuration.class));
    // fire up the renewer
    final DelegationTokenRenewer dtr = createNewDelegationTokenRenewer(conf, counter);
    RMContext mockContext = mock(RMContext.class);
    when(mockContext.getSystemCredentialsForApps())
            .thenReturn(new ConcurrentHashMap<ApplicationId, ByteBuffer>());
    ClientRMService mockClientRMService = mock(ClientRMService.class);
    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
    InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234);
    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
    dtr.setRMContext(mockContext);
    when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr);
    dtr.init(conf);
    dtr.start();

    try {
        dtr.addApplicationSync(mock(ApplicationId.class), credsx, false, "user");
        fail("Catch IOException on app submission");
    } catch (IOException e) {
        Assert.assertTrue(e.getMessage().contains(tokenx.toString()));
        Assert.assertTrue(e.getCause().toString().contains("boom"));
    }

}

From source file:com.ngdata.sep.impl.fork.ForkedReplicationSource.java

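// Main replication loop: resolves the peer cluster id via ZooKeeper, then repeatedly opens
// the next WAL, reads its entries, and ships them; an IOException whose cause is an
// EOFException may lead to dumping the current file via processEndOfFile().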
@Override
public void run() {
    connectToPeers();
    // We were stopped while looping to connect to sinks, just abort
    if (!this.isActive()) {
        return;
    }
    int sleepMultiplier = 1;
    // delay this until we are in an asynchronous thread
    while (this.peerClusterId == null) {
        this.peerClusterId = zkHelper.getPeerUUID(this.peerId);
        if (this.peerClusterId == null) {
            if (sleepForRetries("Cannot contact the peer's zk ensemble", sleepMultiplier)) {
                sleepMultiplier++;
            }
        }
    }
    // resetting to 1 to reuse later
    sleepMultiplier = 1;

    LOG.info("Replicating " + clusterId + " -> " + peerClusterId);

    // If this is recovered, the queue is already full and the first log
    // normally has a position (unless the RS failed between 2 logs)
    if (this.queueRecovered) {
        try {
            this.repLogReader.setPosition(
                    this.zkHelper.getHLogRepPosition(this.peerClusterZnode, this.queue.peek().getName()));
        } catch (KeeperException e) {
            this.terminate("Couldn't get the position of this recovered queue " + peerClusterZnode, e);
        }
    }
    // Loop until we close down
    while (isActive()) {
        // Sleep until replication is enabled again
        if (!isPeerEnabled()) {
            if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        // Note that in the current scenario, oldPath will be null when a log roll happens.
        Path oldPath = getCurrentPath();
        // Get a new path
        boolean hasCurrentPath = getNextPath();
        if (getCurrentPath() != null && oldPath == null) {
            sleepMultiplier = 1; //reset the sleepMultiplier on a path change
        }
        if (!hasCurrentPath) {
            if (sleepForRetries("No log to process", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        boolean currentWALisBeingWrittenTo = false;
        //For WAL files we own (rather than recovered), take a snapshot of whether the
        //current WAL file (this.currentPath) is in use (for writing) NOW!
        //Since the new WAL paths are enqueued only after the prev WAL file
        //is 'closed', presence of an element in the queue means that
        //the previous WAL file was closed, else the file is in use (currentPath)
        //We take the snapshot now so that we are protected against races
        //where a new file gets enqueued while the current file is being processed
        //(and where we just finished reading the current file).
        if (!this.queueRecovered && queue.size() == 0) {
            currentWALisBeingWrittenTo = true;
        }
        // Open a reader on it
        if (!openReader(sleepMultiplier)) {
            // Reset the sleep multiplier, else it'd be reused for the next file
            sleepMultiplier = 1;
            continue;
        }

        // If we got a null reader but didn't continue, then sleep and continue
        if (this.reader == null) {
            if (sleepForRetries("Unable to open a reader", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }

        boolean gotIOE = false;
        currentNbOperations = 0;
        currentNbEntries = 0;
        currentSize = 0;
        try {
            if (readAllEntriesToReplicateOrNextFile(currentWALisBeingWrittenTo)) {
                continue;
            }
        } catch (IOException ioe) {
            LOG.warn(peerClusterZnode + " Got: ", ioe);
            gotIOE = true;
            if (ioe.getCause() instanceof EOFException) {

                boolean considerDumping = false;
                if (this.queueRecovered) {
                    try {
                        FileStatus stat = this.fs.getFileStatus(this.currentPath);
                        if (stat.getLen() == 0) {
                            LOG.warn(peerClusterZnode + " Got EOF and the file was empty");
                        }
                        considerDumping = true;
                    } catch (IOException e) {
                        LOG.warn(peerClusterZnode + " Got while getting file size: ", e);
                    }
                } else if (currentNbEntries != 0) {
                    LOG.warn(peerClusterZnode + " Got EOF while reading, " + "looks like this file is broken? "
                            + currentPath);
                    considerDumping = true;
                    currentNbEntries = 0;
                }

                if (considerDumping && sleepMultiplier == this.maxRetriesMultiplier && processEndOfFile()) {
                    continue;
                }
            }
        } finally {
            try {
                this.reader = null;
                this.repLogReader.closeReader();
            } catch (IOException e) {
                gotIOE = true;
                LOG.warn("Unable to finalize the tailing of a file", e);
            }
        }

        // If we didn't get anything to replicate, or if we hit a IOE,
        // wait a bit and retry.
        // But if we need to stop, don't bother sleeping
        if (this.isActive() && (gotIOE || currentNbEntries == 0)) {
            if (this.lastLoggedPosition != this.repLogReader.getPosition()) {
                this.manager.logPositionAndCleanOldLogs(this.currentPath, this.peerClusterZnode,
                        this.repLogReader.getPosition(), queueRecovered, currentWALisBeingWrittenTo);
                this.lastLoggedPosition = this.repLogReader.getPosition();
            }

            // SEP change -- be more responsive on a lightly-loaded cluster. This will not be necessary
            // once HBASE-7325 is available
            if (!gotIOE) {
                sleepMultiplier = 1;
            }

            if (sleepForRetries("Nothing to replicate", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        sleepMultiplier = 1;
        shipEdits(currentWALisBeingWrittenTo);

    }
    if (this.conn != null) {
        try {
            this.conn.close();
        } catch (IOException e) {
            LOG.debug("Attempt to close connection failed", e);
        }
    }
    LOG.debug("Source exiting " + peerId);
}

From source file:com.addthis.hydra.task.source.AbstractStreamFileDataSource.java

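// The first caller performs the shutdown (drain workers, close wrappers, shut down the
// source, close the mark DB); concurrent callers block on closeFuture and rethrow its cause.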
@Override
public void close() {
    if (shuttingDown.compareAndSet(false, true)) {
        log.info("closing stream file data source. preOpened={} queue={}", preOpened.size(), queue.size());
        try {
            log.info("Waiting up to {} seconds for outstanding worker tasks to complete.", latchTimeout);
            getUninterruptibly(aggregateWorkerFuture, latchTimeout, TimeUnit.SECONDS);
            log.info("All threads have finished.");

            log.debug("closing wrappers");
            closePreOpenedQueue();

            log.debug("shutting down mesh");
            // We may overwrite the local source variable and in doing so throw away the persistence flag
            PersistentStreamFileSource baseSource = getSource();
            if (baseSource != null) {
                baseSource.shutdown();
            } else {
                log.warn("getSource() returned null and no source was shutdown");
            }

            closeMarkDB();
            log.info(fileStatsToString("shutdown complete"));
        } catch (IOException ex) {
            UncheckedIOException unchecked = new UncheckedIOException(ex);
            closeFuture.completeExceptionally(unchecked);
            throw unchecked;
        } catch (Throwable t) {
            closeFuture.completeExceptionally(t);
            throw propagate(t);
        }
        workerThreadPool.shutdown();
        closeFuture.complete(null);
    } else {
        try {
            closeFuture.join();
        } catch (CompletionException ex) {
            throw propagate(ex.getCause());
        }
    }
}

From source file:de.blizzy.backup.restore.RestoreDialog.java

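// Prompts until the user picks an empty output folder (or cancels), then restores the
// selected entries under a progress dialog; an IOException raised during the restore is
// wrapped in an InvocationTargetException and logged via its getCause().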
private void restore(final Collection<Entry> entries) {
    String folder = null;
    for (;;) {
        DirectoryDialog dlg = new DirectoryDialog(getShell(), SWT.SAVE);
        dlg.setText(Messages.Title_SelectOutputFolder);
        dlg.setFilterPath(folder);
        folder = dlg.open();
        if (folder == null) {
            break;
        }

        if (new File(folder).list().length > 0) {
            MessageDialog.openError(getShell(), Messages.Title_FolderNotEmpty, NLS.bind(Messages.FolderNotEmpty,
                    Utils.getSimpleName(new FileSystemFileOrFolder(new File(folder)))));
            continue;
        }

        break;
    }

    if (folder != null) {
        alwaysRestoreFromOlderBackups = null;

        final String myFolder = folder;
        Backup backup = (Backup) ((IStructuredSelection) backupsViewer.getSelection()).getFirstElement();
        final int backupId = backup.id;
        final int numEntries = backup.numEntries;
        final ProgressMonitorDialog dlg = new ProgressMonitorDialog(getShell());
        IRunnableWithProgress runnable = new IRunnableWithProgress() {
            @Override
            public void run(IProgressMonitor monitor) throws InvocationTargetException, InterruptedException {

                try {
                    monitor.beginTask(Messages.Title_RestoreFromBackup, numEntries);
                    for (Entry entry : entries) {
                        restoreEntry(entry, new File(myFolder), settings.getOutputFolder(), backupId, monitor,
                                dlg.getShell());
                    }
                } catch (IOException e) {
                    throw new InvocationTargetException(e);
                } finally {
                    monitor.done();
                }
            }
        };
        try {
            dlg.run(true, true, runnable);
        } catch (InvocationTargetException e) {
            // TODO
            BackupPlugin.getDefault().logError("error while restoring from backup", e.getCause()); //$NON-NLS-1$
        } catch (InterruptedException e) {
            // okay
        }
    }
}

From source file:com.nabla.wapp.server.csv.CsvReader.java

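// Reads one CSV row into the given instance, recording per-line errors; a validator failure
// arrives as an InvocationTargetException and is unwrapped with getCause() to detect a
// FullErrorListException.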
@Override
public Status next(T instance) throws FullErrorListException {
    Assert.argumentNotNull(instance);

    try {
        List<String> values;
        try {
            values = impl.read();
        } catch (IOException e) {
            if (log.isErrorEnabled())
                log.error("error while reading next csv line", e);
            errors.add(getLineNumber(), CommonServerErrors.INTERNAL_ERROR);
            return Status.ERROR;
        }
        if (values == null)
            return Status.EOF;
        if (columns.size() != values.size()) {
            errors.add(getLineNumber(), CommonServerErrors.INVALID_FIELD_COUNT);
            return Status.ERROR;
        }
        for (int c = 0; c < columns.size(); ++c) {
            try {
                columns.get(c).setValue(instance, values.get(c));
            } catch (Throwable e) {
                if (log.isErrorEnabled())
                    log.error("error while reading next csv line", e);
                errors.add(getLineNumber(), columns.get(c).getName(), CommonServerErrors.INVALID_VALUE);
            }
        }
        try {
            if (validate != null)
                validate.invoke(instance, errors);
        } catch (final InvocationTargetException e) {
            final Throwable ee = e.getCause();
            if (log.isErrorEnabled())
                log.error("error while validating next csv line", ee);
            if (ee != null && ee.getClass().equals(FullErrorListException.class))
                throw new FullErrorListException();
            else {
                errors.add(getLineNumber(), CommonServerErrors.INTERNAL_ERROR);
                return Status.ERROR;
            }
        }
        return errors.isEmpty() ? Status.SUCCESS : Status.ERROR;
    } catch (FullErrorListException e) {
        throw e;
    } catch (Throwable e) {
        return Status.ERROR;
    }
}

From source file:org.apache.nifi.remote.client.http.TestHttpClient.java

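// After one successful data packet, a second receive() on the expired transaction should
// fail with an IOException whose cause is a SocketTimeoutException.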
@Test
public void testReceiveTimeoutAfterDataExchange() throws Exception {

    try (SiteToSiteClient client = getDefaultBuilder().timeout(1, TimeUnit.SECONDS)
            .portName("output-timeout-data-ex").build()) {
        final Transaction transaction = client.createTransaction(TransferDirection.RECEIVE);
        assertNotNull(transaction);

        DataPacket packet = transaction.receive();
        assertNotNull(packet);
        consumeDataPacket(packet);

        try {
            transaction.receive();
            fail();
        } catch (IOException e) {
            logger.info("An exception was thrown as expected.", e);
            assertTrue(e.getCause() instanceof SocketTimeoutException);
        }

        confirmShouldFail(transaction);
        completeShouldFail(transaction);
    }
}

From source file:org.apache.hadoop.hbase.wal.WALSplitter.java

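// Returns the next WAL entry; EOF (a truncated file after an RS crash) yields null, a parse
// or checksum cause also yields null since retrying cannot help, and any other IOException
// is rethrown as-is or wrapped as a CorruptedLogFileException depending on skipErrors.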
private static Entry getNextLogLine(Reader in, Path path, boolean skipErrors)
        throws CorruptedLogFileException, IOException {
    try {
        return in.next();
    } catch (EOFException eof) {
        // truncated files are expected if a RS crashes (see HBASE-2643)
        LOG.info("EOF from wal " + path + ".  continuing");
        return null;
    } catch (IOException e) {
        // If the IOE resulted from bad file format,
        // then this problem is idempotent and retrying won't help
        if (e.getCause() != null && (e.getCause() instanceof ParseException
                || e.getCause() instanceof org.apache.hadoop.fs.ChecksumException)) {
            LOG.warn("Parse exception " + e.getCause().toString() + " from wal " + path + ".  continuing");
            return null;
        }
        if (!skipErrors) {
            throw e;
        }
        CorruptedLogFileException t = new CorruptedLogFileException(
                "skipErrors=true Ignoring exception while parsing wal " + path + ". Marking as corrupted");
        t.initCause(e);
        throw t;
    }
}

From source file:de.ingrid.portal.interfaces.impl.IBUSInterfaceImpl.java

/**
 * Calls the searchAndDetail method on the iBus, making one call to the bus instead of two.
 * The IngridHitDetails are returned.
 */
public IngridHits searchAndDetail(IngridQuery query, int hitsPerPage, int currentPage, int startHit,
        int timeout, String[] reqParameter) throws Exception {
    IngridHits hits = null;
    try {
        if (log.isDebugEnabled()) {
            log.debug("iBus.search: IngridQuery = " + UtilsSearch.queryToString(query) + " / timeout=" + timeout
                    + ", hitsPerPage=" + hitsPerPage + ", currentPage=" + currentPage + ", startHit="
                    + startHit);
        }
        long start = System.currentTimeMillis();

        hits = bus.searchAndDetail(query, hitsPerPage, currentPage, startHit, timeout, reqParameter);

        if (log.isDebugEnabled()) {
            long duration = System.currentTimeMillis() - start;
            log.debug("iBus.search: finished !");
            log.debug("in " + duration + "ms");
        }
    } catch (java.io.IOException e) {
        if (log.isDebugEnabled()) {
            log.debug("Problems doing iBus search, query=" + UtilsSearch.queryToString(query) + " / timeout="
                    + timeout + ", hitsPerPage=" + hitsPerPage + ", currentPage=" + currentPage + ", startHit="
                    + startHit, e);
        } else if (log.isInfoEnabled()) {
            log.info("Problems doing iBus search, query=" + UtilsSearch.queryToString(query) + " / timeout="
                    + timeout + ", hitsPerPage=" + hitsPerPage + ", currentPage=" + currentPage + ", startHit="
                    + startHit + "[cause:" + e.getMessage() + "]");
        } else {
            log.warn("Problems doing iBus search, query=" + UtilsSearch.queryToString(query) + " / timeout="
                    + timeout + ", hitsPerPage=" + hitsPerPage + ", currentPage=" + currentPage + ", startHit="
                    + startHit + "[cause:" + e.getCause().getMessage() + "]", e);
        }
    } catch (Throwable t) {
        if (log.isErrorEnabled()) {
            log.error("Problems doing iBus search, query=" + UtilsSearch.queryToString(query) + " / timeout="
                    + timeout + ", hitsPerPage=" + hitsPerPage + ", currentPage=" + currentPage + ", startHit="
                    + startHit, t);
        }
        throw new Exception(t);
    }

    return hits;
}