List of usage examples for java.io.IOException.getCause()
public synchronized Throwable getCause()

Returns the cause of this throwable, or null if the cause is nonexistent or unknown.
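Before the source-specific examples below, here is a minimal, self-contained sketch of the pattern most of them share: catch an exception, unwrap it with getCause(), and branch on the cause's type or message, checking for null first since a cause is not always recorded. The exception wiring here is contrived purely for illustration; only the null check and the instanceof dispatch are the point.

import java.io.IOException;
import java.net.ConnectException;

public class GetCauseDemo {
    public static void main(String[] args) {
        try {
            // Contrived: wrap a low-level failure in an IOException,
            // as a filesystem or network client typically would.
            throw new IOException("operation failed", new ConnectException("Connection refused"));
        } catch (IOException e) {
            Throwable cause = e.getCause(); // may be null
            if (cause instanceof ConnectException) {
                System.err.println("Network-level failure: " + cause.getMessage());
            } else if (cause != null) {
                System.err.println("Wrapped failure: " + cause.getClass().getSimpleName());
            } else {
                System.err.println("No underlying cause recorded: " + e.getMessage());
            }
        }
    }
}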
From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java
@Test(timeout = 300000)
@Ignore("Need HADOOP-6886, HADOOP-6840, & HDFS-617 for this. HDFS 0.20.205.1+ should have this")
public void testLogRollAfterSplitStart() throws IOException {
    HLog log = null;
    String logName = "testLogRollAfterSplitStart";
    Path thisTestsDir = new Path(HBASEDIR, logName);

    try {
        // put some entries in an HLog
        TableName tableName = TableName.valueOf(this.getClass().getName());
        HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        log = HLogFactory.createHLog(fs, HBASEDIR, logName, conf);
        final AtomicLong sequenceId = new AtomicLong(1);

        final int total = 20;
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
            HTableDescriptor htd = new HTableDescriptor(tableName);
            htd.addFamily(new HColumnDescriptor("column"));
            log.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
        }

        // Send the data to HDFS datanodes and close the HDFS writer
        log.sync();
        ((FSHLog) log).replaceWriter(((FSHLog) log).getOldPath(), null, null, null);

        /* code taken from ProcessServerShutdown.process()
         * handles RS shutdowns (as observed by the Master)
         */
        // rename the directory so a rogue RS doesn't create more HLogs
        Path rsSplitDir = new Path(thisTestsDir.getParent(), thisTestsDir.getName() + "-splitting");
        fs.rename(thisTestsDir, rsSplitDir);
        LOG.debug("Renamed region directory: " + rsSplitDir);

        // Process the old log files
        HLogSplitter.split(HBASEDIR, rsSplitDir, OLDLOGDIR, fs, conf);

        // Now, try to roll the HLog and verify failure
        try {
            log.rollWriter();
            Assert.fail("rollWriter() did not throw any exception.");
        } catch (IOException ioe) {
            if (ioe.getCause().getMessage().contains("FileNotFound")) {
                LOG.info("Got the expected exception: ", ioe.getCause());
            } else {
                Assert.fail("Unexpected exception: " + ioe);
            }
        }
    } finally {
        if (log != null) {
            log.close();
        }
        if (fs.exists(thisTestsDir)) {
            fs.delete(thisTestsDir, true);
        }
    }
}
From source file:de.clusteval.run.result.ClusteringRunResult.java
/**
 * Convert to.
 *
 * @param format
 *            the format
 * @param internalParams
 *            Internal parameters used to produce the clustering result,
 *            needed for parsing parameters.
 * @param params
 *            Parameters used to produce the clustering result, needed for
 *            parsing parameters.
 * @return the clustering result
 * @throws NoRunResultFormatParserException
 *             the no run result format parser exception
 * @throws RunResultNotFoundException
 * @throws RegisterException
 */
@SuppressWarnings("unused")
public ClusteringRunResult convertTo(final RunResultFormat format, final Map<String, String> internalParams,
        final Map<String, String> params)
        throws NoRunResultFormatParserException, RunResultNotFoundException, RegisterException {
    ClusteringRunResult result = null;
    RunResultFormatParser p = null;

    if (!new File(this.absPath.getAbsolutePath()).exists())
        throw new RunResultNotFoundException(
                "The result file " + this.absPath.getAbsolutePath() + " does not exist!");

    /*
     * We already have the same format
     */
    if (this.getResultFormat().equals(format)) {
        /*
         * Just copy the result file and return the corresponding
         * ClusteringRunResult
         */
        try {
            org.apache.commons.io.FileUtils.copyFile(new File(this.getAbsolutePath()),
                    new File(this.getAbsolutePath() + ".conv"));
            return new ClusteringRunResult(this.repository, System.currentTimeMillis(),
                    new File(this.absPath.getAbsolutePath() + ".conv"), this.dataConfig, this.programConfig,
                    format, this.runIdentString, run);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    try {
        p = this.repository.getRunResultFormatParser(this.getResultFormat().getClass().getName())
                .getConstructor(Map.class, Map.class, String.class)
                .newInstance(internalParams, params, this.absPath.getAbsolutePath());
        if (p != null) {
            p.convertToStandardFormat();
            result = new ClusteringRunResult(this.repository, System.currentTimeMillis(),
                    new File(this.absPath.getAbsolutePath() + ".conv"), this.dataConfig, this.programConfig,
                    format, this.runIdentString, run);
        }
    } catch (InstantiationException e) {
        e.printStackTrace();
    } catch (IllegalAccessException e) {
        e.printStackTrace();
    } catch (IllegalArgumentException e) {
        e.printStackTrace();
    } catch (SecurityException e) {
        e.printStackTrace();
    } catch (InvocationTargetException e) {
        if (e.getCause() instanceof FileNotFoundException
                || (e.getCause() instanceof IOException
                        && e.getCause().getMessage().startsWith("Empty file given"))) {
            /*
             * Ensure that all the files of this result are deleted
             */
            FileUtils.delete(this.absPath);
            throw new RunResultNotFoundException(e.getCause().getMessage());
        }
        e.printStackTrace();
    } catch (IOException e) {
        /*
         * Ensure that all the files of this result are deleted
         */
        FileUtils.delete(this.absPath);
        throw new RunResultNotFoundException(e.getMessage());
    } catch (NoSuchMethodException e) {
        e.printStackTrace();
    } catch (NullPointerException e) {
        e.printStackTrace();
    }
    return result;
}
From source file:org.apache.hadoop.hdfs.server.namenode.TestEditLog.java
@Test
public void testEditChecksum() throws Exception {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();
    fileSys.mkdirs(new Path("/tmp"));

    Iterator<StorageDirectory> iter = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
    LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
    while (iter.hasNext()) {
        sds.add(iter.next());
    }
    editLog.close();
    cluster.shutdown();

    for (StorageDirectory sd : sds) {
        File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
        assertTrue(editFile.exists());

        long fileLen = editFile.length();
        LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
        RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
        rwf.seek(fileLen - 4); // seek to checksum bytes
        int b = rwf.readInt();
        rwf.seek(fileLen - 4);
        rwf.writeInt(b + 1);
        rwf.close();
    }

    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
        fail("should not be able to start");
    } catch (IOException e) {
        // expected
        assertNotNull("Cause of exception should be ChecksumException", e.getCause());
        assertEquals("Cause of exception should be ChecksumException", ChecksumException.class,
                e.getCause().getClass());
    }
}
From source file:utybo.branchingstorytree.swing.OpenBSTGUI.java
/**
 * Load and parse a file, using appropriate dialogs to inform the user if an
 * error occurs and even give them the option to reload the file.
 *
 * @param file
 *            The file to load
 * @param client
 *            The BST Client. This is required for parsing the file
 */
public void loadFile(final File file, final TabClient client, Consumer<BranchingStory> callback) {
    SwingWorker<BranchingStory, Object> worker = new SwingWorker<BranchingStory, Object>() {
        @Override
        protected BranchingStory doInBackground() throws Exception {
            try {
                LOG.trace("Parsing story");
                String ext = FilenameUtils.getExtension(file.getName());
                BranchingStory bs = null;
                if (ext.equals("bsp")) {
                    bs = BSTPackager.fromPackage(new ProgressMonitorInputStream(instance,
                            "Opening " + file.getName() + "...", new FileInputStream(file)), client);
                } else {
                    bs = parser.parse(
                            new BufferedReader(new InputStreamReader(
                                    new ProgressMonitorInputStream(instance,
                                            "Opening " + file.getName() + "...", new FileInputStream(file)),
                                    StandardCharsets.UTF_8)),
                            new Dictionary(), client, "<main>");
                    client.setBRMHandler(new BRMFileClient(file, client, bs));
                }
                callback.accept(bs);
                return bs;
            } catch (final IOException e) {
                LOG.error("IOException caught", e);
                showException(Lang.get("file.error").replace("$e", e.getClass().getSimpleName())
                        .replace("$m", e.getMessage()), e);
                return null;
            } catch (final BSTException e) {
                LOG.error("BSTException caught", e);
                String s = "<html>" + Lang.get("file.bsterror.1");
                s += Lang.get("file.bsterror.2");
                s += Lang.get("file.bsterror.3").replace("$l", "" + e.getWhere()).replace("$f", "[main]");
                if (e.getCause() != null) {
                    s += Lang.get("file.bsterror.4").replace("$e", e.getCause().getClass().getSimpleName())
                            .replace("$m", e.getCause().getMessage());
                }
                s += Lang.get("file.bsterror.5").replace("$m", "" + e.getMessage());
                s += Lang.get("file.bsterror.6");
                String s2 = s;
                if (doAndReturn(() -> Messagers.showConfirm(instance, s2, Messagers.OPTIONS_YES_NO,
                        Messagers.TYPE_ERROR, Lang.get("bsterror"))) == Messagers.OPTION_YES) {
                    LOG.debug("Reloading");
                    return doInBackground();
                }
                return null;
            } catch (final Exception e) {
                LOG.error("Random exception caught", e);
                showException(Lang.get("file.crash"), e);
                return null;
            }
        }

        private <T> T doAndReturn(Supplier<T> supplier) {
            ArrayList<T> l = new ArrayList<>();
            invokeSwingAndWait(() -> {
                l.add(supplier.get());
            });
            return l.size() == 0 ? null : l.get(0);
        }

        @Override
        protected void done() {
            try {
                get();
            } catch (InterruptedException e) {
                // Shouldn't happen
            } catch (ExecutionException e) {
                LOG.error("Random exception caught", e);
                Messagers.showException(instance, Lang.get("file.crash"), e);
            }
        }
    };
    worker.execute();
}
From source file:org.apache.hadoop.hbase.regionserver.SplitLogWorker.java
public SplitLogWorker(final ZooKeeperWatcher watcher, final Configuration conf,
        final RegionServerServices server, final LastSequenceId sequenceIdChecker) {
    this(watcher, conf, server, new TaskExecutor() {
        @Override
        public Status exec(String filename, CancelableProgressable p) {
            Path rootdir;
            FileSystem fs;
            try {
                rootdir = FSUtils.getRootDir(conf);
                fs = rootdir.getFileSystem(conf);
            } catch (IOException e) {
                LOG.warn("could not find root dir or fs", e);
                return Status.RESIGNED;
            }
            // TODO have to correctly figure out when log splitting has been
            // interrupted or has encountered a transient error and when it has
            // encountered a bad non-retry-able persistent error.
            try {
                if (!HLogSplitter.splitLogFile(rootdir, fs.getFileStatus(new Path(rootdir, filename)), fs,
                        conf, p, sequenceIdChecker, watcher, server.getCoordinatedStateManager())) {
                    return Status.PREEMPTED;
                }
            } catch (InterruptedIOException iioe) {
                LOG.warn("log splitting of " + filename + " interrupted, resigning", iioe);
                return Status.RESIGNED;
            } catch (IOException e) {
                Throwable cause = e.getCause();
                if (e instanceof RetriesExhaustedException && (cause instanceof NotServingRegionException
                        || cause instanceof ConnectException || cause instanceof SocketTimeoutException)) {
                    LOG.warn("log replaying of " + filename + " can't connect to the target regionserver, "
                            + "resigning", e);
                    return Status.RESIGNED;
                } else if (cause instanceof InterruptedException) {
                    LOG.warn("log splitting of " + filename + " interrupted, resigning", e);
                    return Status.RESIGNED;
                } else if (cause instanceof KeeperException) {
                    LOG.warn("log splitting of " + filename + " hit ZooKeeper issue, resigning", e);
                    return Status.RESIGNED;
                }
                LOG.warn("log splitting of " + filename + " failed, returning error", e);
                return Status.ERR;
            }
            return Status.DONE;
        }
    });
}
From source file:org.apache.pulsar.functions.worker.rest.api.FunctionsImpl.java
private Response updateRequest(FunctionMetaData functionMetaData, InputStream uploadedInputStream) {
    // Upload to bookkeeper
    try {
        log.info("Uploading function package to {}", functionMetaData.getPackageLocation());
        Utils.uploadToBookeeper(worker().getDlogNamespace(), uploadedInputStream,
                functionMetaData.getPackageLocation().getPackagePath());
    } catch (IOException e) {
        log.error("Error uploading file {}", functionMetaData.getPackageLocation(), e);
        return Response.serverError().type(MediaType.APPLICATION_JSON).entity(new ErrorData(e.getMessage()))
                .build();
    }

    // Submit to FMT
    FunctionMetaDataManager functionMetaDataManager = worker().getFunctionMetaDataManager();
    CompletableFuture<RequestResult> completableFuture = functionMetaDataManager
            .updateFunction(functionMetaData);

    RequestResult requestResult = null;
    try {
        requestResult = completableFuture.get();
        if (!requestResult.isSuccess()) {
            return Response.status(Status.BAD_REQUEST).type(MediaType.APPLICATION_JSON)
                    .entity(new ErrorData(requestResult.getMessage())).build();
        }
    } catch (ExecutionException e) {
        return Response.serverError().type(MediaType.APPLICATION_JSON)
                .entity(new ErrorData(e.getCause().getMessage())).build();
    } catch (InterruptedException e) {
        return Response.status(Status.REQUEST_TIMEOUT).type(MediaType.APPLICATION_JSON)
                .entity(new ErrorData(e.getCause().getMessage())).build();
    }

    return Response.status(Status.OK).build();
}
From source file:org.apache.hadoop.hdfs.TestFileConcurrentReader.java
private void runTestUnfinishedBlockCRCError(final boolean transferToAllowed, final SyncType syncType,
        final int writeSize, Configuration conf) throws IOException {
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY, transferToAllowed);
    init(conf);

    final Path file = new Path("/block-being-written-to");
    final int numWrites = 2000;
    final AtomicBoolean writerDone = new AtomicBoolean(false);
    final AtomicBoolean writerStarted = new AtomicBoolean(false);
    final AtomicBoolean error = new AtomicBoolean(false);
    final FSDataOutputStream initialOutputStream = fileSystem.create(file);
    final Thread writer = new Thread(new Runnable() {
        private FSDataOutputStream outputStream = initialOutputStream;

        @Override
        public void run() {
            try {
                for (int i = 0; !error.get() && i < numWrites; i++) {
                    try {
                        final byte[] writeBuf = DFSTestUtil.generateSequentialBytes(i * writeSize, writeSize);
                        outputStream.write(writeBuf);
                        if (syncType == SyncType.SYNC) {
                            outputStream.hflush();
                        } else { // append
                            outputStream.close();
                            outputStream = fileSystem.append(file);
                        }
                        writerStarted.set(true);
                    } catch (IOException e) {
                        error.set(true);
                        LOG.error("error writing to file", e);
                    }
                }
                writerDone.set(true);
                outputStream.close();
            } catch (Exception e) {
                LOG.error("error in writer", e);
                throw new RuntimeException(e);
            }
        }
    });
    Thread tailer = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                long startPos = 0;
                while (!writerDone.get() && !error.get()) {
                    if (writerStarted.get()) {
                        try {
                            startPos = tailFile(file, startPos);
                        } catch (IOException e) {
                            LOG.error(String.format("error tailing file %s", file), e);
                            throw new RuntimeException(e);
                        }
                    }
                }
            } catch (RuntimeException e) {
                if (e.getCause() instanceof ChecksumException) {
                    error.set(true);
                }
                writer.interrupt();
                LOG.error("error in tailer", e);
                throw e;
            }
        }
    });
    writer.start();
    tailer.start();

    try {
        writer.join();
        tailer.join();
        assertFalse("error occurred, see log above", error.get());
    } catch (InterruptedException e) {
        LOG.info("interrupted waiting for writer or tailer to complete");
        Thread.currentThread().interrupt();
    }
    initialOutputStream.close();
}
From source file:hudson.plugins.sshslaves.SSHLauncher.java
protected void openConnection(TaskListener listener) throws IOException, InterruptedException {
    listener.getLogger().println(Messages.SSHLauncher_OpeningSSHConnection(getTimestamp(), host + ":" + port));
    connection.setTCPNoDelay(true);

    int maxNumRetries = this.maxNumRetries == null || this.maxNumRetries < 0 ? 0 : this.maxNumRetries;
    for (int i = 0; i <= maxNumRetries; i++) {
        try {
            connection.connect();
            break;
        } catch (IOException ioexception) {
            // getCause() may be null, so only dereference it inside the guard
            String ioExceptionMessageCause = "";
            if (ioexception.getCause() != null) {
                ioExceptionMessageCause = ioexception.getCause().getMessage();
                listener.getLogger().println(ioExceptionMessageCause);
            }
            if (!ioExceptionMessageCause.equals("Connection refused")) {
                break;
            }
            if (maxNumRetries - i > 0) {
                listener.getLogger()
                        .println("SSH Connection failed with IOException: \"" + ioExceptionMessageCause
                                + "\", retrying in " + retryWaitTime + " seconds. There are "
                                + (maxNumRetries - i) + " more retries left.");
            } else {
                listener.getLogger().println(
                        "SSH Connection failed with IOException: \"" + ioExceptionMessageCause + "\".");
                throw ioexception;
            }
        }
        Thread.sleep(TimeUnit.SECONDS.toMillis(retryWaitTime));
    }

    StandardUsernameCredentials credentials = getCredentials();
    if (credentials == null) {
        throw new AbortException("Cannot find SSH User credentials with id: " + credentialsId);
    }
    if (SSHAuthenticator.newInstance(connection, credentials).authenticate(listener)
            && connection.isAuthenticationComplete()) {
        listener.getLogger().println(Messages.SSHLauncher_AuthenticationSuccessful(getTimestamp()));
    } else {
        listener.getLogger().println(Messages.SSHLauncher_AuthenticationFailed(getTimestamp()));
        throw new AbortException(Messages.SSHLauncher_AuthenticationFailedException());
    }
}
From source file:com.github.pascalgn.jiracli.web.HttpClient.java
private <T> T doExecute(HttpUriRequest request, boolean retry, Function<HttpEntity, T> function) {
    LOGGER.debug("Calling URL: {} [{}]", request.getURI(), request.getMethod());
    // disable XSRF check:
    if (!request.containsHeader("X-Atlassian-Token")) {
        request.addHeader("X-Atlassian-Token", "nocheck");
    }

    HttpResponse response;
    try {
        response = httpClient.execute(request, httpClientContext);
    } catch (IOException e) {
        if (Thread.interrupted()) {
            LOGGER.trace("Could not call URL: {}", request.getURI(), e);
            throw new InterruptedError();
        } else {
            throw new IllegalStateException("Could not call URL: " + request.getURI(), e);
        }
    }

    LOGGER.debug("Response received ({})", response.getStatusLine().toString().trim());

    HttpEntity entity = response.getEntity();
    try {
        if (Thread.interrupted()) {
            throw new InterruptedError();
        }

        int statusCode = response.getStatusLine().getStatusCode();
        if (isSuccess(statusCode)) {
            T result;
            try {
                result = function.apply(entity, Hint.none());
            } catch (NotAuthenticatedException e) {
                if (retry) {
                    resetAuthentication();
                    setCredentials();
                    return doExecute(request, false, function);
                } else {
                    throw e.getCause();
                }
            } catch (RuntimeException e) {
                if (Thread.interrupted()) {
                    LOGGER.trace("Could not call URL: {}", request.getURI(), e);
                    throw new InterruptedError();
                } else {
                    throw e;
                }
            }
            if (Thread.interrupted()) {
                throw new InterruptedError();
            }
            return result;
        } else {
            if (statusCode == HttpURLConnection.HTTP_UNAUTHORIZED) {
                resetAuthentication();
                if (retry) {
                    setCredentials();
                    return doExecute(request, false, function);
                } else {
                    String error = readErrorResponse(request.getURI(), entity);
                    LOGGER.debug("Unauthorized [401]: {}", error);
                    throw new AccessControlException("Unauthorized [401]: " + request.getURI());
                }
            } else if (statusCode == HttpURLConnection.HTTP_FORBIDDEN) {
                resetAuthentication();
                checkAccountLocked(response);
                if (retry) {
                    setCredentials();
                    return doExecute(request, false, function);
                } else {
                    throw new AccessControlException("Forbidden [403]: " + request.getURI());
                }
            } else {
                String status = response.getStatusLine().toString().trim();
                String message;
                if (entity == null) {
                    message = status;
                } else {
                    String error = readErrorResponse(request.getURI(), entity);
                    message = status + (error.isEmpty() ? "" : ": " + error);
                }
                if (Thread.interrupted()) {
                    throw new InterruptedError();
                }
                if (statusCode == HttpURLConnection.HTTP_NOT_FOUND) {
                    throw new NoSuchElementException(message);
                } else {
                    throw new IllegalStateException(message);
                }
            }
        }
    } finally {
        EntityUtils.consumeQuietly(entity);
    }
}
From source file:org.springframework.integration.ip.tcp.TcpOutboundGatewayTests.java
private void testGWPropagatesSocketCloseGuts(final int port, AbstractClientConnectionFactory ccf)
        throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean done = new AtomicBoolean();
    final AtomicReference<String> lastReceived = new AtomicReference<String>();
    final CountDownLatch serverLatch = new CountDownLatch(1);

    Executors.newSingleThreadExecutor().execute(new Runnable() {
        public void run() {
            try {
                ServerSocket server = ServerSocketFactory.getDefault().createServerSocket(port);
                latch.countDown();
                int i = 0;
                while (!done.get()) {
                    Socket socket = server.accept();
                    i++;
                    while (!socket.isClosed()) {
                        try {
                            ObjectInputStream ois = new ObjectInputStream(socket.getInputStream());
                            String request = (String) ois.readObject();
                            logger.debug("Read " + request + " closing socket");
                            socket.close();
                            lastReceived.set(request);
                            serverLatch.countDown();
                        } catch (IOException e) {
                            socket.close();
                        }
                    }
                }
            } catch (Exception e) {
                if (!done.get()) {
                    e.printStackTrace();
                }
            }
        }
    });

    assertTrue(latch.await(10000, TimeUnit.MILLISECONDS));

    final TcpOutboundGateway gateway = new TcpOutboundGateway();
    gateway.setConnectionFactory(ccf);
    gateway.setRequestTimeout(Integer.MAX_VALUE);
    QueueChannel replyChannel = new QueueChannel();
    gateway.setRequiresReply(true);
    gateway.setOutputChannel(replyChannel);
    gateway.setRemoteTimeout(5000);
    gateway.afterPropertiesSet();
    gateway.start();

    try {
        gateway.handleMessage(MessageBuilder.withPayload("Test").build());
        fail("expected failure");
    } catch (Exception e) {
        assertTrue(e.getCause() instanceof EOFException);
    }
    assertEquals(0, TestUtils.getPropertyValue(gateway, "pendingReplies", Map.class).size());
    Message<?> reply = replyChannel.receive(0);
    assertNull(reply);
    done.set(true);
    ccf.getConnection();
}