List of usage examples for java.io IOException getClass
@HotSpotIntrinsicCandidate public final native Class<?> getClass();
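getClass() is inherited from java.lang.Object; called on a caught IOException it returns the concrete runtime subclass (for example java.io.FileNotFoundException), which is why the examples below typically log getClass().getName() alongside getMessage(), or branch on the exact class. As a minimal, self-contained sketch of that pattern (not taken from any of the projects listed below):

import java.io.FileInputStream;
import java.io.IOException;

public class GetClassDemo {
    public static void main(String[] args) {
        try (FileInputStream in = new FileInputStream("no-such-file.txt")) {
            in.read();
        } catch (IOException ex) {
            // getClass() reports the runtime type, e.g. java.io.FileNotFoundException,
            // which is usually more informative in logs than the declared IOException.
            System.err.println("I/O exception (" + ex.getClass().getName()
                    + ") caught: " + ex.getMessage());
        }
    }
}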
From source file:org.apache.http.impl.client.StatiscicsLoggingRequestDirector.java
/**
 * Establish connection either directly or through a tunnel and retry in case of
 * a recoverable I/O failure
 */
private void tryConnect(final RoutedRequest req, final HttpContext context) throws HttpException, IOException {
    HttpRoute route = req.getRoute();
    int connectCount = 0;
    for (;;) {
        // Increment connect count
        connectCount++;
        try {
            if (!managedConn.isOpen()) {
                managedConn.open(route, context, params);
            } else {
                managedConn.setSocketTimeout(HttpConnectionParams.getSoTimeout(params));
            }
            establishRoute(route, context);
            break;
        } catch (IOException ex) {
            try {
                managedConn.close();
            } catch (IOException ignore) {
            }
            if (retryHandler.retryRequest(ex, connectCount, context)) {
                if (this.log.isInfoEnabled()) {
                    this.log.info("I/O exception (" + ex.getClass().getName()
                            + ") caught when connecting to the target host: " + ex.getMessage());
                }
                if (this.log.isDebugEnabled()) {
                    this.log.debug(ex.getMessage(), ex);
                }
                this.log.info("Retrying connect");
            } else {
                throw ex;
            }
        }
    }
}
From source file:org.sakaiproject.search.journal.impl.JournaledFSIndexStorage.java
public List<Object[]> getSegmentInfoList() {
    List<Object[]> seginfo = new ArrayList<Object[]>();
    try {
        SizeAction sa = new SizeAction();
        File searchDir = new File(journalSettings.getSearchIndexDirectory());
        FileUtils.recurse(searchDir, sa);
        seginfo.add(new Object[] { "mainsegment", sa.sizeToString(sa.getSize()),
                sa.dateToString(sa.getLastUpdated()) });
        sa.reset();
        for (File s : segments) {
            FileUtils.recurse(s, sa);
            seginfo.add(new Object[] { s.getName(), sa.sizeToString(sa.getSize()),
                    sa.dateToString(sa.getLastUpdated()) });
            sa.reset();
        }
        seginfo.add(new Object[] { "Total", sa.sizeToString(sa.getTotalSize()), "" });
    } catch (IOException ex) {
        if (log.isDebugEnabled()) {
            ex.printStackTrace();
        }
        seginfo.add(new Object[] {
                "Failed to get Segment Info list " + ex.getClass().getName() + " " + ex.getMessage() });
    }
    return seginfo;
}
From source file:org.apache.manifoldcf.agents.output.solr.HttpPoster.java
/** Handle an IOException.
 * I'm not actually sure where these exceptions come from in SolrJ, but we handle them
 * as real I/O errors, meaning they should be retried.
 */
protected static void handleIOException(IOException e, String context)
        throws ManifoldCFException, ServiceInterruption {
    if ((e instanceof InterruptedIOException) && (!(e instanceof java.net.SocketTimeoutException)))
        throw new ManifoldCFException(e.getMessage(), ManifoldCFException.INTERRUPTED);

    long currentTime = System.currentTimeMillis();

    if (e instanceof java.net.ConnectException) {
        // Server isn't up at all. Try for a brief time then give up.
        String message = "Server could not be contacted during " + context + ": " + e.getMessage();
        Logging.ingest.warn(message, e);
        throw new ServiceInterruption(message, e, currentTime + interruptionRetryTime, -1L, 3, true);
    }

    if (e instanceof java.net.SocketTimeoutException) {
        String message2 = "Socket timeout exception during " + context + ": " + e.getMessage();
        Logging.ingest.warn(message2, e);
        throw new ServiceInterruption(message2, e, currentTime + interruptionRetryTime,
                currentTime + 20L * 60000L, -1, false);
    }

    if (e.getClass().getName().equals("java.net.SocketException")) {
        // In the past we would have treated this as a straight document rejection, and
        // treated it in the same manner as a 400. The reasoning is that the server can
        // perfectly legally send out a 400 and drop the connection immediately thereafter,
        // this a race condition.
        // However, Solr 4.0 (or the Jetty version that the example runs on) seems
        // to have a bug where it drops the connection when two simultaneous documents come in
        // at the same time. This is the final version of Solr 4.0 so we need to deal with
        // this.
        if (e.getMessage().toLowerCase(Locale.ROOT).indexOf("broken pipe") != -1
                || e.getMessage().toLowerCase(Locale.ROOT).indexOf("connection reset") != -1
                || e.getMessage().toLowerCase(Locale.ROOT).indexOf("target server failed to respond") != -1) {
            // Treat it as a service interruption, but with a limited number of retries.
            // In that way we won't burden the user with a huge retry interval; it should
            // give up fairly quickly, and yet NOT give up if the error was merely transient
            String message = "Server dropped connection during " + context + ": " + e.getMessage();
            Logging.ingest.warn(message, e);
            throw new ServiceInterruption(message, e, currentTime + interruptionRetryTime, -1L, 3, false);
        }
        // Other socket exceptions are service interruptions - but if we keep getting them, it means
        // that a socket timeout is probably set too low to accept this particular document. So
        // we retry for a while, then skip the document.
        String message2 = "Socket exception during " + context + ": " + e.getMessage();
        Logging.ingest.warn(message2, e);
        throw new ServiceInterruption(message2, e, currentTime + interruptionRetryTime,
                currentTime + 20L * 60000L, -1, false);
    }

    // Otherwise, no idea what the trouble is, so presume that retries might fix it.
    String message3 = "IO exception during " + context + ": " + e.getMessage();
    Logging.ingest.warn(message3, e);
    throw new ServiceInterruption(message3, e, currentTime + interruptionRetryTime,
            currentTime + 2L * 60L * 60000L, -1, true);
}
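Note that the handler above matches on e.getClass().getName() rather than instanceof: a name comparison matches only the exact runtime class, while instanceof also matches subclasses such as java.net.ConnectException. A small standalone sketch (illustrative only, not part of ManifoldCF) showing the difference:

import java.io.IOException;
import java.net.ConnectException;
import java.net.SocketException;

public class ExactTypeCheck {
    public static void main(String[] args) {
        IOException e = new ConnectException("Connection refused");

        // instanceof is true for the subclass as well...
        System.out.println(e instanceof SocketException);                              // true
        // ...while an exact-name check matches only java.net.SocketException itself.
        System.out.println(e.getClass().getName().equals("java.net.SocketException")); // false
    }
}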
From source file:org.siddhiesb.transport.passthru.SourceHandler.java
private void logIOException(NHttpServerConnection conn, IOException e) {
    // this check feels like crazy! But weird things happened, when load testing.
    if (e == null) {
        return;
    }
    if (e instanceof ConnectionClosedException || (e.getMessage() != null
            && (e.getMessage().toLowerCase().contains("connection reset by peer")
                    || e.getMessage().toLowerCase().contains("forcibly closed")))) {
        if (log.isDebugEnabled()) {
            log.debug(conn + ": I/O error (Probably the keepalive connection "
                    + "was closed):" + e.getMessage());
        }
    } else if (e.getMessage() != null) {
        String msg = e.getMessage().toLowerCase();
        if (msg.indexOf("broken") != -1) {
            log.warn("I/O error (Probably the connection "
                    + "was closed by the remote party):" + e.getMessage());
        } else {
            log.error("I/O error: " + e.getMessage(), e);
        }
    } else {
        log.error("Unexpected I/O error: " + e.getClass().getName(), e);
    }
}
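The handler above falls back to e.getClass().getName() when getMessage() is null, which some IOExceptions (for example a bare new EOFException()) return. A minimal sketch of that fallback (hypothetical helper, not from the transport code):

import java.io.EOFException;
import java.io.IOException;

public class DescribeIOException {
    // Prefer the message; fall back to the class name when no message was supplied.
    static String describe(IOException e) {
        return (e.getMessage() != null) ? e.getMessage() : e.getClass().getName();
    }

    public static void main(String[] args) {
        System.out.println(describe(new IOException("disk full"))); // disk full
        System.out.println(describe(new EOFException()));           // java.io.EOFException
    }
}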
From source file:org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader.java
protected IOException addFileInfoToException(final IOException ioe) throws IOException {
    long pos = -1;
    try {
        pos = getPosition();
    } catch (IOException e) {
        LOG.warn("Failed getting position to add to throw", e);
    }

    // See what SequenceFile.Reader thinks is the end of the file
    long end = Long.MAX_VALUE;
    try {
        Field fEnd = SequenceFile.Reader.class.getDeclaredField("end");
        fEnd.setAccessible(true);
        end = fEnd.getLong(this.reader);
    } catch (NoSuchFieldException nfe) {
        /* reflection failure, keep going */
    } catch (IllegalAccessException iae) {
        /* reflection failure, keep going */
    } catch (Exception e) {
        /* All other cases. Should we handle it more aggressively? */
        LOG.warn("Unexpected exception when accessing the end field", e);
    }

    String msg = (this.path == null ? "" : this.path.toString()) + ", entryStart=" + entryStart + ", pos=" + pos
            + ((end == Long.MAX_VALUE) ? "" : ", end=" + end) + ", edit=" + this.edit;

    // Enhance via reflection so we don't change the original class type
    try {
        return (IOException) ioe.getClass().getConstructor(String.class).newInstance(msg).initCause(ioe);
    } catch (NoSuchMethodException nfe) {
        /* reflection failure, keep going */
    } catch (IllegalAccessException iae) {
        /* reflection failure, keep going */
    } catch (Exception e) {
        /* All other cases. Should we handle it more aggressively? */
        LOG.warn("Unexpected exception when accessing the end field", e);
    }
    return ioe;
}
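The HBase example above keeps the original exception type by reflectively invoking the (String) constructor of ioe.getClass() and chaining the original as the cause. A stripped-down sketch of that re-wrapping idiom (hypothetical helper, assuming the concrete subclass exposes a public String constructor):

import java.io.EOFException;
import java.io.IOException;

public class RewrapDemo {
    /** Returns an exception of the same runtime type with extra context, or the original on failure. */
    static IOException addContext(IOException ioe, String extra) {
        try {
            return (IOException) ioe.getClass()
                    .getConstructor(String.class)
                    .newInstance(ioe.getMessage() + " (" + extra + ")")
                    .initCause(ioe);
        } catch (Exception reflectionFailure) {
            return ioe; // fall back to the untouched exception
        }
    }

    public static void main(String[] args) {
        IOException enhanced = addContext(new EOFException("unexpected end of stream"), "pos=42");
        System.out.println(enhanced.getClass().getName() + ": " + enhanced.getMessage());
    }
}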
From source file:org.apache.http.impl.client.DefaultRequestDirector.java
/**
 * Execute request and retry in case of a recoverable I/O failure
 */
private HttpResponse tryExecute(final RoutedRequest req, final HttpContext context)
        throws HttpException, IOException {
    final RequestWrapper wrapper = req.getRequest();
    final HttpRoute route = req.getRoute();
    HttpResponse response = null;
    Exception retryReason = null;
    for (;;) {
        // Increment total exec count (with redirects)
        execCount++;
        // Increment exec count for this particular request
        wrapper.incrementExecCount();
        if (!wrapper.isRepeatable()) {
            this.log.debug("Cannot retry non-repeatable request");
            if (retryReason != null) {
                throw new NonRepeatableRequestException("Cannot retry request "
                        + "with a non-repeatable request entity. The cause lists the "
                        + "reason the original request failed.", retryReason);
            } else {
                throw new NonRepeatableRequestException("Cannot retry request "
                        + "with a non-repeatable request entity.");
            }
        }
        try {
            if (!managedConn.isOpen()) {
                // If we have a direct route to the target host
                // just re-open connection and re-try the request
                if (!route.isTunnelled()) {
                    this.log.debug("Reopening the direct connection.");
                    managedConn.open(route, context, params);
                } else {
                    // otherwise give up
                    this.log.debug("Proxied connection. Need to start over.");
                    break;
                }
            }
            if (this.log.isDebugEnabled()) {
                this.log.debug("Attempt " + execCount + " to execute request");
            }
            response = requestExec.execute(wrapper, managedConn, context);
            break;
        } catch (final IOException ex) {
            this.log.debug("Closing the connection.");
            try {
                managedConn.close();
            } catch (final IOException ignore) {
            }
            if (retryHandler.retryRequest(ex, wrapper.getExecCount(), context)) {
                if (this.log.isInfoEnabled()) {
                    this.log.info("I/O exception (" + ex.getClass().getName()
                            + ") caught when processing request to " + route + ": " + ex.getMessage());
                }
                if (this.log.isDebugEnabled()) {
                    this.log.debug(ex.getMessage(), ex);
                }
                if (this.log.isInfoEnabled()) {
                    this.log.info("Retrying request to " + route);
                }
                retryReason = ex;
            } else {
                if (ex instanceof NoHttpResponseException) {
                    final NoHttpResponseException updatedex = new NoHttpResponseException(
                            route.getTargetHost().toHostString() + " failed to respond");
                    updatedex.setStackTrace(ex.getStackTrace());
                    throw updatedex;
                } else {
                    throw ex;
                }
            }
        }
    }
    return response;
}
From source file:org.dasein.cloud.google.compute.server.ServerSupport.java
@Override
public @Nonnull VirtualMachine launch(@Nonnull VMLaunchOptions withLaunchOptions)
        throws CloudException, InternalException {
    APITrace.begin(getProvider(), "launchVM");
    // windows-cloud_windows-server-2012-r2-dc-v20150629
    validateLaunchOptions(withLaunchOptions); // this will exception out on problem.
    try {
        Compute gce = provider.getGoogleCompute();
        GoogleMethod method = new GoogleMethod(provider);
        String hostName = getCapabilities().getVirtualMachineNamingConstraints()
                .convertToValidName(withLaunchOptions.getHostName(), Locale.US);
        Instance instance = new Instance();
        instance.setName(hostName);
        instance.setDescription(withLaunchOptions.getDescription());
        if (withLaunchOptions.getStandardProductId().contains("+")) {
            instance.setMachineType(getProduct(withLaunchOptions.getStandardProductId()).getDescription());
        } else {
            instance.setMachineType(getProduct(
                    withLaunchOptions.getStandardProductId() + "+" + withLaunchOptions.getDataCenterId())
                    .getDescription());
        }

        MachineImage image = provider.getComputeServices().getImageSupport()
                .getImage(withLaunchOptions.getMachineImageId());

        AttachedDisk rootVolume = new AttachedDisk();
        rootVolume.setBoot(Boolean.TRUE);
        rootVolume.setType("PERSISTENT");
        rootVolume.setMode("READ_WRITE");
        AttachedDiskInitializeParams params = new AttachedDiskInitializeParams();
        // do not use withLaunchOptions.getFriendlyName() it is non compliant!!!
        params.setDiskName(hostName);
        // Not Optimum solution, update in core should come next release to have this be part of MachineImage
        try {
            String[] parts = withLaunchOptions.getMachineImageId().split("_");
            Image img = gce.images().get(parts[0], parts[1]).execute();
            Long size = img.getDiskSizeGb();
            String diskSizeGb = size.toString();
            if (null == diskSizeGb) {
                diskSizeGb = img.getUnknownKeys().get("diskSizeGb").toString();
            }
            Long MinimumDiskSizeGb = Long.valueOf(diskSizeGb).longValue();
            params.setDiskSizeGb(MinimumDiskSizeGb);
        } catch (Exception e) {
            params.setDiskSizeGb(10L);
        }
        if ((image != null) && (image.getTag("contentLink") != null))
            params.setSourceImage((String) image.getTag("contentLink"));
        else
            throw new CloudException("Problem getting the contentLink tag value from the image for "
                    + withLaunchOptions.getMachineImageId());
        rootVolume.setInitializeParams(params);

        List<AttachedDisk> attachedDisks = new ArrayList<AttachedDisk>();
        attachedDisks.add(rootVolume);

        if (withLaunchOptions.getVolumes().length > 0) {
            for (VolumeAttachment volume : withLaunchOptions.getVolumes()) {
                AttachedDisk vol = new AttachedDisk();
                vol.setBoot(Boolean.FALSE);
                vol.setType("PERSISTENT");
                vol.setMode("READ_WRITE");
                vol.setAutoDelete(Boolean.FALSE);
                vol.setKind("compute#attachedDisk");
                if (null != volume.getExistingVolumeId()) {
                    vol.setDeviceName(volume.getExistingVolumeId());
                    vol.setSource(provider.getComputeServices().getVolumeSupport()
                            .getVolume(volume.getExistingVolumeId()).getMediaLink());
                } else {
                    VolumeCreateOptions volumeOptions = volume.getVolumeToCreate();
                    volumeOptions.setDataCenterId(withLaunchOptions.getDataCenterId());
                    String newDisk = provider.getComputeServices().getVolumeSupport()
                            .createVolume(volume.getVolumeToCreate());
                    vol.setDeviceName(newDisk);
                    vol.setSource(
                            provider.getComputeServices().getVolumeSupport().getVolume(newDisk).getMediaLink());
                }
                attachedDisks.add(vol);
            }
        }
        instance.setDisks(attachedDisks);

        AccessConfig nicConfig = new AccessConfig();
        nicConfig.setName("External NAT");
        nicConfig.setType("ONE_TO_ONE_NAT"); // Currently the only type supported
        if (withLaunchOptions.getStaticIpIds().length > 0) {
            nicConfig.setNatIP(withLaunchOptions.getStaticIpIds()[0]);
        }
        List<AccessConfig> accessConfigs = new ArrayList<AccessConfig>();
        accessConfigs.add(nicConfig);

        NetworkInterface nic = new NetworkInterface();
        nic.setName("nic0");
        if (null != withLaunchOptions.getVlanId()) {
            VLAN vlan = provider.getNetworkServices().getVlanSupport().getVlan(withLaunchOptions.getVlanId());
            nic.setNetwork(vlan.getTag("contentLink"));
        } else {
            nic.setNetwork(
                    provider.getNetworkServices().getVlanSupport().getVlan("default").getTag("contentLink"));
        }
        nic.setAccessConfigs(accessConfigs);
        List<NetworkInterface> nics = new ArrayList<NetworkInterface>();
        nics.add(nic);
        instance.setNetworkInterfaces(nics);
        instance.setCanIpForward(Boolean.FALSE);

        Scheduling scheduling = new Scheduling();
        scheduling.setAutomaticRestart(Boolean.TRUE);
        scheduling.setOnHostMaintenance("TERMINATE");
        instance.setScheduling(scheduling);

        Map<String, String> keyValues = new HashMap<String, String>();
        if (withLaunchOptions.getBootstrapUser() != null && withLaunchOptions.getBootstrapKey() != null
                && !withLaunchOptions.getBootstrapUser().equals("")
                && !withLaunchOptions.getBootstrapKey().equals("")) {
            keyValues.put("sshKeys",
                    withLaunchOptions.getBootstrapUser() + ":" + withLaunchOptions.getBootstrapKey());
        }
        if (!withLaunchOptions.getMetaData().isEmpty()) {
            for (Map.Entry<String, Object> entry : withLaunchOptions.getMetaData().entrySet()) {
                keyValues.put(entry.getKey(), (String) entry.getValue());
            }
        }
        if (!keyValues.isEmpty()) {
            Metadata metadata = new Metadata();
            ArrayList<Metadata.Items> items = new ArrayList<Metadata.Items>();
            for (Map.Entry<String, String> entry : keyValues.entrySet()) {
                Metadata.Items item = new Metadata.Items();
                item.set("key", entry.getKey());
                if ((entry.getValue() == null) || (entry.getValue().isEmpty() == true)
                        || (entry.getValue().equals("")))
                    item.set("value", ""); // GCE HATES nulls...
                else
                    item.set("value", entry.getValue());
                items.add(item);
            }
            // https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/google-startup-scripts
            if (null != withLaunchOptions.getUserData()) {
                Metadata.Items item = new Metadata.Items();
                item.set("key", "startup-script");
                item.set("value", withLaunchOptions.getUserData());
                items.add(item);
            }
            metadata.setItems(items);
            instance.setMetadata(metadata);
        }

        Tags tags = new Tags();
        ArrayList<String> tagItems = new ArrayList<String>();
        tagItems.add(hostName); // Each tag must be 1-63 characters long, and comply with RFC1035
        tags.setItems(tagItems);
        instance.setTags(tags);

        String vmId = "";
        try {
            Operation job = gce.instances().insert(provider.getContext().getAccountNumber(),
                    withLaunchOptions.getDataCenterId(), instance).execute();
            vmId = method.getOperationTarget(provider.getContext(), job, GoogleOperationType.ZONE_OPERATION, "",
                    withLaunchOptions.getDataCenterId(), false);
        } catch (IOException ex) {
            if (ex.getClass() == GoogleJsonResponseException.class) {
                GoogleJsonResponseException gjre = (GoogleJsonResponseException) ex;
                throw new GoogleException(CloudErrorType.GENERAL, gjre.getStatusCode(), gjre.getContent(),
                        gjre.getDetails().getMessage());
            } else
                throw new CloudException("An error occurred launching the instance: " + ex.getMessage());
        } catch (Exception e) {
            if ((e.getMessage().contains("The resource")) && (e.getMessage().contains("disks"))
                    && (e.getMessage().contains("already exists"))) {
                throw new CloudException(
                        "A disk named '" + withLaunchOptions.getFriendlyName() + "' already exists.");
            } else {
                throw new CloudException(e);
            }
        }

        if (!vmId.equals("")) {
            VirtualMachine vm = getVirtualMachine(vmId);
            if (withLaunchOptions.getMachineImageId().toLowerCase().contains("windows")) {
                // Generate the public/private key pair for encryption and decryption.
                KeyPair keys = null;
                try {
                    KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
                    keyGen.initialize(2048);
                    keys = keyGen.genKeyPair();
                } catch (NoSuchAlgorithmException e) {
                    throw new InternalException(e);
                }
                resetPassword(vmId, withLaunchOptions.getDataCenterId(), keys);
                int retryCount = 20;
                while (retryCount-- > 0) {
                    SerialPortOutput output = null;
                    try {
                        output = gce.instances().getSerialPortOutput(provider.getContext().getAccountNumber(),
                                withLaunchOptions.getDataCenterId(), vmId).setPort(4).execute();
                    } catch (IOException e) {
                        throw new CloudException(e);
                    }
                    System.out.println(output);
                    // Get the last line - this will be a JSON string corresponding to the most recent password reset attempt.
                    String[] entries = output.getContents().split("\n");
                    String outputEntry = entries[entries.length - 1];
                    // Parse output using the json-simple library.
                    JSONParser parser = new JSONParser();
                    try {
                        org.json.simple.JSONObject passwordDict = (org.json.simple.JSONObject) parser
                                .parse(outputEntry);
                        vm.setRootUser(passwordDict.get("userName").toString());
                        vm.setRootPassword(
                                decryptPassword(passwordDict.get("encryptedPassword").toString(), keys));
                        break;
                    } catch (Exception e) {
                    } // ignore exception, just means metadata not yet avail.
                    try {
                        Thread.sleep(10000);
                    } catch (InterruptedException e) {
                    }
                }
            }
            return vm;
        } else {
            throw new CloudException(
                    "Could not find the instance: " + withLaunchOptions.getFriendlyName() + " after launch.");
        }
    } finally {
        APITrace.end();
    }
}
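The catch block above compares Class objects directly (ex.getClass() == GoogleJsonResponseException.class) before downcasting; like the name check earlier, this selects the exact runtime class and excludes subclasses. A tiny standalone illustration of that guard (names are illustrative only, not from Dasein Cloud):

import java.io.FileNotFoundException;
import java.io.IOException;

public class ExactClassGuard {
    public static void main(String[] args) {
        IOException ex = new FileNotFoundException("config.yaml");

        if (ex.getClass() == FileNotFoundException.class) {
            // Safe to downcast: the runtime class is exactly FileNotFoundException.
            FileNotFoundException fnfe = (FileNotFoundException) ex;
            System.out.println("missing file: " + fnfe.getMessage());
        } else {
            System.out.println("some other I/O problem: " + ex);
        }
    }
}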
From source file:org.vietspider.net.apache.DefaultRequestDirector.java
/**
 * Establish connection either directly or through a tunnel and retry in case of
 * a recoverable I/O failure
 */
private void tryConnect(final RoutedRequest req, final HttpContext context) throws HttpException, IOException {
    HttpRoute route = req.getRoute();
    boolean retrying = true;
    int connectCount = 0;
    while (retrying) {
        // Increment connect count
        connectCount++;
        try {
            if (!managedConn.isOpen()) {
                managedConn.open(route, context, params);
            } else {
                managedConn.setSocketTimeout(HttpConnectionParams.getSoTimeout(params));
            }
            establishRoute(route, context);
            retrying = false;
        } catch (IOException ex) {
            try {
                // vietspider
                if (managedConn != null)
                    managedConn.close();
            } catch (IOException ignore) {
            }
            if (retryHandler.retryRequest(ex, connectCount, context)) {
                if (this.log.isInfoEnabled()) {
                    this.log.info("I/O exception (" + ex.getClass().getName()
                            + ") caught when connecting to the target host: " + ex.getMessage());
                }
                if (this.log.isDebugEnabled()) {
                    this.log.debug(ex.getMessage(), ex);
                }
                this.log.info("Retrying connect");
            } else {
                throw ex;
            }
        }
    }
}
From source file:org.vietspider.net.apache.DefaultRequestDirector.java
/**
 * Execute request and retry in case of a recoverable I/O failure
 */
private HttpResponse tryExecute(final RoutedRequest req, final HttpContext context)
        throws HttpException, IOException {
    RequestWrapper wrapper = req.getRequest();
    HttpRoute route = req.getRoute();
    HttpResponse response = null;
    boolean retrying = true;
    Exception retryReason = null;
    while (retrying) {
        // Increment total exec count (with redirects)
        execCount++;
        // Increment exec count for this particular request
        wrapper.incrementExecCount();
        if (!wrapper.isRepeatable()) {
            this.log.debug("Cannot retry non-repeatable request");
            if (retryReason != null) {
                throw new NonRepeatableRequestException("Cannot retry request "
                        + "with a non-repeatable request entity. The cause lists the "
                        + "reason the original request failed.", retryReason);
            } else {
                throw new NonRepeatableRequestException("Cannot retry request "
                        + "with a non-repeatable request entity.");
            }
        }
        try {
            if (this.log.isDebugEnabled()) {
                this.log.debug("Attempt " + execCount + " to execute request");
            }
            response = requestExec.execute(wrapper, managedConn, context);
            retrying = false;
        } catch (IOException ex) {
            this.log.debug("Closing the connection.");
            try {
                managedConn.close();
            } catch (IOException ignore) {
            }
            if (retryHandler.retryRequest(ex, wrapper.getExecCount(), context)) {
                if (this.log.isInfoEnabled()) {
                    this.log.info("I/O exception (" + ex.getClass().getName()
                            + ") caught when processing request: " + ex.getMessage());
                }
                if (this.log.isDebugEnabled()) {
                    this.log.debug(ex.getMessage(), ex);
                }
                this.log.info("Retrying request");
                retryReason = ex;
            } else {
                throw ex;
            }
            // If we have a direct route to the target host
            // just re-open connection and re-try the request
            if (!route.isTunnelled()) {
                this.log.debug("Reopening the direct connection.");
                managedConn.open(route, context, params);
            } else {
                // otherwise give up
                this.log.debug("Proxied connection. Need to start over.");
                retrying = false;
            }
        }
    }
    return response;
}
From source file:org.apache.http.impl.client.StatiscicsLoggingRequestDirector.java
/**
 * Execute request and retry in case of a recoverable I/O failure
 */
private HttpResponse tryExecute(final RoutedRequest req, final HttpContext context)
        throws HttpException, IOException {
    RequestWrapper wrapper = req.getRequest();
    HttpRoute route = req.getRoute();
    HttpResponse response = null;
    Exception retryReason = null;
    for (;;) {
        // Increment total exec count (with redirects)
        execCount++;
        // Increment exec count for this particular request
        wrapper.incrementExecCount();
        if (!wrapper.isRepeatable()) {
            this.log.debug("Cannot retry non-repeatable request");
            if (retryReason != null) {
                throw new NonRepeatableRequestException("Cannot retry request "
                        + "with a non-repeatable request entity. The cause lists the "
                        + "reason the original request failed.", retryReason);
            } else {
                throw new NonRepeatableRequestException("Cannot retry request "
                        + "with a non-repeatable request entity.");
            }
        }
        try {
            if (!managedConn.isOpen()) {
                // If we have a direct route to the target host
                // just re-open connection and re-try the request
                if (!route.isTunnelled()) {
                    this.log.debug("Reopening the direct connection.");
                    managedConn.open(route, context, params);
                } else {
                    // otherwise give up
                    this.log.debug("Proxied connection. Need to start over.");
                    break;
                }
            }
            if (this.log.isDebugEnabled()) {
                this.log.debug("Attempt " + execCount + " to execute request");
            }
            response = requestExec.execute(wrapper, managedConn, context);
            break;
        } catch (IOException ex) {
            this.log.debug("Closing the connection.");
            try {
                managedConn.close();
            } catch (IOException ignore) {
            }
            if (retryHandler.retryRequest(ex, wrapper.getExecCount(), context)) {
                if (this.log.isInfoEnabled()) {
                    this.log.info("I/O exception (" + ex.getClass().getName()
                            + ") caught when processing request: " + ex.getMessage());
                }
                if (this.log.isDebugEnabled()) {
                    this.log.debug(ex.getMessage(), ex);
                }
                this.log.info("Retrying request");
                retryReason = ex;
            } else {
                throw ex;
            }
        }
    }
    return response;
}