List of usage examples for java.io.InterruptedIOException

public InterruptedIOException()
Constructs an InterruptedIOException with null as its error detail message.
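All of the examples below follow the same basic idiom: code that must honour the IOException-only contract of java.io catches InterruptedException, wraps it in an InterruptedIOException, and attaches the original exception with initCause(). A minimal, self-contained sketch of that idiom (the readFully name and the blocking queue inside it are placeholders for illustration, not part of any of the libraries quoted below):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    public class InterruptedIOExample {

        private final BlockingQueue<byte[]> incoming = new ArrayBlockingQueue<>(16);

        /** Blocks until a buffer is available, translating thread interruption
         *  into the checked IOException hierarchy expected by java.io callers. */
        public byte[] readFully() throws IOException {
            try {
                return incoming.take();                      // may throw InterruptedException
            } catch (InterruptedException interrupted) {
                InterruptedIOException iox = new InterruptedIOException();
                iox.initCause(interrupted);                  // keep the original cause for diagnostics
                throw iox;                                   // callers see a plain IOException subtype
            }
        }
    }

The examples that follow apply this same conversion inside HTTP request execution loops, HBase REST retries, and region-management code.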
From source file: org.robolectric.shadows.httpclient.DefaultRequestDirector.java
public HttpResponse execute(HttpHost target, HttpRequest request, HttpContext context) throws HttpException, IOException { HttpRequest orig = request;//w w w . ja v a 2 s. c o m RequestWrapper origWrapper = wrapRequest(orig); origWrapper.setParams(params); HttpRoute origRoute = determineRoute(target, origWrapper, context); virtualHost = (HttpHost) orig.getParams().getParameter(ClientPNames.VIRTUAL_HOST); RoutedRequest roureq = new RoutedRequest(origWrapper, origRoute); long timeout = ConnManagerParams.getTimeout(params); int execCount = 0; boolean reuse = false; boolean done = false; try { HttpResponse response = null; while (!done) { // In this loop, the RoutedRequest may be replaced by a // followup request and route. The request and route passed // in the method arguments will be replaced. The original // request is still available in 'orig'. RequestWrapper wrapper = roureq.getRequest(); HttpRoute route = roureq.getRoute(); response = null; // See if we have a user token bound to the execution context Object userToken = context.getAttribute(ClientContext.USER_TOKEN); // Allocate connection if needed if (managedConn == null) { ClientConnectionRequest connRequest = connManager.requestConnection(route, userToken); if (orig instanceof AbortableHttpRequest) { ((AbortableHttpRequest) orig).setConnectionRequest(connRequest); } try { managedConn = connRequest.getConnection(timeout, TimeUnit.MILLISECONDS); } catch (InterruptedException interrupted) { InterruptedIOException iox = new InterruptedIOException(); iox.initCause(interrupted); throw iox; } if (HttpConnectionParams.isStaleCheckingEnabled(params)) { // validate connection if (managedConn.isOpen()) { this.log.debug("Stale connection check"); if (managedConn.isStale()) { this.log.debug("Stale connection detected"); managedConn.close(); } } } } if (orig instanceof AbortableHttpRequest) { ((AbortableHttpRequest) orig).setReleaseTrigger(managedConn); } // Reopen connection if needed if (!managedConn.isOpen()) { managedConn.open(route, context, params); } else { managedConn.setSocketTimeout(HttpConnectionParams.getSoTimeout(params)); } try { establishRoute(route, context); } catch (TunnelRefusedException ex) { if (this.log.isDebugEnabled()) { this.log.debug(ex.getMessage()); } response = ex.getResponse(); break; } // Reset headers on the request wrapper wrapper.resetHeaders(); // Re-write request URI if needed rewriteRequestURI(wrapper, route); // Use virtual host if set target = virtualHost; if (target == null) { target = route.getTargetHost(); } HttpHost proxy = route.getProxyHost(); // Populate the execution context context.setAttribute(ExecutionContext.HTTP_TARGET_HOST, target); context.setAttribute(ExecutionContext.HTTP_PROXY_HOST, proxy); context.setAttribute(ExecutionContext.HTTP_CONNECTION, managedConn); context.setAttribute(ClientContext.TARGET_AUTH_STATE, targetAuthState); context.setAttribute(ClientContext.PROXY_AUTH_STATE, proxyAuthState); // Run request protocol interceptors requestExec.preProcess(wrapper, httpProcessor, context); boolean retrying = true; Exception retryReason = null; while (retrying) { // Increment total exec count (with redirects) execCount++; // Increment exec count for this particular request wrapper.incrementExecCount(); if (!wrapper.isRepeatable()) { this.log.debug("Cannot retry non-repeatable request"); if (retryReason != null) { throw new NonRepeatableRequestException("Cannot retry request " + "with a non-repeatable request entity. 
The cause lists the " + "reason the original request failed: " + retryReason); } else { throw new NonRepeatableRequestException( "Cannot retry request " + "with a non-repeatable request entity."); } } try { if (this.log.isDebugEnabled()) { this.log.debug("Attempt " + execCount + " to execute request"); } response = requestExec.execute(wrapper, managedConn, context); retrying = false; } catch (IOException ex) { this.log.debug("Closing the connection."); managedConn.close(); if (retryHandler.retryRequest(ex, wrapper.getExecCount(), context)) { if (this.log.isInfoEnabled()) { this.log.info("I/O exception (" + ex.getClass().getName() + ") caught when processing request: " + ex.getMessage()); } if (this.log.isDebugEnabled()) { this.log.debug(ex.getMessage(), ex); } this.log.info("Retrying request"); retryReason = ex; } else { throw ex; } // If we have a direct route to the target host // just re-open connection and re-try the request if (!route.isTunnelled()) { this.log.debug("Reopening the direct connection."); managedConn.open(route, context, params); } else { // otherwise give up this.log.debug("Proxied connection. Need to start over."); retrying = false; } } } if (response == null) { // Need to start over continue; } // Run response protocol interceptors response.setParams(params); requestExec.postProcess(response, httpProcessor, context); // The connection is in or can be brought to a re-usable state. reuse = reuseStrategy.keepAlive(response, context); if (reuse) { // Set the idle duration of this connection long duration = keepAliveStrategy.getKeepAliveDuration(response, context); managedConn.setIdleDuration(duration, TimeUnit.MILLISECONDS); if (this.log.isDebugEnabled()) { if (duration >= 0) { this.log.debug("Connection can be kept alive for " + duration + " ms"); } else { this.log.debug("Connection can be kept alive indefinitely"); } } } RoutedRequest followup = handleResponse(roureq, response, context); if (followup == null) { done = true; } else { if (reuse) { // Make sure the response body is fully consumed, if present HttpEntity entity = response.getEntity(); if (entity != null) { entity.consumeContent(); } // entity consumed above is not an auto-release entity, // need to mark the connection re-usable explicitly managedConn.markReusable(); } else { managedConn.close(); } // check if we can use the same connection for the followup if (!followup.getRoute().equals(roureq.getRoute())) { releaseConnection(); } roureq = followup; } if (managedConn != null && userToken == null) { userToken = userTokenHandler.getUserToken(context); context.setAttribute(ClientContext.USER_TOKEN, userToken); if (userToken != null) { managedConn.setState(userToken); } } } // while not done // check for entity, release connection if possible if ((response == null) || (response.getEntity() == null) || !response.getEntity().isStreaming()) { // connection not needed and (assumed to be) in re-usable state if (reuse) managedConn.markReusable(); releaseConnection(); } else { // install an auto-release entity HttpEntity entity = response.getEntity(); entity = new BasicManagedEntity(entity, managedConn, reuse); response.setEntity(entity); } return response; } catch (HttpException | RuntimeException | IOException ex) { abortConnection(); throw ex; } }
From source file: org.apache.ogt.http.impl.client.DefaultRequestDirector.java
public HttpResponse execute(HttpHost target, HttpRequest request, HttpContext context) throws HttpException, IOException { HttpRequest orig = request;/*from w w w. j ava2 s . co m*/ RequestWrapper origWrapper = wrapRequest(orig); origWrapper.setParams(params); HttpRoute origRoute = determineRoute(target, origWrapper, context); virtualHost = (HttpHost) orig.getParams().getParameter(ClientPNames.VIRTUAL_HOST); RoutedRequest roureq = new RoutedRequest(origWrapper, origRoute); long timeout = HttpConnectionParams.getConnectionTimeout(params); boolean reuse = false; boolean done = false; try { HttpResponse response = null; while (!done) { // In this loop, the RoutedRequest may be replaced by a // followup request and route. The request and route passed // in the method arguments will be replaced. The original // request is still available in 'orig'. RequestWrapper wrapper = roureq.getRequest(); HttpRoute route = roureq.getRoute(); response = null; // See if we have a user token bound to the execution context Object userToken = context.getAttribute(ClientContext.USER_TOKEN); // Allocate connection if needed if (managedConn == null) { ClientConnectionRequest connRequest = connManager.requestConnection(route, userToken); if (orig instanceof AbortableHttpRequest) { ((AbortableHttpRequest) orig).setConnectionRequest(connRequest); } try { managedConn = connRequest.getConnection(timeout, TimeUnit.MILLISECONDS); } catch (InterruptedException interrupted) { InterruptedIOException iox = new InterruptedIOException(); iox.initCause(interrupted); throw iox; } if (HttpConnectionParams.isStaleCheckingEnabled(params)) { // validate connection if (managedConn.isOpen()) { this.log.debug("Stale connection check"); if (managedConn.isStale()) { this.log.debug("Stale connection detected"); managedConn.close(); } } } } if (orig instanceof AbortableHttpRequest) { ((AbortableHttpRequest) orig).setReleaseTrigger(managedConn); } try { tryConnect(roureq, context); } catch (TunnelRefusedException ex) { if (this.log.isDebugEnabled()) { this.log.debug(ex.getMessage()); } response = ex.getResponse(); break; } // Reset headers on the request wrapper wrapper.resetHeaders(); // Re-write request URI if needed rewriteRequestURI(wrapper, route); // Use virtual host if set target = virtualHost; if (target == null) { target = route.getTargetHost(); } HttpHost proxy = route.getProxyHost(); // Populate the execution context context.setAttribute(ExecutionContext.HTTP_TARGET_HOST, target); context.setAttribute(ExecutionContext.HTTP_PROXY_HOST, proxy); context.setAttribute(ExecutionContext.HTTP_CONNECTION, managedConn); context.setAttribute(ClientContext.TARGET_AUTH_STATE, targetAuthState); context.setAttribute(ClientContext.PROXY_AUTH_STATE, proxyAuthState); // Run request protocol interceptors requestExec.preProcess(wrapper, httpProcessor, context); response = tryExecute(roureq, context); if (response == null) { // Need to start over continue; } // Run response protocol interceptors response.setParams(params); requestExec.postProcess(response, httpProcessor, context); // The connection is in or can be brought to a re-usable state. 
reuse = reuseStrategy.keepAlive(response, context); if (reuse) { // Set the idle duration of this connection long duration = keepAliveStrategy.getKeepAliveDuration(response, context); if (this.log.isDebugEnabled()) { String s; if (duration > 0) { s = "for " + duration + " " + TimeUnit.MILLISECONDS; } else { s = "indefinitely"; } this.log.debug("Connection can be kept alive " + s); } managedConn.setIdleDuration(duration, TimeUnit.MILLISECONDS); } RoutedRequest followup = handleResponse(roureq, response, context); if (followup == null) { done = true; } else { if (reuse) { // Make sure the response body is fully consumed, if present HttpEntity entity = response.getEntity(); EntityUtils.consume(entity); // entity consumed above is not an auto-release entity, // need to mark the connection re-usable explicitly managedConn.markReusable(); } else { managedConn.close(); } // check if we can use the same connection for the followup if (!followup.getRoute().equals(roureq.getRoute())) { releaseConnection(); } roureq = followup; } if (managedConn != null && userToken == null) { userToken = userTokenHandler.getUserToken(context); context.setAttribute(ClientContext.USER_TOKEN, userToken); if (userToken != null) { managedConn.setState(userToken); } } } // while not done // check for entity, release connection if possible if ((response == null) || (response.getEntity() == null) || !response.getEntity().isStreaming()) { // connection not needed and (assumed to be) in re-usable state if (reuse) managedConn.markReusable(); releaseConnection(); } else { // install an auto-release entity HttpEntity entity = response.getEntity(); entity = new BasicManagedEntity(entity, managedConn, reuse); response.setEntity(entity); } return response; } catch (ConnectionShutdownException ex) { InterruptedIOException ioex = new InterruptedIOException("Connection has been shut down"); ioex.initCause(ex); throw ioex; } catch (HttpException ex) { abortConnection(); throw ex; } catch (IOException ex) { abortConnection(); throw ex; } catch (RuntimeException ex) { abortConnection(); throw ex; } }
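The example above also uses the other constructor, InterruptedIOException(String), to carry a human-readable detail message ("Connection has been shut down") while still chaining the triggering exception via initCause(). A hedged sketch of that variant, with a hypothetical TransportShutdownException standing in for HttpClient's ConnectionShutdownException:

    import java.io.InterruptedIOException;

    public class ShutdownTranslation {

        /** Hypothetical marker exception, used only for this sketch. */
        static class TransportShutdownException extends RuntimeException { }

        static InterruptedIOException toInterruptedIO(TransportShutdownException ex) {
            // Carry a detail message for logs, and the original exception as the cause.
            InterruptedIOException ioex = new InterruptedIOException("Connection has been shut down");
            ioex.initCause(ex);
            return ioex;
        }
    }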
From source file: org.apache.http.impl.client.StatiscicsLoggingRequestDirector.java
public HttpResponse execute(HttpHost target, HttpRequest request, HttpContext context) throws HttpException, IOException { HttpRequest orig = request;//from ww w . ja v a2 s . c o m RequestWrapper origWrapper = wrapRequest(orig); origWrapper.setParams(params); HttpRoute origRoute = determineRoute(target, origWrapper, context); virtualHost = (HttpHost) orig.getParams().getParameter(ClientPNames.VIRTUAL_HOST); RoutedRequest roureq = new RoutedRequest(origWrapper, origRoute); long timeout = HttpConnectionParams.getConnectionTimeout(params); boolean reuse = false; boolean done = false; try { HttpResponse response = null; while (!done) { // In this loop, the RoutedRequest may be replaced by a // followup request and route. The request and route passed // in the method arguments will be replaced. The original // request is still available in 'orig'. RequestWrapper wrapper = roureq.getRequest(); HttpRoute route = roureq.getRoute(); response = null; // See if we have a user token bound to the execution context Object userToken = context.getAttribute(ClientContext.USER_TOKEN); // Allocate connection if needed if (managedConn == null) { ClientConnectionRequest connRequest = connManager.requestConnection(route, userToken); if (orig instanceof AbortableHttpRequest) { ((AbortableHttpRequest) orig).setConnectionRequest(connRequest); } try { managedConn = connRequest.getConnection(timeout, TimeUnit.MILLISECONDS); } catch (InterruptedException interrupted) { InterruptedIOException iox = new InterruptedIOException(); iox.initCause(interrupted); throw iox; } if (HttpConnectionParams.isStaleCheckingEnabled(params)) { // validate connection if (managedConn.isOpen()) { this.log.debug("Stale connection check"); if (managedConn.isStale()) { this.log.debug("Stale connection detected"); managedConn.close(); } } } } if (orig instanceof AbortableHttpRequest) { ((AbortableHttpRequest) orig).setReleaseTrigger(managedConn); } try { tryConnect(roureq, context); } catch (TunnelRefusedException ex) { if (this.log.isDebugEnabled()) { this.log.debug(ex.getMessage()); } response = ex.getResponse(); break; } // Reset headers on the request wrapper wrapper.resetHeaders(); // Re-write request URI if needed rewriteRequestURI(wrapper, route); // Use virtual host if set target = virtualHost; if (target == null) { target = route.getTargetHost(); } HttpHost proxy = route.getProxyHost(); // Populate the execution context context.setAttribute(ExecutionContext.HTTP_TARGET_HOST, target); context.setAttribute(ExecutionContext.HTTP_PROXY_HOST, proxy); context.setAttribute(ExecutionContext.HTTP_CONNECTION, managedConn); context.setAttribute(ClientContext.TARGET_AUTH_STATE, targetAuthState); context.setAttribute(ClientContext.PROXY_AUTH_STATE, proxyAuthState); synchronized (connectionStats) { ConnectionStatistics stats = new ConnectionStatistics(); stats.setConnectionState(State.OPEN); stats.setLastUsed(System.currentTimeMillis()); stats.setLastRequest(request.getRequestLine().getUri()); connectionStats.put(java.lang.System.identityHashCode(managedConn), stats); } // Run request protocol interceptors requestExec.preProcess(wrapper, httpProcessor, context); response = tryExecute(roureq, context); if (response == null) { // Need to start over continue; } // Run response protocol interceptors response.setParams(params); requestExec.postProcess(response, httpProcessor, context); // The connection is in or can be brought to a re-usable state. 
reuse = reuseStrategy.keepAlive(response, context); if (reuse) { // Set the idle duration of this connection long duration = keepAliveStrategy.getKeepAliveDuration(response, context); if (this.log.isDebugEnabled()) { String s; if (duration > 0) { s = "for " + duration + " " + TimeUnit.MILLISECONDS; } else { s = "indefinitely"; } this.log.debug("Connection can be kept alive " + s); } managedConn.setIdleDuration(duration, TimeUnit.MILLISECONDS); } RoutedRequest followup = handleResponse(roureq, response, context); if (followup == null) { done = true; } else { if (reuse) { // Make sure the response body is fully consumed, if present HttpEntity entity = response.getEntity(); EntityUtils.consume(entity); // entity consumed above is not an auto-release entity, // need to mark the connection re-usable explicitly managedConn.markReusable(); } else { managedConn.close(); } // check if we can use the same connection for the followup if (!followup.getRoute().equals(roureq.getRoute())) { releaseConnection(); } roureq = followup; } if (managedConn != null && userToken == null) { userToken = userTokenHandler.getUserToken(context); context.setAttribute(ClientContext.USER_TOKEN, userToken); if (userToken != null) { managedConn.setState(userToken); } } } // while not done // check for entity, release connection if possible if ((response == null) || (response.getEntity() == null) || !response.getEntity().isStreaming()) { // connection not needed and (assumed to be) in re-usable state if (reuse) managedConn.markReusable(); releaseConnection(); } else { // install an auto-release entity HttpEntity entity = response.getEntity(); entity = new StatisticsAwareManagedEntity(entity, managedConn, reuse, this.connectionStats); response.setEntity(entity); } return response; } catch (ConnectionShutdownException ex) { InterruptedIOException ioex = new InterruptedIOException("Connection has been shut down"); ioex.initCause(ex); throw ioex; } catch (HttpException ex) { abortConnection(); throw ex; } catch (IOException ex) { abortConnection(); throw ex; } catch (RuntimeException ex) { abortConnection(); throw ex; } }
From source file: org.apache.hadoop.hbase.rest.client.RemoteHTable.java
    public void put(List<Put> puts) throws IOException {
        // this is a trick: The gateway accepts multiple rows in a cell set and
        // ignores the row specification in the URI

        // separate puts by row
        TreeMap<byte[], List<Cell>> map = new TreeMap<byte[], List<Cell>>(Bytes.BYTES_COMPARATOR);
        for (Put put : puts) {
            byte[] row = put.getRow();
            List<Cell> cells = map.get(row);
            if (cells == null) {
                cells = new ArrayList<Cell>();
                map.put(row, cells);
            }
            for (List<Cell> l : put.getFamilyCellMap().values()) {
                cells.addAll(l);
            }
        }

        // build the cell set
        CellSetModel model = new CellSetModel();
        for (Map.Entry<byte[], List<Cell>> e : map.entrySet()) {
            RowModel row = new RowModel(e.getKey());
            for (Cell cell : e.getValue()) {
                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
                row.addCell(new CellModel(kv));
            }
            model.addRow(row);
        }

        // build path for multiput
        StringBuilder sb = new StringBuilder();
        sb.append('/');
        sb.append(Bytes.toStringBinary(name));
        sb.append("/$multiput"); // can be any nonexistent row

        for (int i = 0; i < maxRetries; i++) {
            Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
                model.createProtobufOutput());
            int code = response.getCode();
            switch (code) {
            case 200:
                return;
            case 509:
                try {
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e) {
                    throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                }
                break;
            default:
                throw new IOException("multiput request failed with " + code);
            }
        }
        throw new IOException("multiput request timed out");
    }
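The HBase REST client examples (here and below) use a more compact one-liner: because initCause() returns the Throwable it was called on, the new InterruptedIOException can be created, chained, and thrown in a single cast expression. A minimal sketch of that retry-and-sleep pattern (MAX_RETRIES, SLEEP_TIME_MS and the attempt() call are illustrative placeholders):

    import java.io.IOException;
    import java.io.InterruptedIOException;

    public class RetryLoop {

        private static final int MAX_RETRIES = 10;      // placeholder retry limit
        private static final long SLEEP_TIME_MS = 1000; // placeholder back-off

        void runWithRetries() throws IOException {
            for (int i = 0; i < MAX_RETRIES; i++) {
                if (attempt()) {
                    return;                              // succeeded
                }
                try {
                    Thread.sleep(SLEEP_TIME_MS);         // back off before retrying
                } catch (InterruptedException e) {
                    // initCause() returns 'this', so one cast yields a throwable expression
                    throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                }
            }
            throw new IOException("request timed out after " + MAX_RETRIES + " attempts");
        }

        private boolean attempt() {
            return false;                                // placeholder for the real request
        }
    }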
From source file: org.apache.hadoop.hbase.master.procedure.DispatchMergingRegionsProcedure.java
    /**
     * Move all regions to the same region server
     * @param env MasterProcedureEnv
     * @return whether target regions hosted by the same RS
     * @throws IOException
     */
    private boolean MoveRegionsToSameRS(final MasterProcedureEnv env) throws IOException {
        // Make sure regions are on the same regionserver before send merge
        // regions request to region server.
        //
        boolean onSameRS = isRegionsOnTheSameServer(env);
        if (!onSameRS) {
            // Note: the following logic assumes that we only have 2 regions to merge.  In the future,
            // if we want to extend to more than 2 regions, the code needs to modify a little bit.
            //
            RegionStates regionStates = getAssignmentManager(env).getRegionStates();
            ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);

            RegionLoad loadOfRegionA = getRegionLoad(env, regionLocation, regionsToMerge[0]);
            RegionLoad loadOfRegionB = getRegionLoad(env, regionLocation2, regionsToMerge[1]);
            if (loadOfRegionA != null && loadOfRegionB != null
                    && loadOfRegionA.getRequestsCount() < loadOfRegionB.getRequestsCount()) {
                // switch regionsToMerge[0] and regionsToMerge[1]
                HRegionInfo tmpRegion = this.regionsToMerge[0];
                this.regionsToMerge[0] = this.regionsToMerge[1];
                this.regionsToMerge[1] = tmpRegion;
                ServerName tmpLocation = regionLocation;
                regionLocation = regionLocation2;
                regionLocation2 = tmpLocation;
            }

            long startTime = EnvironmentEdgeManager.currentTime();

            RegionPlan regionPlan = new RegionPlan(regionsToMerge[1], regionLocation2, regionLocation);
            LOG.info("Moving regions to same server for merge: " + regionPlan.toString());
            getAssignmentManager(env).balance(regionPlan);
            do {
                try {
                    Thread.sleep(20);
                    // Make sure check RIT first, then get region location, otherwise
                    // we would make a wrong result if region is online between getting
                    // region location and checking RIT
                    boolean isRIT = regionStates.isRegionInTransition(regionsToMerge[1]);
                    regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
                    onSameRS = regionLocation.equals(regionLocation2);
                    if (onSameRS || !isRIT) {
                        // Regions are on the same RS, or regionsToMerge[1] is not in
                        // RegionInTransition any more
                        break;
                    }
                } catch (InterruptedException e) {
                    InterruptedIOException iioe = new InterruptedIOException();
                    iioe.initCause(e);
                    throw iioe;
                }
            } while ((EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
        }
        return onSameRS;
    }
From source file: org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java
    /**
     * This takes the LQI's grouped by likely regions and attempts to bulk load
     * them.  Any failures are re-queued for another pass with the
     * groupOrSplitPhase.
     */
    protected void bulkLoadPhase(final HTable table, final HConnection conn, ExecutorService pool,
            Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups)
            throws IOException {
        // atomically bulk load the groups.
        Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<Future<List<LoadQueueItem>>>();
        for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
            final byte[] first = e.getKey().array();
            final Collection<LoadQueueItem> lqis = e.getValue();

            final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
                public List<LoadQueueItem> call() throws Exception {
                    List<LoadQueueItem> toRetry = tryAtomicRegionLoad(conn, table.getName(), first, lqis);
                    return toRetry;
                }
            };
            loadingFutures.add(pool.submit(call));
        }

        // get all the results.
        for (Future<List<LoadQueueItem>> future : loadingFutures) {
            try {
                List<LoadQueueItem> toRetry = future.get();

                // LQIs that are requeued to be regrouped.
                queue.addAll(toRetry);

            } catch (ExecutionException e1) {
                Throwable t = e1.getCause();
                if (t instanceof IOException) {
                    // At this point something unrecoverable has happened.
                    // TODO Implement bulk load recovery
                    throw new IOException("BulkLoad encountered an unrecoverable problem", t);
                }
                LOG.error("Unexpected execution exception during bulk load", e1);
                throw new IllegalStateException(t);
            } catch (InterruptedException e1) {
                LOG.error("Unexpected interrupted exception during bulk load", e1);
                throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
            }
        }
    }
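When the blocking call is Future.get() rather than Thread.sleep(), the same conversion applies: ExecutionException carries the task's own failure, while InterruptedException means the waiting thread was interrupted and is rethrown as an InterruptedIOException. A self-contained sketch under those assumptions (the task list and pool size are placeholders):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class ParallelLoad {

        List<String> loadAll(List<Callable<String>> tasks) throws IOException {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            try {
                List<Future<String>> futures = new ArrayList<>();
                for (Callable<String> task : tasks) {
                    futures.add(pool.submit(task));
                }
                List<String> results = new ArrayList<>();
                for (Future<String> future : futures) {
                    try {
                        results.add(future.get());       // blocks until the task finishes
                    } catch (ExecutionException e) {
                        // the task itself failed; surface its cause as an IOException
                        throw new IOException("task failed", e.getCause());
                    } catch (InterruptedException e) {
                        // the waiting thread was interrupted; convert to the java.io hierarchy
                        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                    }
                }
                return results;
            } finally {
                pool.shutdownNow();
            }
        }
    }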
From source file: org.apache.hadoop.hbase.regionserver.IndexSplitTransaction.java
    /**
     * Perform time consuming opening of the daughter regions.
     * @param server Hosting server instance.  Can be null when testing (won't try
     * and update in zk if a null server)
     * @param services Used to online/offline regions.
     * @param a first daughter region
     * @param b second daughter region
     * @throws IOException If thrown, transaction failed.
     *          Call {@link #rollback(Server, RegionServerServices)}
     */
    @Override
    /* package */void openDaughters(final Server server, final RegionServerServices services, Region a,
            Region b) throws IOException {
        boolean stopped = server != null && server.isStopped();
        boolean stopping = services != null && services.isStopping();
        // TODO: Is this check needed here?
        if (stopped || stopping) {
            LOG.info("Not opening daughters " + b.getRegionInfo().getRegionNameAsString() + " and "
                    + a.getRegionInfo().getRegionNameAsString() + " because stopping=" + stopping
                    + ", stopped=" + stopped);
        } else {
            // Open daughters in parallel.
            DaughterOpener aOpener = new DaughterOpener(server, (HRegion) a);
            DaughterOpener bOpener = new DaughterOpener(server, (HRegion) b);
            aOpener.start();
            bOpener.start();
            try {
                aOpener.join();
                bOpener.join();
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
            if (aOpener.getException() != null) {
                throw new IOException("Failed " + aOpener.getName(), aOpener.getException());
            }
            if (bOpener.getException() != null) {
                throw new IOException("Failed " + bOpener.getName(), bOpener.getException());
            }
            if (services != null) {
                try {
                    // add 2nd daughter first (see HBASE-4335)
                    services.postOpenDeployTasks(b);
                    // Should add it to OnlineRegions
                    services.addToOnlineRegions(b);
                    services.postOpenDeployTasks(a);
                    services.addToOnlineRegions(a);
                } catch (KeeperException ke) {
                    throw new IOException(ke);
                }
            }
        }
    }
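The same wrapping works for Thread.join(): the example above starts two opener threads, joins both, and converts an interrupt during the join into an InterruptedIOException so the method keeps its IOException-only signature. A minimal sketch of that shape (the Runnable bodies and thread names are placeholders):

    import java.io.IOException;
    import java.io.InterruptedIOException;

    public class ParallelOpen {

        void openBoth(Runnable first, Runnable second) throws IOException {
            Thread a = new Thread(first, "opener-a");
            Thread b = new Thread(second, "opener-b");
            a.start();
            b.start();
            try {
                a.join();                                // wait for both workers to finish
                b.join();
            } catch (InterruptedException e) {
                // translate the interrupt into the checked IOException hierarchy
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
        }
    }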
From source file: com.sa.npopa.samples.hbase.rest.client.RemoteHTable.java
    @Override
    public void put(List<Put> puts) throws IOException {
        // this is a trick: The gateway accepts multiple rows in a cell set and
        // ignores the row specification in the URI

        // separate puts by row
        TreeMap<byte[], List<Cell>> map = new TreeMap<byte[], List<Cell>>(Bytes.BYTES_COMPARATOR);
        for (Put put : puts) {
            byte[] row = put.getRow();
            List<Cell> cells = map.get(row);
            if (cells == null) {
                cells = new ArrayList<Cell>();
                map.put(row, cells);
            }
            for (List<Cell> l : put.getFamilyCellMap().values()) {
                cells.addAll(l);
            }
        }

        // build the cell set
        CellSetModel model = new CellSetModel();
        for (Map.Entry<byte[], List<Cell>> e : map.entrySet()) {
            RowModel row = new RowModel(e.getKey());
            for (Cell cell : e.getValue()) {
                row.addCell(new CellModel(cell));
            }
            model.addRow(row);
        }

        // build path for multiput
        StringBuilder sb = new StringBuilder();
        sb.append('/');
        sb.append(Bytes.toStringBinary(name));
        sb.append("/$multiput"); // can be any nonexistent row

        for (int i = 0; i < maxRetries; i++) {
            Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
                model.createProtobufOutput());
            int code = response.getCode();
            switch (code) {
            case 200:
                return;
            case 509:
                try {
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e) {
                    throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                }
                break;
            default:
                throw new IOException("multiput request failed with " + code);
            }
        }
        throw new IOException("multiput request timed out");
    }
From source file: org.apache.hadoop.hbase.rest.client.RemoteHTable.java
    public void delete(Delete delete) throws IOException {
        String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(), delete.getTimeStamp(),
            delete.getTimeStamp(), 1);
        for (int i = 0; i < maxRetries; i++) {
            Response response = client.delete(spec);
            int code = response.getCode();
            switch (code) {
            case 200:
                return;
            case 509:
                try {
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e) {
                    throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                }
                break;
            default:
                throw new IOException("delete request failed with " + code);
            }
        }
        throw new IOException("delete request timed out");
    }
From source file: org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.java
boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) throws IOException { boolean isCorrupted = false; Preconditions.checkState(status == null); boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", HLog.SPLIT_SKIP_ERRORS_DEFAULT); int interval = conf.getInt("hbase.splitlog.report.interval.loglines", 1024); Path logPath = logfile.getPath(); boolean outputSinkStarted = false; boolean progress_failed = false; int editsCount = 0; int editsSkipped = 0; try {//from ww w. j a v a 2 s . co m status = TaskMonitor.get() .createStatus("Splitting log file " + logfile.getPath() + "into a temporary staging area."); long logLength = logfile.getLen(); LOG.info("Splitting hlog: " + logPath + ", length=" + logLength); LOG.info("DistributedLogReplay = " + this.distributedLogReplay); status.setStatus("Opening log file"); if (reporter != null && !reporter.progress()) { progress_failed = true; return false; } Reader in = null; try { in = getReader(fs, logfile, conf, skipErrors, reporter); } catch (CorruptedLogFileException e) { LOG.warn("Could not get reader, corrupted log file " + logPath, e); ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs); isCorrupted = true; } if (in == null) { status.markComplete("Was nothing to split in log file"); LOG.warn("Nothing to split in log file " + logPath); return true; } if (watcher != null && csm != null) { try { TableStateManager tsm = csm.getTableStateManager(); disablingOrDisabledTables = tsm.getTablesInStates(ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING); } catch (CoordinatedStateException e) { throw new IOException("Can't get disabling/disabled tables", e); } } int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3); int numOpenedFilesLastCheck = 0; outputSink.setReporter(reporter); outputSink.startWriterThreads(); outputSinkStarted = true; Entry entry; Long lastFlushedSequenceId = -1L; ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(logPath); failedServerName = (serverName == null) ? "" : serverName.getServerName(); while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) { byte[] region = entry.getKey().getEncodedRegionName(); String key = Bytes.toString(region); lastFlushedSequenceId = lastFlushedSequenceIds.get(key); if (lastFlushedSequenceId == null) { if (this.distributedLogReplay) { RegionStoreSequenceIds ids = SplitLogManager.getRegionFlushedSequenceId(this.watcher, failedServerName, key); if (ids != null) { lastFlushedSequenceId = ids.getLastFlushedSequenceId(); } } else if (sequenceIdChecker != null) { lastFlushedSequenceId = sequenceIdChecker.getLastSequenceId(region); } if (lastFlushedSequenceId == null) { lastFlushedSequenceId = -1L; } lastFlushedSequenceIds.put(key, lastFlushedSequenceId); } if (lastFlushedSequenceId >= entry.getKey().getLogSeqNum()) { editsSkipped++; continue; } entryBuffers.appendEntry(entry); editsCount++; int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck; // If sufficient edits have passed, check if we should report progress. 
if (editsCount % interval == 0 || moreWritersFromLastCheck > numOpenedFilesBeforeReporting) { numOpenedFilesLastCheck = this.getNumOpenWriters(); String countsStr = (editsCount - (editsSkipped + outputSink.getSkippedEdits())) + " edits, skipped " + editsSkipped + " edits."; status.setStatus("Split " + countsStr); if (reporter != null && !reporter.progress()) { progress_failed = true; return false; } } } } catch (InterruptedException ie) { IOException iie = new InterruptedIOException(); iie.initCause(ie); throw iie; } catch (CorruptedLogFileException e) { LOG.warn("Could not parse, corrupted log file " + logPath, e); ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs); isCorrupted = true; } catch (IOException e) { e = RemoteExceptionHandler.checkIOException(e); throw e; } finally { LOG.debug("Finishing writing output logs and closing down."); if (outputSinkStarted) { progress_failed = outputSink.finishWritingAndClose() == null; } String msg = "Processed " + editsCount + " edits across " + outputSink.getNumberOfRecoveredRegions() + " regions; log file=" + logPath + " is corrupted = " + isCorrupted + " progress failed = " + progress_failed; LOG.info(msg); status.markComplete(msg); } return !progress_failed; }
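One caveat worth noting across all of these examples: wrapping InterruptedException in an InterruptedIOException discards the thread's interrupt status. Code that may be called from interruptible contexts often re-asserts the flag before throwing; a hedged sketch of that variant (not taken from any of the sources above):

    import java.io.InterruptedIOException;

    public final class InterruptConversion {

        private InterruptConversion() { }

        /** Converts an InterruptedException while preserving the thread's interrupt flag. */
        public static InterruptedIOException toInterruptedIOException(InterruptedException cause) {
            Thread.currentThread().interrupt();          // re-assert the interrupt for upstream code
            InterruptedIOException iox = new InterruptedIOException("operation interrupted");
            iox.initCause(cause);
            return iox;
        }
    }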