Example usage for java.io InterruptedIOException InterruptedIOException

List of usage examples for java.io InterruptedIOException InterruptedIOException

Introduction

On this page you can find usage examples for the no-argument constructor java.io.InterruptedIOException().

Prototype

public InterruptedIOException() 

Document

Constructs an InterruptedIOException with null as its error detail message.
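
A pattern that recurs throughout the examples below is to catch an InterruptedException, wrap it with this constructor, attach the original exception via initCause, and rethrow it so that thread interruption propagates through an IOException-based API. The sketch below is a minimal, self-contained illustration of that idiom; the class and method names are hypothetical and not taken from any of the projects listed in the Usage section.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptedIOExample {

    /** Sleeps before a retry, converting thread interruption into an I/O exception. */
    static void pauseBeforeRetry(long millis) throws IOException {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
            // The no-arg constructor leaves the detail message null; initCause keeps the original exception.
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }

    public static void main(String[] args) throws IOException {
        pauseBeforeRetry(100);
        System.out.println("Slept without interruption.");
    }
}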

Usage

From source file:org.kohsuke.github.GHRepository.java

/**
 * Forks this repository into an organization.
 *
 * @return
 *      Newly forked repository that belongs to you.
 */
public GHRepository forkTo(GHOrganization org) throws IOException {
    new Requester(root).to(getApiTailUrl("forks?org=" + org.getLogin()));

    // this API is asynchronous. we need to wait for a bit
    for (int i = 0; i < 10; i++) {
        GHRepository r = org.getRepository(name);
        if (r != null)
            return r;
        try {
            Thread.sleep(3000);
        } catch (InterruptedException e) {
            throw (IOException) new InterruptedIOException().initCause(e);
        }
    }
    throw new IOException(this + " was forked into " + org.getLogin() + " but can't find the new repository");
}

From source file:org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure.java

private boolean assign(final MasterProcedureEnv env, final List<HRegionInfo> hris)
        throws InterruptedIOException {
    AssignmentManager am = env.getMasterServices().getAssignmentManager();
    try {
        am.assign(hris);
    } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
    } catch (IOException ioe) {
        LOG.info("Caught " + ioe + " during region assignment, will retry");
        return false;
    }
    return true;
}

From source file:org.apache.hadoop.hbase.rest.client.RemoteHTable.java

public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
        throws IOException {
    // column to check-the-value
    put.add(new KeyValue(row, family, qualifier, value));

    CellSetModel model = buildModelFromPut(put);
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    sb.append(Bytes.toStringBinary(name));
    sb.append('/');
    sb.append(Bytes.toStringBinary(put.getRow()));
    sb.append("?check=put");

    for (int i = 0; i < maxRetries; i++) {
        Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
                model.createProtobufOutput());
        int code = response.getCode();
        switch (code) {
        case 200:
            return true;
        case 304: // NOT-MODIFIED
            return false;
        case 509:
            try {
                Thread.sleep(sleepTime);
            } catch (final InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
            break;
        default:
            throw new IOException("checkAndPut request failed with " + code);
        }
    }
    throw new IOException("checkAndPut request timed out");
}

From source file:org.apache.hadoop.hbase.ipc.BlockingRpcConnection.java

/**
 * Initiates a call by sending the parameter to the remote server. Note: this is not called from
 * the Connection thread, but by other threads.
 * @see #readResponse()
 */
private void writeRequest(Call call) throws IOException {
    ByteBuffer cellBlock = this.rpcClient.cellBlockBuilder.buildCellBlock(this.codec, this.compressor,
            call.cells);
    CellBlockMeta cellBlockMeta;
    if (cellBlock != null) {
        cellBlockMeta = CellBlockMeta.newBuilder().setLength(cellBlock.limit()).build();
    } else {
        cellBlockMeta = null;
    }
    RequestHeader requestHeader = buildRequestHeader(call, cellBlockMeta);

    setupIOstreams();

    // Now we're going to write the call. We take the lock, then check that the connection
    // is still valid, and, if so we do the write to the socket. If the write fails, we don't
    // know where we stand, we have to close the connection.
    if (Thread.interrupted()) {
        throw new InterruptedIOException();
    }

    calls.put(call.id, call); // We put first as we don't want the connection to become idle.
    // from here, we do not throw any exception to upper layer as the call has been tracked in the
    // pending calls map.
    try {
        call.callStats.setRequestSizeBytes(write(this.out, requestHeader, call.param, cellBlock));
    } catch (IOException e) {
        closeConn(e);
        return;
    }
    notifyAll();
}

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

/**
 * This takes the LQI's grouped by likely regions and attempts to bulk load them. Any failures are
 * re-queued for another pass with the groupOrSplitPhase.
 * <p>
 * protected for testing.
 */
@VisibleForTesting
protected void bulkLoadPhase(Table table, Connection conn, ExecutorService pool, Deque<LoadQueueItem> queue,
        Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFile,
        Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
    // atomically bulk load the groups.
    Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<>();
    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
        byte[] first = e.getKey().array();
        Collection<LoadQueueItem> lqis = e.getValue();

        ClientServiceCallable<byte[]> serviceCallable = buildClientServiceCallable(conn, table.getName(), first,
                lqis, copyFile);

        Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            @Override
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> toRetry = tryAtomicRegionLoad(serviceCallable, table.getName(), first,
                        lqis);
                return toRetry;
            }
        };
        if (item2RegionMap != null) {
            for (LoadQueueItem lqi : lqis) {
                item2RegionMap.put(lqi, e.getKey());
            }
        }
        loadingFutures.add(pool.submit(call));
    }

    // get all the results.
    for (Future<List<LoadQueueItem>> future : loadingFutures) {
        try {
            List<LoadQueueItem> toRetry = future.get();

            if (item2RegionMap != null) {
                for (LoadQueueItem lqi : toRetry) {
                    item2RegionMap.remove(lqi);
                }
            }
            // LQIs that are requeued to be regrouped.
            queue.addAll(toRetry);

        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                // At this point something unrecoverable has happened.
                // TODO Implement bulk load recovery
                throw new IOException("BulkLoad encountered an unrecoverable problem", t);
            }
            LOG.error("Unexpected execution exception during bulk load", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during bulk load", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
}

From source file:com.sa.npopa.samples.hbase.rest.client.RemoteHTable.java

@Override
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
        throws IOException {
    // column to check-the-value
    put.add(new KeyValue(row, family, qualifier, value));

    CellSetModel model = buildModelFromPut(put);
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    sb.append(Bytes.toStringBinary(name));
    sb.append('/');
    sb.append(Bytes.toStringBinary(put.getRow()));
    sb.append("?check=put");

    for (int i = 0; i < maxRetries; i++) {
        Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
                model.createProtobufOutput());
        int code = response.getCode();
        switch (code) {
        case 200:
            return true;
        case 304: // NOT-MODIFIED
            return false;
        case 509:
            try {
                Thread.sleep(sleepTime);
            } catch (final InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
            break;
        default:
            throw new IOException("checkAndPut request failed with " + code);
        }
    }
    throw new IOException("checkAndPut request timed out");
}

From source file:org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.java

@Override
protected AsyncWriter createWriterInstance(Path path) throws IOException {
    boolean overwrite = false;
    for (int retry = 0;; retry++) {
        try {
            return AsyncFSWALProvider.createAsyncWriter(conf, fs, path, overwrite, eventLoop);
        } catch (RemoteException e) {
            LOG.warn("create wal log writer " + path + " failed, retry = " + retry, e);
            if (shouldRetryCreate(e)) {
                if (retry >= createMaxRetries) {
                    break;
                }
            } else {
                throw e.unwrapRemoteException();
            }
        } catch (NameNodeException e) {
            throw e;
        } catch (IOException e) {
            LOG.warn("create wal log writer " + path + " failed, retry = " + retry, e);
            if (retry >= createMaxRetries) {
                break;
            }
            // overwrite the old broken file.
            overwrite = true;
            try {
                Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
            } catch (InterruptedException ie) {
                throw new InterruptedIOException();
            }
        }
    }
    throw new IOException(
            "Failed to create wal log writer " + path + " after retrying " + createMaxRetries + " time(s)");
}

From source file:org.apache.hadoop.hbase.rest.client.RemoteHTable.java

public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete)
        throws IOException {
    Put put = new Put(row);
    // column to check-the-value
    put.add(new KeyValue(row, family, qualifier, value));
    CellSetModel model = buildModelFromPut(put);
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    sb.append(Bytes.toStringBinary(name));
    sb.append('/');
    sb.append(Bytes.toStringBinary(row));
    sb.append("?check=delete");

    for (int i = 0; i < maxRetries; i++) {
        Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
                model.createProtobufOutput());
        int code = response.getCode();
        switch (code) {
        case 200:
            return true;
        case 304: // NOT-MODIFIED
            return false;
        case 509:
            try {
                Thread.sleep(sleepTime);
            } catch (final InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
            break;
        default:
            throw new IOException("checkAndDelete request failed with " + code);
        }
    }
    throw new IOException("checkAndDelete request timed out");
}

From source file:org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.java

/**
 * Creates reference files for the top and bottom halves of the split.
 * @param hstoreFilesToSplit map of store files to create half file references for.
 * @return the number of reference files that were created.
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
        throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there's files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    int nbFiles = 0;
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        nbFiles += entry.getValue().size();
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            parent.conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(parent.conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent + " using " + maxThreads
            + " threads");
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int created_a = 0;
    int created_b = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            created_a += p.getFirst() != null ? 1 : 0;
            created_b += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + this.parent + " Daughter A: " + created_a
                + " storefiles, Daughter B: " + created_b + " storefiles.");
    }
    return new Pair<Integer, Integer>(created_a, created_b);
}

From source file:org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure.java

/**
 * Create Split directory
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs)
        throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();

    // The following code sets up a thread pool executor with as many slots as
    // there's files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir
    // Nothing to unroll here if failure -- re-run createSplitsDir will
    // clean this up.
    int nbFiles = 0;
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null) {
            nbFiles += storeFiles.size();
        }
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + parentHRI + " using " + maxThreads
            + " threads");
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads,
            Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (String family : regionFs.getFamilies()) {
        final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
        final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, family.getBytes(), new StoreFile(
                        mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA
                + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<Integer, Integer>(daughterA, daughterB);
}