Example usage for java.io InterruptedIOException InterruptedIOException

Introduction

On this page you can find example usages of the java.io.InterruptedIOException(String) constructor.

Prototype

public InterruptedIOException(String s) 

Document

Constructs an InterruptedIOException with the specified detail message.
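In practice the constructor almost always appears in a catch block: a blocking call throws InterruptedException, and the code converts it into this checked IOException subtype so that an I/O-oriented method signature can propagate it. A minimal sketch of that pattern follows (the class and method names are illustrative, not taken from any of the projects below):

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptedIOExample {

    // Illustrative helper: sleep inside an I/O-flavored API without
    // losing either the interrupt status or the original exception.
    public static void pause(long millis) throws IOException {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe it.
            Thread.currentThread().interrupt();
            // Wrap in an InterruptedIOException, keeping the cause.
            InterruptedIOException iioe = new InterruptedIOException("Sleep of " + millis + " ms was interrupted");
            iioe.initCause(e);
            throw iioe;
        }
    }
}

Several of the snippets below keep only the detail message; preserving the cause (via initCause) and the interrupt flag, as sketched here, is the more defensive form.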

Usage

From source file:org.openymsg.network.Session.java

/**
 * Logs the provided yahoo identity into the provided lobby.
 * @param lobby
 *            Lobby to login to.
 * @param yahooId
 *            Yahoo Identity that should login to the lobby.
 * @throws IllegalStateException
 * @throws IOException
 * @throws LoginRefusedException
 * @throws IllegalIdentityException
 */
public void chatLogin(final YahooChatLobby lobby, final YahooIdentity yahooId)
        throws IllegalStateException, IOException, LoginRefusedException, IllegalIdentityException {
    checkStatus();

    if (!this.identities.containsKey(yahooId.getId()))
        throw new IllegalIdentityException(
                "The YahooIdentity '" + yahooId.getId() + "'is not a valid identity for this session.");

    // TODO synchronized blocks don't really work (java optimization problem). Need a separate synchronized method
    // for this
    synchronized (this) {
        if (this.chatSessionStatus != SessionState.UNSTARTED
                && this.chatSessionStatus != SessionState.LOGGED_ON)
            throw new IllegalStateException(
                    "Chat session should be unstarted or messaging. You can't login to two chatrooms at the same time. Wait for one login to complete before connecting to the next one.");
        this.chatSessionStatus = SessionState.CONNECTING;
    }

    final long timeout = System.currentTimeMillis() + Util.loginTimeout(NetworkConstants.LOGIN_TIMEOUT);
    this.chatID = yahooId;

    try {
        transmitChatConnect(this.chatID.getId());
        while (this.chatSessionStatus != SessionState.CONNECTED && !past(timeout))
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                // ignore
            }
        if (past(timeout))
            throw new InterruptedIOException("Chat connect timed out");

        // Transmit 'login' packet and wait for acknowledgement
        transmitChatJoin(lobby.getNetworkName(), lobby.getParentRoomId());
        while (this.chatSessionStatus == SessionState.CONNECTED && !past(timeout))
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                // ignore
            }

        if (past(timeout))
            throw new InterruptedIOException("Chat login timed out");

        if (this.chatSessionStatus == SessionState.FAILED)
            throw (LoginRefusedException) this.loginException;

        // Successful?
        if (this.chatSessionStatus == SessionState.LOGGED_ON)
            this.currentLobby = lobby;
        else
            this.currentLobby = null;
    } finally {
        if (this.chatSessionStatus != SessionState.LOGGED_ON) {
            this.chatSessionStatus = SessionState.FAILED;
            this.chatID = null;
        }
    }
}
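The chatLogin method above repeats the same poll-sleep-check loop for both the connect and the join phases. Purely as an illustration (this helper does not exist in OpenYMSG), the pattern can be factored out into a method that raises InterruptedIOException on timeout or interruption:

import java.io.InterruptedIOException;
import java.util.function.BooleanSupplier;

public class AwaitSupport {

    // Hypothetical helper: poll until the condition holds or the deadline passes.
    public static void awaitOrTimeout(BooleanSupplier done, long deadlineMillis, String what)
            throws InterruptedIOException {
        while (!done.getAsBoolean() && System.currentTimeMillis() < deadlineMillis) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new InterruptedIOException(what + " interrupted");
            }
        }
        if (!done.getAsBoolean()) {
            throw new InterruptedIOException(what + " timed out");
        }
    }
}

With such a helper, the two wait loops would reduce to calls like awaitOrTimeout(() -> chatSessionStatus == SessionState.CONNECTED, timeout, "Chat connect").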

From source file:org.apache.hadoop.hdfs.server.namenode.Standby.java

/**
 * Interrupts and joins ongoing image validation.
 * @throws IOException
 */
private void interruptImageValidation() throws IOException {
    synchronized (imageValidatorLock) {
        if (imageValidator != null) {
            imageValidator.interrupt();
            try {
                imageValidator.join();
            } catch (InterruptedException e) {
                throw new InterruptedIOException("Standby: received interruption");
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

/**
 * Add a peer for this cluster. Creates the replicated tables in the peer.
 *
 * @param clusterName
 * @param peer
 *          Peer cluster. Add the peer cluster name as first item in the Pair and peer cluster
 *          address as second item.
 * @throws IOException
 * @throws ServiceException
 */
public void addPeer(final String clusterName, final Pair<String, String> peer)
        throws IOException, ServiceException {
    if (peer == null || Strings.isNullOrEmpty(peer.getFirst()) || Strings.isNullOrEmpty(peer.getSecond())) {
        throw new IllegalArgumentException("Peer should be specified");
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Adding peer " + peer + " to the cluster " + clusterName);
    }
    for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; ++tries) {
        boolean locked = false;
        try {
            locked = znodes.lockCluster(clusterName);
            if (locked) {
                // STEP 0 : Check whether the cluster name is existent, and whether the peer is existent.
                String clusterAddress = znodes.getClusterAddress(clusterName);
                if (clusterAddress == null) {
                    throw new IllegalArgumentException("The cluster[" + clusterName + "] doesn't exist");
                }
                if (clusterAddress.equals(peer.getSecond())) {
                    throw new IllegalArgumentException("Could not add self as peer");
                }
                if (znodes.isPeerExists(clusterName, peer.getFirst())) {
                    throw new IllegalArgumentException("The peer's name[" + peer + "] has been existent");
                }
                List<ClusterInfo> peers = znodes.getPeerClusters(clusterName);
                for (ClusterInfo p : peers) {
                    if (peer.getSecond().equals(p.getAddress()))
                        throw new IllegalArgumentException(
                                "The peer's address[" + peer + "] already exists");
                }
                // STEP 1 : Create all the replicated tables of source cluster in the peer cluster
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Creating replicated tables of " + clusterName + " in peer " + peer);
                }
                Configuration clusterConf = new Configuration(conf);
                ZKUtil.applyClusterKeyToConf(clusterConf, peer.getSecond());
                HBaseAdmin.checkHBaseAvailable(clusterConf);
                String[] tableNames = znodes.getTableNames();
                if (tableNames != null && tableNames.length > 0) {
                    createTablesInPeer(clusterName, tableNames, peer);
                }
                // STEP 2 : Adding the peer to the peer list in zookeeper
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Adding " + peer + " to zk peer list for cluster " + clusterName);
                }
                znodes.createPeer(clusterName, peer);
                LOG.debug("Added " + peer + " to zk peer list for cluster " + clusterName);
                // STEP 3 : Add the peer to master cluster's replication peer list
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Adding " + peer + " to cluster " + clusterName + "'s replication peer list");
                }
                addReplicationPeer(clusterName, peer);
                LOG.info("Added " + peer + " to cluster " + clusterName + "'s replication peer list");

                LOG.info("The peer " + peer + " is added for cluster " + clusterName);
                return;
            }
        } catch (KeeperException e) {
            throw new IOException(e);
        } finally {
            if (locked) {
                znodes.unlockCluster(clusterName);
            }
        }
        if (tries < this.numRetries * this.retryLongerMultiplier - 1) {
            try { // Sleep
                Thread.sleep(getPauseTime(tries));
            } catch (InterruptedException e) {
                throw new InterruptedIOException(
                        "Interrupted when waiting" + " for cross site table peer creation");
            }
        }
    }
    // throw an exception
    throw new IOException("Retries exhausted while still waiting for the peer of the cluster: " + clusterName
            + " to be created");
}

From source file:org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * Disable table and wait on completion.  May timeout eventually.  Use
 * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
 * instead.
 * The table has to be in enabled state for it to be disabled.
 * @param tableName
 * @throws IOException
 * There could be a couple of types of IOException:
 * TableNotFoundException means the table doesn't exist.
 * TableNotEnabledException means the table isn't in enabled state.
 */
public void disableTable(final TableName tableName) throws IOException {
    disableTableAsync(tableName);
    // Wait until table is disabled
    boolean disabled = false;
    for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
        disabled = isTableDisabled(tableName);
        if (disabled) {
            break;
        }
        long sleep = getPauseTime(tries);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + "disabled in " + tableName);
        }
        try {
            Thread.sleep(sleep);
        } catch (InterruptedException e) {
            // Do this conversion rather than let it out because do not want to
            // change the method signature.
            throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
        }
    }
    if (!disabled) {
        throw new RegionException("Retries exhausted, it took too long to wait" + " for the table " + tableName
                + " to be disabled.");
    }
    LOG.info("Disabled " + tableName);
}
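Note the single-expression form used in the catch block above: Throwable.initCause returns its receiver typed as Throwable, so the result has to be cast back to InterruptedIOException before it can be thrown. The MetaTableAccessor example further down does the same thing in two statements. A small self-contained sketch showing that both forms are equivalent:

import java.io.InterruptedIOException;

public class InitCauseForms {
    public static void main(String[] args) {
        InterruptedException e = new InterruptedException("demo");

        // One expression: initCause returns Throwable, hence the cast.
        InterruptedIOException a =
                (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);

        // Two statements: no cast needed.
        InterruptedIOException b = new InterruptedIOException("Interrupted");
        b.initCause(e);

        System.out.println(a.getCause() == e && b.getCause() == e); // prints true
    }
}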

From source file:org.apache.hadoop.hbase.client.AsyncProcess.java

/** Wait until the async does not have more than max tasks in progress. */
private void waitForMaximumCurrentTasks(int max) throws InterruptedIOException {
    long lastLog = EnvironmentEdgeManager.currentTimeMillis();
    long currentInProgress, oldInProgress = Long.MAX_VALUE;
    while ((currentInProgress = this.tasksInProgress.get()) > max) {
        if (oldInProgress != currentInProgress) { // Wait for in progress to change.
            long now = EnvironmentEdgeManager.currentTimeMillis();
            if (now > lastLog + 10000) {
                lastLog = now;
                LOG.info("#" + id + ", waiting for some tasks to finish. Expected max=" + max
                        + ", tasksInProgress=" + currentInProgress);
            }
        }
        oldInProgress = currentInProgress;
        try {
            synchronized (this.tasksInProgress) {
                this.tasksInProgress.wait(100);
            }
        } catch (InterruptedException e) {
            throw new InterruptedIOException(
                    "#" + id + ", interrupted." + " currentNumberOfTask=" + currentInProgress);
        }
    }
}

From source file:org.apache.hadoop.hbase.ipc.RpcClientImpl.java

/** Stop all threads related to this client.  No further calls may be made
 * using this client. */
@Override
public void close() {
    if (LOG.isDebugEnabled())
        LOG.debug("Stopping rpc client");
    if (!running.compareAndSet(true, false))
        return;

    Set<Connection> connsToClose = null;
    // wake up all connections
    synchronized (connections) {
        for (Connection conn : connections.values()) {
            conn.interrupt();
            if (conn.callSender != null) {
                conn.callSender.interrupt();
            }

            // In case the CallSender did not setupIOStreams() yet, the Connection may not be started
            // at all (if CallSender has a cancelled Call it can happen). See HBASE-13851
            if (!conn.isAlive()) {
                if (connsToClose == null) {
                    connsToClose = new HashSet<Connection>();
                }
                connsToClose.add(conn);
            }
        }
    }
    if (connsToClose != null) {
        for (Connection conn : connsToClose) {
            conn.markClosed(new InterruptedIOException("RpcClient is closing"));
            conn.close();
        }
    }
    // wait until all connections are closed
    while (!connections.isEmpty()) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            LOG.info("Interrupted while stopping the client. We still have " + connections.size()
                    + " connections.");
            Thread.currentThread().interrupt();
            return;
        }
    }
}

From source file:org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.java

@Override
public void waitUntilDone() throws InterruptedIOException {
    try {
        waitUntilDone(Long.MAX_VALUE);
    } catch (InterruptedException iex) {
        throw new InterruptedIOException(iex.getMessage());
    } finally {
        if (callsInProgress != null) {
            for (CancellableRegionServerCallable clb : callsInProgress) {
                clb.cancel();
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.MetaTableAccessor.java

/**
 * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
 * @param connection connection we're using
 * @param mutations Puts and Deletes to execute on hbase:meta
 * @throws IOException
 */
public static void mutateMetaTable(final Connection connection, final List<Mutation> mutations)
        throws IOException {
    Table t = getMetaHTable(connection);
    try {
        if (METALOG.isDebugEnabled()) {
            METALOG.debug(mutationsToString(mutations));
        }
        t.batch(mutations, null);
    } catch (InterruptedException e) {
        InterruptedIOException ie = new InterruptedIOException(e.getMessage());
        ie.initCause(e);
        throw ie;
    } finally {
        t.close();
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException IO problem
 * @throws FileAlreadyExistsException the destination file exists and
 * overwrite==false
 * @throws AmazonClientException failure in the AWS SDK
 */
private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst)
        throws IOException, FileAlreadyExistsException, AmazonClientException {
    incrementStatistic(INVOCATION_COPY_FROM_LOCAL_FILE);
    final String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new FileAlreadyExistsException(dst + " already exists");
    }
    LOG.debug("Copying local file from {} to {}", src, dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    final ObjectMetadata om = newObjectMetadata();
    PutObjectRequest putObjectRequest = newPutObjectRequest(key, om, srcfile);
    Upload up = putObject(putObjectRequest);
    ProgressableProgressListener listener = new ProgressableProgressListener(this, key, up, null);
    up.addProgressListener(listener);
    try {
        up.waitForUploadResult();
    } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted copying " + src + " to " + dst + ", cancelling");
    }
    listener.uploadCompleted();

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}

From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java

/**
 * Copy a single object in the bucket via a COPY operation.
 * @param srcKey source object path
 * @param dstKey destination object path
 * @param size object size
 * @throws AmazonClientException on failures inside the AWS SDK
 * @throws InterruptedIOException the operation was interrupted
 * @throws IOException Other IO problems
 */
private void copyFile(String srcKey, String dstKey, long size)
        throws IOException, InterruptedIOException, AmazonClientException {
    LOG.debug("copyFile {} -> {} ", srcKey, dstKey);

    try {
        ObjectMetadata srcom = getObjectMetadata(srcKey);
        ObjectMetadata dstom = cloneObjectMetadata(srcom);
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
        }
        CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
        copyObjectRequest.setCannedAccessControlList(cannedACL);
        copyObjectRequest.setNewObjectMetadata(dstom);

        ProgressListener progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {
                switch (progressEvent.getEventType()) {
                case TRANSFER_PART_COMPLETED_EVENT:
                    incrementWriteOperations();
                    break;
                default:
                    break;
                }
            }
        };

        Copy copy = transfers.copy(copyObjectRequest);
        copy.addProgressListener(progressListener);
        try {
            copy.waitForCopyResult();
            incrementWriteOperations();
            instrumentation.filesCopied(1, size);
        } catch (InterruptedException e) {
            throw new InterruptedIOException(
                    "Interrupted copying " + srcKey + " to " + dstKey + ", cancelling");
        }
    } catch (AmazonClientException e) {
        throw translateException("copyFile(" + srcKey + ", " + dstKey + ")", srcKey, e);
    }
}