Example usage for java.io IOException getCause

Introduction

This page collects examples of how the java.io.IOException method getCause() is used in real-world projects.

Prototype

public synchronized Throwable getCause()

(IOException does not declare this method itself; it inherits it from java.lang.Throwable.)

Document

Returns the cause of this throwable or null if the cause is nonexistent or unknown.
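
Before the project examples below, here is a minimal, self-contained sketch of that contract. The class name, method name, message strings, and the wrapped FileNotFoundException are invented for illustration:

import java.io.FileNotFoundException;
import java.io.IOException;

public class GetCauseDemo {

    public static void main(String[] args) {
        try {
            loadSettings();
        } catch (IOException e) {
            // getCause() returns the Throwable this exception wraps,
            // or null if no cause was recorded.
            Throwable cause = e.getCause();
            if (cause != null) {
                System.out.println("Caused by: " + cause);
            } else {
                System.out.println("No cause recorded");
            }
        }
    }

    // Throws an IOException that chains a lower-level exception as its cause.
    private static void loadSettings() throws IOException {
        throw new IOException("could not load settings",
                new FileNotFoundException("settings.properties"));
    }
}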

Usage

From source file:de.tu_dortmund.ub.hb_ng.SolRDF.java

@Override
public String getAccessRights(String graph, String uri) throws LinkedDataStorageException {

    this.logger.info("getAccessRights: graph=" + graph);
    this.logger.info("getAccessRights: uri=" + uri);

    String accessRights = "";

    if (uri.endsWith("/about")) {

        accessRights = "public";
    } else {

        // TODO config.properties
        String sparql = "SELECT ?o WHERE { GRAPH <http://data.ub.tu-dortmund.de/graph/"
                + this.config.getProperty("storage.graph.main") + "-public> { <" + uri
                + "/about> <http://purl.org/dc/terms#accessRights> ?o } }";

        try {

            JsonReader jsonReader = Json.createReader(
                    IOUtils.toInputStream(this.sparqlQuery(graph, URLEncoder.encode(sparql, "UTF-8"),
                            "application/sparql-results+json;charset=UTF-8"), "UTF-8"));

            JsonObject jsonObject = jsonReader.readObject();

            JsonArray bindings = jsonObject.getJsonObject("results").getJsonArray("bindings");

            if (bindings.size() == 0) {

                accessRights = "internal";
            } else {

                for (JsonObject binding : bindings.getValuesAs(JsonObject.class)) {

                    accessRights = binding.getJsonObject("o").getJsonString("value").getString();
                }
            }

            this.logger.info("accessRights: " + accessRights);
        } catch (IOException e) {

            this.logger.error("something went wrong", e);
            throw new LinkedDataStorageException(e.getMessage(), e.getCause());
        }
    }

    return accessRights;
}
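
Note the pattern here: the new LinkedDataStorageException wraps e.getCause() rather than e itself, so the intermediate IOException (and its stack trace) is dropped from the chain; and if the IOException carries no recorded cause, getCause() returns null and the new exception ends up with no cause at all. Several of the examples below use the same construction.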

From source file:org.apache.jackrabbit.core.cluster.ClusterNode.java

/**
 * Return the instance id to be used for this node in the cluster.
 * @param id configured id, <code>null</code> to generate a unique id
 */
private String getClusterNodeId(String id) throws ClusterException {
    if (id == null) {
        id = System.getProperty(SYSTEM_PROPERTY_NODE_ID);
        if (id == null) {
            try {
                id = getClusterNodeIdFromFile();
            } catch (IOException e) {
                throw new ClusterException(e.getMessage(), e.getCause());
            }
        }
    }
    return id;
}

From source file:org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.java

private static Entry getNextLogLine(Reader in, Path path, boolean skipErrors)
        throws CorruptedLogFileException, IOException {
    try {
        return in.next();
    } catch (EOFException eof) {
        // truncated files are expected if a RS crashes (see HBASE-2643)
        LOG.info("EOF from hlog " + path + ".  continuing");
        return null;
    } catch (IOException e) {
        // If the IOE resulted from bad file format,
        // then this problem is idempotent and retrying won't help
        if (e.getCause() != null && (e.getCause() instanceof ParseException
                || e.getCause() instanceof org.apache.hadoop.fs.ChecksumException)) {
            LOG.warn("Parse exception " + e.getCause().toString() + " from hlog " + path + ".  continuing");
            return null;
        }
        if (!skipErrors) {
            throw e;
        }
        CorruptedLogFileException t = new CorruptedLogFileException("skipErrors=true Ignoring exception"
                + " while parsing hlog " + path + ". Marking as corrupted");
        t.initCause(e);
        throw t;
    }
}
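
This example shows a different use of getCause(): inspecting the cause's type to classify the failure. A ParseException or ChecksumException cause means the file itself is malformed, so retrying the read cannot help; the entry is skipped or the file is marked as corrupted instead.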

From source file:org.apache.pulsar.broker.PulsarService.java

public synchronized LedgerOffloader createManagedLedgerOffloader(ServiceConfiguration conf)
        throws PulsarServerException {
    try {
        if (StringUtils.isNotBlank(conf.getManagedLedgerOffloadDriver())) {
            checkNotNull(conf.getOffloadersDirectory(),
                    "Offloader driver is configured to be '%s' but no offloaders directory is configured.",
                    conf.getManagedLedgerOffloadDriver());
            this.offloaderManager = OffloaderUtils.searchForOffloaders(conf.getOffloadersDirectory());
            LedgerOffloaderFactory offloaderFactory = this.offloaderManager
                    .getOffloaderFactory(conf.getManagedLedgerOffloadDriver());
            try {
                return offloaderFactory.create(conf.getProperties(),
                        ImmutableMap.of(METADATA_SOFTWARE_VERSION_KEY.toLowerCase(),
                                PulsarBrokerVersionStringUtils.getNormalizedVersionString(),
                                METADATA_SOFTWARE_GITSHA_KEY.toLowerCase(),
                                PulsarBrokerVersionStringUtils.getGitSha()),
                        getOffloaderScheduler(conf));
            } catch (IOException ioe) {
                throw new PulsarServerException(ioe.getMessage(), ioe.getCause());
            }
        } else {
            LOG.info("No ledger offloader configured, using NULL instance");
            return NullLedgerOffloader.INSTANCE;
        }
    } catch (Throwable t) {
        throw new PulsarServerException(t);
    }
}

From source file:de.qucosa.webapi.v1.DocumentResource.java

private void writeHtAccessFile(String qid, Document qucosaDocument)
        throws XPathExpressionException, FileNotFoundException {

    NodeList restrictedFiles = (NodeList) xPath.evaluate("//File[PathName!='' and FrontdoorVisible!='1']",
            qucosaDocument, XPathConstants.NODESET);

    File htaccess;
    try {
        htaccess = fileHandlingService.newFile(qid, ".htaccess");
    } catch (IOException e) {
        log.error("Cannot create .htaccess file: " + e.getMessage());

        if (log.isDebugEnabled()) {
            log.debug("Cause: ", e.getCause());
            StringWriter sw = new StringWriter();
            PrintWriter pw = new PrintWriter(sw);
            e.printStackTrace(pw);
            log.debug("Stacktrace: ", sw);
        }

        return;
    }

    if (htaccess == null || !htaccess.exists() || !htaccess.canWrite()) {
        log.error("No write access to .htaccess file");
        return;
    }

    if (restrictedFiles.getLength() == 0) {
        if (htaccess.exists())
            htaccess.delete();
        return;
    }

    List<String> filenames = new LinkedList<>();
    for (int i = 0; i < restrictedFiles.getLength(); i++) {
        filenames.add(
                ((Element) restrictedFiles.item(i)).getElementsByTagName("PathName").item(0).getTextContent());
    }

    PrintWriter printWriter = new PrintWriter(htaccess);
    for (String filename : filenames) {
        printWriter.printf("<Files \"%s\">\n\tOrder Deny,Allow\n\tDeny From All\n</Files>\n", filename);
    }
    printWriter.close();
}
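
Here getCause() is used purely for diagnostics: the cause is only logged, and only when debug logging is enabled, before the method gives up on writing the .htaccess file.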

From source file:fr.dutra.confluence2wordpress.core.sync.DefaultAttachmentsSynchronizer.java

private List<SynchronizedAttachment> uploadAttachments(Set<Attachment> attachments)
        throws WordpressXmlRpcException, SynchronizationException {
    if (attachments == null || attachments.isEmpty()) {
        return null;
    }
    int size = attachments.size();
    final WordpressClient client = pluginSettingsManager.getWordpressClient();
    List<FutureHolder> futures = new ArrayList<FutureHolder>(size);
    for (final Attachment attachment : attachments) {
        byte[] data;
        try {
            data = IOUtils.toByteArray(attachment.getContentsAsStream());
        } catch (IOException e) {
            throw new SynchronizationException("Cannot read attachment: " + attachment.getFileName(), e);
        }
        WordpressFile file = new WordpressFile(attachment.getFileName(), attachment.getContentType(), data);
        futures.add(new FutureHolder(attachment, client.uploadFile(file)));
    }
    List<SynchronizedAttachment> synchronizedAttachments = new ArrayList<SynchronizedAttachment>(size);
    for (FutureHolder future : futures) {
        try {
            SynchronizedAttachment synchronizedAttachment = future.toSynchronizedAttachment();
            synchronizedAttachments.add(synchronizedAttachment);
        } catch (InterruptedException e) {
            throw new WordpressXmlRpcException("Cannot upload attachment", e);
        } catch (ExecutionException e) {
            if (e.getCause() instanceof WordpressXmlRpcException) {
                throw (WordpressXmlRpcException) e.getCause();
            }
            throw new WordpressXmlRpcException("Cannot upload attachment",
                    e.getCause() == null ? e : e.getCause());
        }
    }
    return synchronizedAttachments;
}
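
This is the classic getCause() idiom for java.util.concurrent: an ExecutionException is just a wrapper around whatever the task threw, so the code unwraps it with getCause(), rethrows the cause directly when it is already the expected type, and guards against a null cause before passing it on as the cause of the new exception.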

From source file:org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.java

@Override
public void run() {
    connectToPeers();
    // We were stopped while looping to connect to sinks, just abort
    if (!this.isActive()) {
        uninitialize();
        return;
    }

    int sleepMultiplier = 1;
    // delay this until we are in an asynchronous thread
    while (this.isActive() && this.peerClusterId == null) {
        this.peerClusterId = replicationPeers.getPeerUUID(this.peerId);
        if (this.isActive() && this.peerClusterId == null) {
            if (sleepForRetries("Cannot contact the peer's zk ensemble", sleepMultiplier)) {
                sleepMultiplier++;
            }
        }
    }
    // We were stopped while looping to contact peer's zk ensemble, just abort
    if (!this.isActive()) {
        uninitialize();
        return;
    }

    // resetting to 1 to reuse later
    sleepMultiplier = 1;

    // In rare case, zookeeper setting may be messed up. That leads to the incorrect
    // peerClusterId value, which is the same as the source clusterId
    if (clusterId.equals(peerClusterId)) {
        this.terminate("ClusterId " + clusterId + " is replicating to itself: peerClusterId " + peerClusterId);
    }
    LOG.info("Replicating " + clusterId + " -> " + peerClusterId);

    // If this is recovered, the queue is already full and the first log
    // normally has a position (unless the RS failed between 2 logs)
    if (this.replicationQueueInfo.isQueueRecovered()) {
        try {
            this.repLogReader.setPosition(
                    this.replicationQueues.getLogPosition(this.peerClusterZnode, this.queue.peek().getName()));
            if (LOG.isTraceEnabled()) {
                LOG.trace("Recovered queue started with log " + this.queue.peek() + " at position "
                        + this.repLogReader.getPosition());
            }
        } catch (ReplicationException e) {
            this.terminate("Couldn't get the position of this recovered queue " + this.peerClusterZnode, e);
        }
    }
    // Loop until we close down
    while (isActive()) {
        // Sleep until replication is enabled again
        if (!isPeerEnabled()) {
            if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        Path oldPath = getCurrentPath(); // note that in the current scenario,
                                         // oldPath will be null when a log roll happens.
        // Get a new path
        boolean hasCurrentPath = getNextPath();
        if (getCurrentPath() != null && oldPath == null) {
            sleepMultiplier = 1; //reset the sleepMultiplier on a path change
        }
        if (!hasCurrentPath) {
            if (sleepForRetries("No log to process", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        boolean currentWALisBeingWrittenTo = false;
        //For WAL files we own (rather than recovered), take a snapshot of whether the
        //current WAL file (this.currentPath) is in use (for writing) NOW!
        //Since the new WAL paths are enqueued only after the prev WAL file
        //is 'closed', presence of an element in the queue means that
        //the previous WAL file was closed, else the file is in use (currentPath)
        //We take the snapshot now so that we are protected against races
        //where a new file gets enqueued while the current file is being processed
        //(and where we just finished reading the current file).
        if (!this.replicationQueueInfo.isQueueRecovered() && queue.size() == 0) {
            currentWALisBeingWrittenTo = true;
        }
        // Open a reader on it
        if (!openReader(sleepMultiplier)) {
            // Reset the sleep multiplier, else it'd be reused for the next file
            sleepMultiplier = 1;
            continue;
        }

        // If we got a null reader but didn't continue, then sleep and continue
        if (this.reader == null) {
            if (sleepForRetries("Unable to open a reader", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }

        boolean gotIOE = false;
        currentNbOperations = 0;
        List<HLog.Entry> entries = new ArrayList<HLog.Entry>(1);
        currentSize = 0;
        try {
            if (readAllEntriesToReplicateOrNextFile(currentWALisBeingWrittenTo, entries)) {
                continue;
            }
        } catch (IOException ioe) {
            LOG.warn(this.peerClusterZnode + " Got: ", ioe);
            gotIOE = true;
            if (ioe.getCause() instanceof EOFException) {

                boolean considerDumping = false;
                if (this.replicationQueueInfo.isQueueRecovered()) {
                    try {
                        FileStatus stat = this.fs.getFileStatus(this.currentPath);
                        if (stat.getLen() == 0) {
                            LOG.warn(this.peerClusterZnode + " Got EOF and the file was empty");
                        }
                        considerDumping = true;
                    } catch (IOException e) {
                        LOG.warn(this.peerClusterZnode + " Got while getting file size: ", e);
                    }
                }

                if (considerDumping && sleepMultiplier == this.maxRetriesMultiplier && processEndOfFile()) {
                    continue;
                }
            }
        } finally {
            try {
                this.reader = null;
                this.repLogReader.closeReader();
            } catch (IOException e) {
                gotIOE = true;
                LOG.warn("Unable to finalize the tailing of a file", e);
            }
        }

        // If we didn't get anything to replicate, or if we hit a IOE,
        // wait a bit and retry.
        // But if we need to stop, don't bother sleeping
        if (this.isActive() && (gotIOE || entries.isEmpty())) {
            if (this.lastLoggedPosition != this.repLogReader.getPosition()) {
                this.manager.logPositionAndCleanOldLogs(this.currentPath, this.peerClusterZnode,
                        this.repLogReader.getPosition(), this.replicationQueueInfo.isQueueRecovered(),
                        currentWALisBeingWrittenTo);
                this.lastLoggedPosition = this.repLogReader.getPosition();
            }
            // Reset the sleep multiplier if nothing has actually gone wrong
            if (!gotIOE) {
                sleepMultiplier = 1;
            }
            if (sleepForRetries("Nothing to replicate", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        sleepMultiplier = 1;
        shipEdits(currentWALisBeingWrittenTo, entries);
    }
    uninitialize();
}
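
In this run loop, getCause() is checked for an EOFException to recognize a truncated WAL file, which is treated as a recoverable condition rather than a fatal error: only for a recovered queue, and only after the retry budget is exhausted, does the source consider dumping the file and moving on.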

From source file:org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.java

/**
 * Open a reader on the current path
 *
 * @param sleepMultiplier by how many times the default sleeping time is augmented
 * @return true if we should continue with that file, false if we are over with it
 */
protected boolean openReader(int sleepMultiplier) {
    try {
        try {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Opening log " + this.currentPath);
            }
            this.reader = repLogReader.openReader(this.currentPath);
        } catch (FileNotFoundException fnfe) {
            if (this.replicationQueueInfo.isQueueRecovered()) {
                // We didn't find the log in the archive directory, look if it still
                // exists in the dead RS folder (there could be a chain of failures
                // to look at)
                List<String> deadRegionServers = this.replicationQueueInfo.getDeadRegionServers();
                LOG.info("NB dead servers : " + deadRegionServers.size());
                for (String curDeadServerName : deadRegionServers) {
                    Path deadRsDirectory = new Path(manager.getLogDir().getParent(), curDeadServerName);
                    Path[] locs = new Path[] { new Path(deadRsDirectory, currentPath.getName()),
                            new Path(deadRsDirectory.suffix(HLog.SPLITTING_EXT), currentPath.getName()), };
                    for (Path possibleLogLocation : locs) {
                        LOG.info("Possible location " + possibleLogLocation.toUri().toString());
                        if (this.manager.getFs().exists(possibleLogLocation)) {
                            // We found the right new location
                            LOG.info("Log " + this.currentPath + " still exists at " + possibleLogLocation);
                            // Breaking here will make us sleep since reader is null
                            return true;
                        }
                    }
                }
                // In the case of disaster/recovery, HMaster may be shutdown/crashed before flush data
                // from .logs to .oldlogs. Loop into .logs folders and check whether a match exists
                if (stopper instanceof ReplicationSyncUp.DummyServer) {
                    FileStatus[] rss = fs.listStatus(manager.getLogDir());
                    for (FileStatus rs : rss) {
                        Path p = rs.getPath();
                        FileStatus[] logs = fs.listStatus(p);
                        for (FileStatus log : logs) {
                            p = new Path(p, log.getPath().getName());
                            if (p.getName().equals(currentPath.getName())) {
                                currentPath = p;
                                LOG.info("Log " + this.currentPath + " exists under " + manager.getLogDir());
                                // Open the log at the new location
                                this.openReader(sleepMultiplier);
                                return true;
                            }
                        }
                    }
                }

                // TODO What happens if the log was missing from every single location?
                // Although we need to check a couple of times as the log could have
                // been moved by the master between the checks
                // It can also happen if a recovered queue wasn't properly cleaned,
                // such that the znode pointing to a log exists but the log was
                // deleted a long time ago.
                // For the moment, we'll throw the IO and processEndOfFile
                throw new IOException("File from recovered queue is " + "nowhere to be found", fnfe);
            } else {
                // If the log was archived, continue reading from there
                Path archivedLogLocation = new Path(manager.getOldLogDir(), currentPath.getName());
                if (this.manager.getFs().exists(archivedLogLocation)) {
                    currentPath = archivedLogLocation;
                    LOG.info("Log " + this.currentPath + " was moved to " + archivedLogLocation);
                    // Open the log at the new location
                    this.openReader(sleepMultiplier);

                }
                // TODO What happens the log is missing in both places?
            }
        }
    } catch (IOException ioe) {
        if (ioe instanceof EOFException && isCurrentLogEmpty())
            return true;
        LOG.warn(this.peerClusterZnode + " Got: ", ioe);
        this.reader = null;
        if (ioe.getCause() instanceof NullPointerException) {
            // Workaround for race condition in HDFS-4380
            // which throws a NPE if we open a file before any data node has the most recent block
            // Just sleep and retry. Will require re-reading compressed HLogs for compressionContext.
            LOG.warn("Got NPE opening reader, will retry.");
        } else if (sleepMultiplier == this.maxRetriesMultiplier) {
            // TODO Need a better way to determine if a file is really gone but
            // TODO without scanning all logs dir
            LOG.warn("Waited too long for this file, considering dumping");
            return !processEndOfFile();
        }
    }
    return true;
}

From source file:org.apache.jmeter.protocol.http.proxy.JMeterProxyControl.java

/**
 * Initialise the dynamic domain keystore
 */
private void initDynamicKeyStore() throws IOException, GeneralSecurityException {
    if (storePassword != null) { // Assume we have already created the store
        try {
            sslKeyStore = getKeyStore(storePassword.toCharArray());
            for (String alias : KeyToolUtils.getCAaliases()) {
                X509Certificate caCert = (X509Certificate) sslKeyStore.getCertificate(alias);
                if (caCert == null) {
                    sslKeyStore = null; // no CA key - probably the wrong store type.
                    break; // cannot continue
                } else {
                    caCert.checkValidity(new Date(System.currentTimeMillis() + DateUtils.MILLIS_PER_DAY));
                    LOG.info("Valid alias found for " + alias);
                }
            }
        } catch (IOException e) { // store is faulty, we need to recreate it
            sslKeyStore = null; // if cert is not valid, flag up to recreate it
            if (e.getCause() instanceof UnrecoverableKeyException) {
                LOG.warn(
                        "Could not read key store " + e.getMessage() + "; cause: " + e.getCause().getMessage());
            } else {
                LOG.warn("Could not open/read key store " + e.getMessage()); // message includes the file name
            }
        } catch (GeneralSecurityException e) {
            sslKeyStore = null; // if cert is not valid, flag up to recreate it
            LOG.warn("Problem reading key store: " + e.getMessage());
        }
    }
    if (sslKeyStore == null) { // no existing file or not valid
        storePassword = RandomStringUtils.randomAlphanumeric(20); // Alphanum to avoid issues with command-line quoting
        keyPassword = storePassword; // we use same password for both
        setPassword(storePassword);
        LOG.info("Creating Proxy CA in " + CERT_PATH_ABS);
        KeyToolUtils.generateProxyCA(CERT_PATH, storePassword, CERT_VALIDITY);
        LOG.info("Created keystore in " + CERT_PATH_ABS);
        sslKeyStore = getKeyStore(storePassword.toCharArray()); // This should now work
    }
    final String sslDomains = getSslDomains().trim();
    if (sslDomains.length() > 0) {
        final String[] domains = sslDomains.split(",");
        // The subject may be either a host or a domain
        for (String subject : domains) {
            if (isValid(subject)) {
                if (!sslKeyStore.containsAlias(subject)) {
                    LOG.info("Creating entry " + subject + " in " + CERT_PATH_ABS);
                    KeyToolUtils.generateHostCert(CERT_PATH, storePassword, subject, CERT_VALIDITY);
                    sslKeyStore = getKeyStore(storePassword.toCharArray()); // reload to pick up new aliases
                    // reloading is very quick compared with creating an entry currently
                }
            } else {
                LOG.warn("Attempt to create an invalid domain certificate: " + subject);
            }
        }
    }
}
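
In this example, getCause() singles out one specific keystore failure: an UnrecoverableKeyException cause (typically a wrong store or key password) gets its own log message, while any other IOException is reported generically; either way, sslKeyStore is reset to null so the store is recreated.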

From source file:fr.univrouen.poste.web.candidat.MyPosteCandidatureController.java

@RequestMapping(value = "/{id}/templateReviewFile/{idFile}")
@PreAuthorize("hasPermission(#id, 'review')")
public void downloadTemplateReviewFile(@PathVariable("id") Long id, @PathVariable("idFile") Long idFile,
        HttpServletRequest request, HttpServletResponse response) throws IOException, SQLException {
    try {
        PosteCandidature postecandidature = PosteCandidature.findPosteCandidature(id);

        TemplateFile templateFile = TemplateFile.findTemplateFile(idFile);
        InputStream templateDocx = templateFile.getBigFile().getBinaryFile().getBinaryStream();

        String filename = postecandidature.getPoste().getNumEmploi() + "-" + postecandidature.getNumCandidat()
                + "-" + templateFile.getFilename();

        response.setContentType("application/vnd.openxmlformats-officedocument.wordprocessingml.document");
        response.setHeader("Content-Disposition", "attachment; filename=\"" + filename + "\"");

        templateService.generateTemplateFile(templateDocx, postecandidature, response.getOutputStream());

    } catch (IOException ioe) {
        String ip = request.getRemoteAddr();
        logger.warn("Download IOException, that can be just because the client [" + ip
                + "] canceled the download process : " + ioe.getCause());
    }
}