Example usage for java.io IOException initCause

Introduction

This page collects usage examples for java.io.IOException#initCause. In each snippet a lower-level exception is caught, wrapped in an IOException, and rethrown with the original exception attached as the cause.

Prototype

public synchronized Throwable initCause(Throwable cause) 

Document

Initializes the cause of this throwable to the specified value and returns a reference to this throwable. The cause may be set at most once; because the method returns this, an exception can be created, initialized, and thrown in a single expression.
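
The examples on this page all follow the same wrap-and-rethrow pattern: catch a lower-level checked exception, create an IOException, attach the original exception with initCause, and throw the IOException. Below is a minimal, self-contained sketch of that pattern; the InitCauseSketch class, the loadConfig method, and the "handler.class" property are hypothetical placeholders, not taken from any of the projects quoted below.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class InitCauseSketch {

    /**
     * Loads a properties file and resolves a class named in it, converting a
     * ClassNotFoundException into an IOException that keeps the original
     * exception reachable via getCause().
     */
    public static Properties loadConfig(String path) throws IOException {
        Properties props = new Properties();
        try (InputStream in = new FileInputStream(path)) {
            props.load(in);
            // Hypothetical post-processing step that can fail with a checked exception.
            Class.forName(props.getProperty("handler.class", "java.lang.Object"));
        } catch (ClassNotFoundException e) {
            // initCause returns this throwable, so wrap, initialize, and throw in one expression.
            throw (IOException) new IOException("Handler class not found for " + path).initCause(e);
        }
        return props;
    }
}

Since Java 6, the IOException(String message, Throwable cause) constructor achieves the same result in one call; initCause remains the idiom of choice when the exception object already exists or when older APIs must be supported, which is the situation in most of the examples below.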

Usage

From source file: org.apache.jackrabbit.core.query.lucene.SearchIndex.java

/**
 * Creates an excerpt provider for the given <code>query</code>.
 *
 * @param query the query.
 * @return an excerpt provider for the given <code>query</code>.
 * @throws IOException if the provider cannot be created.
 */
public ExcerptProvider createExcerptProvider(Query query) throws IOException {
    ExcerptProvider ep;
    try {
        ep = (ExcerptProvider) excerptProviderClass.newInstance();
    } catch (Exception e) {
        IOException ex = new IOException();
        ex.initCause(e);
        throw ex;
    }
    ep.init(query, this);
    return ep;
}

From source file: org.apache.hadoop.mapred.MapTask.java

@SuppressWarnings("unchecked")
private <T> T getSplitDetails(Path file, long offset) throws IOException {
    FileSystem fs = file.getFileSystem(conf);
    FSDataInputStream inFile = fs.open(file);
    inFile.seek(offset);
    String className = Text.readString(inFile);
    Class<T> cls;
    try {
        cls = (Class<T>) conf.getClassByName(className);
    } catch (ClassNotFoundException ce) {
        IOException wrap = new IOException("Split class " + className + " not found");
        wrap.initCause(ce);
        throw wrap;
    }
    SerializationFactory factory = new SerializationFactory(conf);
    Deserializer<T> deserializer = (Deserializer<T>) factory.getDeserializer(cls);
    deserializer.open(inFile);
    T split = deserializer.deserialize(null);
    long pos = inFile.getPos();
    getCounters().findCounter(Task.Counter.SPLIT_RAW_BYTES).increment(pos - offset);
    inFile.close();
    return split;
}

From source file: org.geotools.gce.imagemosaic.catalogbuilder.CatalogBuilder.java

private void indexingPreamble() throws IOException {

    //
    // declare a precision model that adheres to the Java double type
    // precision
    //
    final PrecisionModel precMod = new PrecisionModel(PrecisionModel.FLOATING);
    geomFactory = new GeometryFactory(precMod);

    //
    // create the index
    //
    // do we have a datastore.properties file?
    final File parent = new File(runConfiguration.getRootMosaicDirectory());
    final File datastoreProperties = new File(parent, "datastore.properties");
    if (Utils.checkFileReadable(datastoreProperties)) {
        // read the properties file
        Properties properties = Utils.loadPropertiesFromURL(DataUtilities.fileToURL(datastoreProperties));
        if (properties == null)
            throw new IOException("Unable to load properties from:" + datastoreProperties.getAbsolutePath());

        // SPI
        final String SPIClass = properties.getProperty("SPI");
        try {
            // create a datastore as instructed
            final DataStoreFactorySpi spi = (DataStoreFactorySpi) Class.forName(SPIClass).newInstance();
            final Map<String, Serializable> params = Utils.createDataStoreParamsFromPropertiesFile(properties,
                    spi);

            // set the ParentLocation parameter: for an embedded database like H2 we
            // must point the database at the path where it should write its files
            params.put("ParentLocation", DataUtilities.fileToURL(parent).toExternalForm());
            catalog = GranuleCatalogFactory.createGranuleCatalog(params, false, true, spi);
        } catch (ClassNotFoundException e) {
            final IOException ioe = new IOException();
            throw (IOException) ioe.initCause(e);
        } catch (InstantiationException e) {
            final IOException ioe = new IOException();
            throw (IOException) ioe.initCause(e);
        } catch (IllegalAccessException e) {
            final IOException ioe = new IOException();
            throw (IOException) ioe.initCause(e);
        }
    } else {

        // we do not have a datastore.properties file, therefore we fall back to a shapefile datastore
        final URL file = new File(parent, runConfiguration.getIndexName() + ".shp").toURI().toURL();
        final Map<String, Serializable> params = new HashMap<String, Serializable>();
        params.put(ShapefileDataStoreFactory.URLP.key, file);
        if (file.getProtocol().equalsIgnoreCase("file"))
            params.put(ShapefileDataStoreFactory.CREATE_SPATIAL_INDEX.key, Boolean.TRUE);
        params.put(ShapefileDataStoreFactory.MEMORY_MAPPED.key, Boolean.TRUE);
        params.put(ShapefileDataStoreFactory.DBFTIMEZONE.key, TimeZone.getTimeZone("UTC"));
        catalog = GranuleCatalogFactory.createGranuleCatalog(params, false, true, Utils.SHAPE_SPI);
    }

    //
    // creating a mosaic runConfiguration bean to store the properties file elements         
    //
    mosaicConfiguration = new MosaicConfigurationBean();
    mosaicConfiguration.setName(runConfiguration.getIndexName());

    //
    // IMPOSED ENVELOPE
    //
    String bbox = runConfiguration.getEnvelope2D();
    try {
        this.imposedBBox = Utils.parseEnvelope(bbox);
    } catch (Exception e) {
        this.imposedBBox = null;
        if (LOGGER.isLoggable(Level.WARNING))
            LOGGER.log(Level.WARNING, "Unable to parse imposed bbox", e);
    }
    mosaicConfiguration.setCaching(runConfiguration.isCaching());
    //
    // load property collectors
    //
    loadPropertyCollectors();

}
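
A side note on the example above: the three identical catch blocks wrap ClassNotFoundException, InstantiationException, and IllegalAccessException separately because the code predates Java 7 multi-catch. On Java 7 or later the same initCause wrapping can be written once. The following is a sketch of that alternative under stated assumptions, not the actual GeoTools source; the SpiLoader class and instantiateSpi method are hypothetical.

import java.io.IOException;

// Hypothetical helper mirroring the SPI-loading step above: any reflective
// failure is rethrown as an IOException with the original exception as cause.
final class SpiLoader {

    static Object instantiateSpi(String spiClassName) throws IOException {
        try {
            return Class.forName(spiClassName).newInstance();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            final IOException ioe = new IOException("could not instantiate SPI " + spiClassName);
            throw (IOException) ioe.initCause(e);
        }
    }
}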

From source file: com.atomicleopard.thundr.ftp.commons.FTP.java

/**
 * Initiates control connections and gets initial reply.
 * Initializes {@link #_controlInput_} and {@link #_controlOutput_}.
 */
@Override
protected void _connectAction_() throws IOException {
    super._connectAction_(); // sets up _input_ and _output_
    _controlInput_ = new CRLFLineReader(new InputStreamReader(_input_, getControlEncoding()));
    _controlOutput_ = new BufferedWriter(new OutputStreamWriter(_output_, getControlEncoding()));
    if (connectTimeout > 0) { // NET-385
        int original = _socket_.getSoTimeout();
        _socket_.setSoTimeout(connectTimeout);
        try {
            __getReply();
            // If we received code 120, we have to fetch completion reply.
            if (FTPReply.isPositivePreliminary(_replyCode)) {
                __getReply();
            }
        } catch (SocketTimeoutException e) {
            IOException ioe = new IOException("Timed out waiting for initial connect reply");
            ioe.initCause(e);
            throw ioe;
        } finally {
            _socket_.setSoTimeout(original);
        }
    } else {
        __getReply();
        // If we received code 120, we have to fetch completion reply.
        if (FTPReply.isPositivePreliminary(_replyCode)) {
            __getReply();
        }
    }
}

From source file: org.apache.jackrabbit.core.query.lucene.MultiIndex.java

/**
 * Creates an initial index by traversing the node hierarchy starting at the
 * node with <code>rootId</code>.
 *
 * @param stateMgr the item state manager.
 * @param rootId   the id of the node from where to start.
 * @throws IOException           if an error occurs while indexing the
 *                               workspace.
 * @throws IllegalStateException if this index is not empty.
 */
void createInitialIndex(ItemStateManager stateMgr, NodeId rootId) throws IOException {
    // only do an initial index if there are no indexes at all
    if (indexNames.size() == 0) {
        reindexing = true;
        try {
            // traverse and index workspace
            executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
            NodeState rootState = (NodeState) stateMgr.getItemState(rootId);
            createIndex(rootState, stateMgr);
            executeAndLog(new Commit(getTransactionId()));
            scheduleFlushTask();
        } catch (Exception e) {
            String msg = "Error indexing workspace";
            IOException ex = new IOException(msg);
            ex.initCause(e);
            throw ex;
        } finally {
            reindexing = false;
        }
    } else {
        throw new IllegalStateException("Index already present");
    }
}

From source file: org.apache.jackrabbit.core.query.lucene.MultiIndex.java

/**
 * Creates an initial index by traversing the node hierarchy starting at the
 * node with <code>rootId</code>.
 *
 * @param stateMgr the item state manager.
 * @param rootId   the id of the node from where to start.
 * @throws IOException           if an error occurs while indexing the
 *                               workspace.
 * @throws IllegalStateException if this index is not empty.
 */
void createInitialIndex(ItemStateManager stateMgr, NodeId rootId, Path rootPath) throws IOException {
    // only do an initial index if there are no indexes at all
    if (indexNames.size() == 0) {
        reindexing = true;
        try {
            // traverse and index workspace
            executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
            NodeState rootState = (NodeState) stateMgr.getItemState(rootId);
            createIndex(rootState, rootPath, stateMgr);
            executeAndLog(new Commit(getTransactionId()));
            scheduleFlushTask();
        } catch (Exception e) {
            String msg = "Error indexing workspace";
            IOException ex = new IOException(msg);
            ex.initCause(e);
            throw ex;
        } finally {
            reindexing = false;
        }
    } else {
        throw new IllegalStateException("Index already present");
    }
}

From source file: org.apache.jackrabbit.core.query.lucene.MultiIndex.java

/**
 * Creates a new MultiIndex.
 *
 * @param indexDir the base file system
 * @param handler the search handler
 * @param excludedIDs   Set&lt;NodeId&gt; that contains uuids that should not
 *                      be indexed nor further traversed.
 * @param mapping the namespace mapping to use
 * @throws IOException if an error occurs
 */
MultiIndex(File indexDir, SearchIndex handler, Set excludedIDs, NamespaceMappings mapping) throws IOException {

    this.indexDir = indexDir;
    this.handler = handler;
    this.cache = new DocNumberCache(handler.getCacheSize());
    this.redoLog = new RedoLog(new File(indexDir, REDO_LOG));
    this.excludedIDs = new HashSet(excludedIDs);
    this.nsMappings = mapping;

    if (indexNames.exists(indexDir)) {
        indexNames.read(indexDir);
    }
    if (deletable.exists(indexDir)) {
        deletable.read(indexDir);
    }

    // try to remove deletable files if there are any
    attemptDelete();

    // initialize IndexMerger
    merger = new IndexMerger(this);
    merger.setMaxMergeDocs(handler.getMaxMergeDocs());
    merger.setMergeFactor(handler.getMergeFactor());
    merger.setMinMergeDocs(handler.getMinMergeDocs());

    IndexingQueueStore store;
    try {
        LocalFileSystem fs = new LocalFileSystem();
        fs.setRoot(indexDir);
        fs.init();
        store = new IndexingQueueStore(fs, INDEXING_QUEUE_FILE);
    } catch (FileSystemException e) {
        IOException ex = new IOException();
        ex.initCause(e);
        throw ex;
    }

    // initialize indexing queue
    this.indexingQueue = new IndexingQueue(store);

    // open persistent indexes
    for (int i = 0; i < indexNames.size(); i++) {
        File sub = new File(indexDir, indexNames.getName(i));
        // only open if it still exists
        // it is possible that indexNames still contains a name for
        // an index that has been deleted, but indexNames has not been
        // written to disk.
        if (!sub.exists()) {
            log.debug("index does not exist anymore: " + sub.getAbsolutePath());
            // move on to next index
            continue;
        }
        PersistentIndex index = new PersistentIndex(indexNames.getName(i), sub, handler.getTextAnalyzer(),
                handler.getSimilarity(), cache, indexingQueue);
        index.setMaxMergeDocs(handler.getMaxMergeDocs());
        index.setMergeFactor(handler.getMergeFactor());
        index.setMinMergeDocs(handler.getMinMergeDocs());
        index.setMaxFieldLength(handler.getMaxFieldLength());
        index.setUseCompoundFile(handler.getUseCompoundFile());
        indexes.add(index);
        merger.indexAdded(index.getName(), index.getNumDocuments());
    }

    // init volatile index
    resetVolatileIndex();

    // set index format version
    IndexReader reader = getIndexReader();
    try {
        version = IndexFormatVersion.getVersion(reader);
    } finally {
        reader.close();
    }

    indexingQueue.initialize(this);

    redoLogApplied = redoLog.hasEntries();

    // run recovery
    Recovery.run(this, redoLog);

    // now that we are ready, start index merger
    merger.start();

    if (redoLogApplied) {
        // wait for the index merge to finish pending jobs
        try {
            merger.waitUntilIdle();
        } catch (InterruptedException e) {
            // move on
        }
        flush();
    }

    flushTask = new Timer.Task() {
        public void run() {
            // check if there are any indexing jobs finished
            checkIndexingQueue();
            // check if volatile index should be flushed
            checkFlush();
        }
    };

    if (indexNames.size() > 0) {
        scheduleFlushTask();
    }
}

From source file: org.apache.hadoop.hbase.regionserver.HStore.java

/**
 * Write out the current snapshot. Presumes {@link #snapshot()} has been called
 * previously.
 * @param logCacheFlushId flush sequence number
 * @param snapshot the memstore snapshot to flush
 * @param status task monitor used to report flush progress
 * @return the paths of the tmp files to which the store was flushed
 * @throws IOException if the flush fails after all retries
 */
protected List<Path> flushCache(final long logCacheFlushId, MemStoreSnapshot snapshot, MonitoredTask status)
        throws IOException {
    // If an exception happens flushing, we let it out without clearing
    // the memstore snapshot.  The old snapshot will be returned when we say
    // 'snapshot', the next time flush comes around.
    // Retry after catching exception when flushing, otherwise server will abort
    // itself
    StoreFlusher flusher = storeEngine.getStoreFlusher();
    IOException lastException = null;
    for (int i = 0; i < flushRetriesNumber; i++) {
        try {
            List<Path> pathNames = flusher.flushSnapshot(snapshot, logCacheFlushId, status);
            Path lastPathName = null;
            try {
                for (Path pathName : pathNames) {
                    lastPathName = pathName;
                    validateStoreFile(pathName);
                }
                return pathNames;
            } catch (Exception e) {
                LOG.warn("Failed validating store file " + lastPathName + ", retrying num=" + i, e);
                if (e instanceof IOException) {
                    lastException = (IOException) e;
                } else {
                    lastException = new IOException(e);
                }
            }
        } catch (IOException e) {
            LOG.warn("Failed flushing store file, retrying num=" + i, e);
            lastException = e;
        }
        if (lastException != null && i < (flushRetriesNumber - 1)) {
            try {
                Thread.sleep(pauseTime);
            } catch (InterruptedException e) {
                IOException iie = new InterruptedIOException();
                iie.initCause(e);
                throw iie;
            }
        }
    }
    throw lastException;
}

From source file: org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.java

boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) throws IOException {
    boolean isCorrupted = false;
    Preconditions.checkState(status == null);
    boolean skipErrors = conf.getBoolean("hbase.hlog.split.skip.errors", HLog.SPLIT_SKIP_ERRORS_DEFAULT);
    int interval = conf.getInt("hbase.splitlog.report.interval.loglines", 1024);
    Path logPath = logfile.getPath();
    boolean outputSinkStarted = false;
    boolean progress_failed = false;
    int editsCount = 0;
    int editsSkipped = 0;

    try {
        status = TaskMonitor.get()
                .createStatus("Splitting log file " + logfile.getPath() + " into a temporary staging area.");
        long logLength = logfile.getLen();
        LOG.info("Splitting hlog: " + logPath + ", length=" + logLength);
        LOG.info("DistributedLogReplay = " + this.distributedLogReplay);
        status.setStatus("Opening log file");
        if (reporter != null && !reporter.progress()) {
            progress_failed = true;
            return false;
        }
        Reader in = null;
        try {
            in = getReader(fs, logfile, conf, skipErrors, reporter);
        } catch (CorruptedLogFileException e) {
            LOG.warn("Could not get reader, corrupted log file " + logPath, e);
            ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
            isCorrupted = true;
        }
        if (in == null) {
            status.markComplete("Was nothing to split in log file");
            LOG.warn("Nothing to split in log file " + logPath);
            return true;
        }
        if (watcher != null && csm != null) {
            try {
                TableStateManager tsm = csm.getTableStateManager();
                disablingOrDisabledTables = tsm.getTablesInStates(ZooKeeperProtos.Table.State.DISABLED,
                        ZooKeeperProtos.Table.State.DISABLING);
            } catch (CoordinatedStateException e) {
                throw new IOException("Can't get disabling/disabled tables", e);
            }
        }
        int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
        int numOpenedFilesLastCheck = 0;
        outputSink.setReporter(reporter);
        outputSink.startWriterThreads();
        outputSinkStarted = true;
        Entry entry;
        Long lastFlushedSequenceId = -1L;
        ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(logPath);
        failedServerName = (serverName == null) ? "" : serverName.getServerName();
        while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
            byte[] region = entry.getKey().getEncodedRegionName();
            String key = Bytes.toString(region);
            lastFlushedSequenceId = lastFlushedSequenceIds.get(key);
            if (lastFlushedSequenceId == null) {
                if (this.distributedLogReplay) {
                    RegionStoreSequenceIds ids = SplitLogManager.getRegionFlushedSequenceId(this.watcher,
                            failedServerName, key);
                    if (ids != null) {
                        lastFlushedSequenceId = ids.getLastFlushedSequenceId();
                    }
                } else if (sequenceIdChecker != null) {
                    lastFlushedSequenceId = sequenceIdChecker.getLastSequenceId(region);
                }
                if (lastFlushedSequenceId == null) {
                    lastFlushedSequenceId = -1L;
                }
                lastFlushedSequenceIds.put(key, lastFlushedSequenceId);
            }
            if (lastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
                editsSkipped++;
                continue;
            }
            entryBuffers.appendEntry(entry);
            editsCount++;
            int moreWritersFromLastCheck = this.getNumOpenWriters() - numOpenedFilesLastCheck;
            // If sufficient edits have passed, check if we should report progress.
            if (editsCount % interval == 0 || moreWritersFromLastCheck > numOpenedFilesBeforeReporting) {
                numOpenedFilesLastCheck = this.getNumOpenWriters();
                String countsStr = (editsCount - (editsSkipped + outputSink.getSkippedEdits()))
                        + " edits, skipped " + editsSkipped + " edits.";
                status.setStatus("Split " + countsStr);
                if (reporter != null && !reporter.progress()) {
                    progress_failed = true;
                    return false;
                }
            }
        }
    } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
    } catch (CorruptedLogFileException e) {
        LOG.warn("Could not parse, corrupted log file " + logPath, e);
        ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
        isCorrupted = true;
    } catch (IOException e) {
        e = RemoteExceptionHandler.checkIOException(e);
        throw e;
    } finally {
        LOG.debug("Finishing writing output logs and closing down.");
        if (outputSinkStarted) {
            progress_failed = outputSink.finishWritingAndClose() == null;
        }
        String msg = "Processed " + editsCount + " edits across " + outputSink.getNumberOfRecoveredRegions()
                + " regions; log file=" + logPath + " is corrupted = " + isCorrupted + " progress failed = "
                + progress_failed;
        LOG.info(msg);
        status.markComplete(msg);
    }
    return !progress_failed;
}

From source file: org.geoserver.catalog.ResourcePool.java

/**
 * Loads a grid coverage.
 * 
 * @param info The grid coverage metadata.
 * @param reader The grid coverage reader used to access the data.
 * @param env The section of the coverage to load.
 * @param hints Hints to use while loading the coverage.
 *
 * @throws IOException Any errors that occur while loading the coverage.
 */
@SuppressWarnings("deprecation")
public GridCoverage getGridCoverage(CoverageInfo info, GridCoverageReader reader, ReferencedEnvelope env,
        Hints hints) throws IOException {

    ReferencedEnvelope coverageBounds;
    try {
        coverageBounds = info.boundingBox();
    } catch (Exception e) {
        throw (IOException) new IOException("unable to calculate coverage bounds").initCause(e);
    }

    GeneralEnvelope envelope = null;
    if (env == null) {
        envelope = new GeneralEnvelope(coverageBounds);
    } else {
        envelope = new GeneralEnvelope(env);
    }

    // /////////////////////////////////////////////////////////
    //
    // Do we need to proceed?
    // Check whether the requested envelope intersects the coverage bounds;
    // otherwise it is pointless to load the coverage, since the reader
    // might return null.
    // /////////////////////////////////////////////////////////
    final CoordinateReferenceSystem sourceCRS = envelope.getCoordinateReferenceSystem();
    CoordinateReferenceSystem destCRS;
    try {
        destCRS = info.getCRS();
    } catch (Exception e) {
        final IOException ioe = new IOException("unable to determine coverage crs");
        ioe.initCause(e);
        throw ioe;
    }

    if (!CRS.equalsIgnoreMetadata(sourceCRS, destCRS)) {
        // get a math transform
        MathTransform transform;
        try {
            transform = CRS.findMathTransform(sourceCRS, destCRS, true);
        } catch (FactoryException e) {
            final IOException ioe = new IOException("unable to determine coverage crs");
            ioe.initCause(e);
            throw ioe;
        }

        // transform the envelope
        if (!transform.isIdentity()) {
            try {
                envelope = CRS.transform(transform, envelope);
            } catch (TransformException e) {
                throw (IOException) new IOException("error occured transforming envelope").initCause(e);
            }
        }
    }

    // just do the intersection since
    envelope.intersect(coverageBounds);

    if (envelope.isEmpty()) {
        return null;
    }

    envelope.setCoordinateReferenceSystem(destCRS);

    // /////////////////////////////////////////////////////////
    //
    // Reading the coverage
    //
    // /////////////////////////////////////////////////////////

    GridCoverage gc = reader
            .read(CoverageUtils.getParameters(reader.getFormat().getReadParameters(), info.getParameters()));

    if ((gc == null) || !(gc instanceof GridCoverage2D)) {
        throw new IOException("The requested coverage could not be found.");
    }

    return gc;
}