List of usage examples for java.io.RandomAccessFile.close()
public void close() throws IOException
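Before the project examples, a minimal sketch of the two common ways close() is invoked. This is not taken from any of the projects below; the file path and the 8-byte header field are hypothetical, and the file is assumed to exist. Most of the examples on this page use the explicit finally idiom; since RandomAccessFile implements Closeable, try-with-resources (Java 7+) is the more concise alternative.

    import java.io.IOException;
    import java.io.RandomAccessFile;

    public class RandomAccessFileCloseExample {

        // Pre-Java-7 idiom used by most examples below: close in a finally
        // block so the file descriptor is released even if an I/O error occurs.
        static long readHeaderWithFinally(String path) throws IOException {
            RandomAccessFile raf = new RandomAccessFile(path, "r");
            try {
                return raf.readLong(); // read a hypothetical 8-byte header field
            } finally {
                raf.close(); // always executed, even when readLong() throws
            }
        }

        // Java 7+ idiom: try-with-resources calls close() automatically.
        static long readHeaderWithTryWithResources(String path) throws IOException {
            try (RandomAccessFile raf = new RandomAccessFile(path, "r")) {
                return raf.readLong();
            } // raf.close() is invoked here, even on exception
        }
    }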
From source file:eu.alefzero.webdav.FileRequestEntity.java
@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    // globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getName());
                }
            }
        }
    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);
    } finally {
        channel.close();
        raf.close();
    }
}
From source file:au.org.ala.layers.grid.GridClassBuilder.java
public static HashMap<Integer, GridClass> buildFromGrid(String filePath) throws IOException {
    File wktDir = new File(filePath);
    wktDir.mkdirs();

    int[] wktMap = null;

    //track values for the SLD
    ArrayList<Integer> maxValues = new ArrayList<Integer>();
    ArrayList<String> labels = new ArrayList<String>();

    HashMap<Integer, GridClass> classes = new HashMap<Integer, GridClass>();
    Properties p = new Properties();
    p.load(new FileReader(filePath + ".txt"));

    boolean mergeProperties = false;

    Map<String, Set<Integer>> groupedKeys = new HashMap<String, Set<Integer>>();
    Map<Integer, Integer> translateKeys = new HashMap<Integer, Integer>();
    Map<String, Integer> translateValues = new HashMap<String, Integer>();
    ArrayList<Integer> keys = new ArrayList<Integer>();
    for (String key : p.stringPropertyNames()) {
        try {
            int k = Integer.parseInt(key);
            keys.add(k);

            //grouping of property file keys by value
            String value = p.getProperty(key);
            Set<Integer> klist = groupedKeys.get(value);
            if (klist == null)
                klist = new HashSet<Integer>();
            else
                mergeProperties = true;
            klist.add(k);
            groupedKeys.put(value, klist);

            if (!translateValues.containsKey(value))
                translateValues.put(value, translateValues.size() + 1);
            translateKeys.put(k, translateValues.get(value));
        } catch (NumberFormatException e) {
            logger.info("Excluding shape key '" + key + "'");
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
    java.util.Collections.sort(keys);

    Grid g = new Grid(filePath);
    boolean generateWkt = false; //((long) g.nrows) * ((long) g.ncols) < (long) Integer.MAX_VALUE;

    if (mergeProperties) {
        g.replaceValues(translateKeys);

        if (!new File(filePath + ".txt.old").exists())
            FileUtils.moveFile(new File(filePath + ".txt"), new File(filePath + ".txt.old"));

        StringBuilder sb = new StringBuilder();
        for (String value : translateValues.keySet()) {
            sb.append(translateValues.get(value)).append("=").append(value).append('\n');
        }
        FileUtils.writeStringToFile(new File(filePath + ".txt"), sb.toString());

        return buildFromGrid(filePath);
    }

    if (generateWkt) {
        for (String name : groupedKeys.keySet()) {
            try {
                Set<Integer> klist = groupedKeys.get(name);

                String key = klist.iterator().next().toString();
                int k = Integer.parseInt(key);

                GridClass gc = new GridClass();
                gc.setName(name);
                gc.setId(k);

                if (klist.size() == 1)
                    klist = null;

                logger.info("getting wkt for " + filePath + " > " + key);

                Map wktIndexed = Envelope.getGridSingleLayerEnvelopeAsWktIndexed(
                        filePath + "," + key + "," + key, klist, wktMap);

                //write class wkt
                File zipFile = new File(filePath + File.separator + key + ".wkt.zip");
                ZipOutputStream zos = null;
                try {
                    zos = new ZipOutputStream(new FileOutputStream(zipFile));
                    zos.putNextEntry(new ZipEntry(key + ".wkt"));
                    zos.write(((String) wktIndexed.get("wkt")).getBytes());
                    zos.flush();
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                } finally {
                    if (zos != null) {
                        try {
                            zos.close();
                        } catch (Exception e) {
                            logger.error(e.getMessage(), e);
                        }
                    }
                }
                BufferedOutputStream bos = null;
                try {
                    bos = new BufferedOutputStream(
                            new FileOutputStream(filePath + File.separator + key + ".wkt"));
                    bos.write(((String) wktIndexed.get("wkt")).getBytes());
                    bos.flush();
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                } finally {
                    if (bos != null) {
                        try {
                            bos.close();
                        } catch (Exception e) {
                            logger.error(e.getMessage(), e);
                        }
                    }
                }
                logger.info("wkt written to file");
                gc.setArea_km(SpatialUtil.calculateArea((String) wktIndexed.get("wkt")) / 1000.0 / 1000.0);

                //store map
                wktMap = (int[]) wktIndexed.get("map");

                //write wkt index
                FileWriter fw = null;
                try {
                    fw = new FileWriter(filePath + File.separator + key + ".wkt.index");
                    fw.append((String) wktIndexed.get("index"));
                    fw.flush();
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                } finally {
                    if (fw != null) {
                        try {
                            fw.close();
                        } catch (Exception e) {
                            logger.error(e.getMessage(), e);
                        }
                    }
                }
                //write wkt index a binary, include extents (minx, miny, maxx, maxy) and area (sq km)
                int minPolygonNumber = 0;
                int maxPolygonNumber = 0;

                RandomAccessFile raf = null;
                try {
                    raf = new RandomAccessFile(filePath + File.separator + key + ".wkt.index.dat", "rw");

                    String[] index = ((String) wktIndexed.get("index")).split("\n");

                    for (int i = 0; i < index.length; i++) {
                        if (index[i].length() > 1) {
                            String[] cells = index[i].split(",");
                            int polygonNumber = Integer.parseInt(cells[0]);
                            raf.writeInt(polygonNumber); //polygon number
                            int polygonStart = Integer.parseInt(cells[1]);
                            raf.writeInt(polygonStart); //character offset

                            if (i == 0) {
                                minPolygonNumber = polygonNumber;
                            } else if (i == index.length - 1) {
                                maxPolygonNumber = polygonNumber;
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                } finally {
                    if (raf != null) {
                        try {
                            raf.close();
                        } catch (Exception e) {
                            logger.error(e.getMessage(), e);
                        }
                    }
                }

                //for SLD
                maxValues.add(gc.getMaxShapeIdx());
                labels.add(name.replace("\"", "'"));
                gc.setMinShapeIdx(minPolygonNumber);
                gc.setMaxShapeIdx(maxPolygonNumber);

                logger.info("getting multipolygon for " + filePath + " > " + key);
                MultiPolygon mp = Envelope.getGridEnvelopeAsMultiPolygon(filePath + "," + key + "," + key);
                gc.setBbox(mp.getEnvelope().toText().replace(" (", "(").replace(", ", ","));

                classes.put(k, gc);

                try {
                    //write class kml
                    zos = null;
                    try {
                        zos = new ZipOutputStream(
                                new FileOutputStream(filePath + File.separator + key + ".kml.zip"));
                        zos.putNextEntry(new ZipEntry(key + ".kml"));
                        Encoder encoder = new Encoder(new KMLConfiguration());
                        encoder.setIndenting(true);
                        encoder.encode(mp, KML.Geometry, zos);
                        zos.flush();
                    } catch (Exception e) {
                        logger.error(e.getMessage(), e);
                    } finally {
                        if (zos != null) {
                            try {
                                zos.close();
                            } catch (Exception e) {
                                logger.error(e.getMessage(), e);
                            }
                        }
                    }
                    logger.info("kml written to file");

                    final SimpleFeatureType TYPE = DataUtilities.createType("class",
                            "the_geom:MultiPolygon,id:Integer,name:String");
                    FeatureJSON fjson = new FeatureJSON();
                    SimpleFeatureBuilder featureBuilder = new SimpleFeatureBuilder(TYPE);
                    SimpleFeature sf = featureBuilder.buildFeature(null);

                    //write class geojson
                    zos = null;
                    try {
                        zos = new ZipOutputStream(
                                new FileOutputStream(filePath + File.separator + key + ".geojson.zip"));
                        zos.putNextEntry(new ZipEntry(key + ".geojson"));
                        featureBuilder.add(mp);
                        featureBuilder.add(k);
                        featureBuilder.add(name);
                        fjson.writeFeature(sf, zos);
                        zos.flush();
                    } catch (Exception e) {
                        logger.error(e.getMessage(), e);
                    } finally {
                        if (zos != null) {
                            try {
                                zos.close();
                            } catch (Exception e) {
                                logger.error(e.getMessage(), e);
                            }
                        }
                    }
                    logger.info("geojson written to file");

                    //write class shape file
                    File newFile = new File(filePath + File.separator + key + ".shp");
                    ShapefileDataStoreFactory dataStoreFactory = new ShapefileDataStoreFactory();
                    Map<String, Serializable> params = new HashMap<String, Serializable>();
                    params.put("url", newFile.toURI().toURL());
                    params.put("create spatial index", Boolean.FALSE);
                    ShapefileDataStore newDataStore = null;
                    try {
                        newDataStore = (ShapefileDataStore) dataStoreFactory.createNewDataStore(params);
                        newDataStore.createSchema(TYPE);
                        newDataStore.forceSchemaCRS(DefaultGeographicCRS.WGS84);
                        Transaction transaction = new DefaultTransaction("create");
                        String typeName = newDataStore.getTypeNames()[0];
                        SimpleFeatureSource featureSource = newDataStore.getFeatureSource(typeName);
                        SimpleFeatureStore featureStore = (SimpleFeatureStore) featureSource;
                        featureStore.setTransaction(transaction);
                        List<SimpleFeature> features = new ArrayList<SimpleFeature>();
                        DefaultFeatureCollection collection = new DefaultFeatureCollection();
                        collection.addAll(features);

                        featureStore.setTransaction(transaction);

                        features.add(sf);
                        featureStore.addFeatures(collection);

                        transaction.commit();
                        transaction.close();
                    } catch (Exception e) {
                        logger.error(e.getMessage(), e);
                    } finally {
                        if (newDataStore != null) {
                            try {
                                newDataStore.dispose();
                            } catch (Exception e) {
                                logger.error(e.getMessage(), e);
                            }
                        }
                    }

                    zos = null;
                    try {
                        zos = new ZipOutputStream(
                                new FileOutputStream(filePath + File.separator + key + ".shp.zip"));
                        //add .dbf .shp .shx .prj
                        String[] exts = { ".dbf", ".shp", ".shx", ".prj" };
                        for (String ext : exts) {
                            zos.putNextEntry(new ZipEntry(key + ext));
                            FileInputStream fis = null;
                            try {
                                fis = new FileInputStream(filePath + File.separator + key + ext);
                                byte[] buffer = new byte[1024];
                                int size;
                                while ((size = fis.read(buffer)) > 0) {
                                    zos.write(buffer, 0, size);
                                }
                            } catch (Exception e) {
                                logger.error(e.getMessage(), e);
                            } finally {
                                if (fis != null) {
                                    try {
                                        fis.close();
                                    } catch (Exception e) {
                                        logger.error(e.getMessage(), e);
                                    }
                                }
                            }

                            //remove unzipped files
                            new File(filePath + File.separator + key + ext).delete();
                        }
                        zos.flush();
                    } catch (Exception e) {
                        logger.error(e.getMessage(), e);
                    } finally {
                        if (zos != null) {
                            try {
                                zos.close();
                            } catch (Exception e) {
                                logger.error(e.getMessage(), e);
                            }
                        }
                    }
                    logger.info("shape file written to zip");
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                }
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }

        //write polygon mapping
        g.writeGrid(filePath + File.separator + "polygons", wktMap, g.xmin, g.ymin, g.xmax, g.ymax, g.xres,
                g.yres, g.nrows, g.ncols);

        //copy the header file to get it exactly the same, but change the data type
        copyHeaderAsInt(filePath + ".grd", filePath + File.separator + "polygons.grd");
    } else {
        //build classes without generating polygons
        Map<Float, float[]> info = new HashMap<Float, float[]>();
        for (int j = 0; j < keys.size(); j++) {
            info.put(keys.get(j).floatValue(), new float[] { 0, Float.NaN, Float.NaN, Float.NaN, Float.NaN });
        }

        g.getClassInfo(info);

        for (int j = 0; j < keys.size(); j++) {
            int k = keys.get(j);
            String key = String.valueOf(k);
            String name = p.getProperty(key);

            GridClass gc = new GridClass();
            gc.setName(name);
            gc.setId(k);

            //for SLD
            maxValues.add(Integer.valueOf(key));
            labels.add(name.replace("\"", "'"));
            gc.setMinShapeIdx(Integer.valueOf(key));
            gc.setMaxShapeIdx(Integer.valueOf(key));

            float[] stats = info.get(keys.get(j).floatValue());

            //only include if area > 0
            if (stats[0] > 0) {
                gc.setBbox("POLYGON((" + stats[1] + " " + stats[2] + "," + stats[1] + " " + stats[4] + ","
                        + stats[3] + " " + stats[4] + "," + stats[3] + " " + stats[2] + "," + stats[1] + " "
                        + stats[2] + "))");
                gc.setArea_km((double) stats[0]);
                classes.put(k, gc);
            }
        }
    }

    //write sld
    exportSLD(filePath + File.separator + "polygons.sld", new File(filePath + ".txt").getName(), maxValues,
            labels);

    writeProjectionFile(filePath + File.separator + "polygons.prj");

    //write .classes.json
    ObjectMapper mapper = new ObjectMapper();
    mapper.writeValue(new File(filePath + ".classes.json"), classes);

    return classes;
}
From source file:org.commoncrawl.service.crawler.CrawlLog.java
private static void updateLogFileHeader(File logFileName, LogFileHeader header, long addedRecordCount)
        throws IOException {
    RandomAccessFile file = new RandomAccessFile(logFileName, "rw");
    try {
        // update cached header ...
        header._fileSize = file.getChannel().size();
        header._itemCount += addedRecordCount;
        // set the position at zero ..
        file.seek(0);
        // and write header to disk ...
        header.writeHeader(file);
    } finally {
        // major bottle neck..
        // file.getFD().sync();
        file.close();
    }
}
From source file:com.funambol.foundation.util.FileSystemDAOHelper.java
/**
 * Writes the InputStream in append to the file starting from the given
 * position. If the file size does not match the expected value an
 * EOFException is thrown.
 *
 * @param f the file to open in writing
 * @param in the InputStream to copy in the file
 * @param pos the file-pointer offset
 * @param expectedSize the expected file size
 * @throws IOException if an error occurs
 */
public void copyInRandomAccessFile(File f, InputStream in, long pos, long expectedSize) throws IOException {
    //
    // open file for reading and writing: also its metadata will be updated
    //
    RandomAccessFile raf = new RandomAccessFile(f, "rws");
    raf.seek(pos);

    byte[] buffer = new byte[1024];
    long count = 0;
    while (true) {
        int read = in.read(buffer);
        if (read < 0) {
            break;
        }
        count += read;
        if (expectedSize >= 0 && count > expectedSize) {
            throw new EOFException("More bytes (" + count + ") than expected (" + expectedSize + ")");
        }
        raf.write(buffer, 0, read);
    }
    raf.close();

    if (expectedSize >= 0 && count < expectedSize) {
        throw new EOFException("Less bytes (" + count + ") than expected (" + expectedSize + ")");
    }
}
From source file:com.owncloud.android.lib.common.network.FileRequestEntity.java
@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getAbsolutePath());
                }
            }
        }
    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);
    } finally {
        channel.close();
        raf.close();
    }
}
From source file:org.apache.hadoop.hdfs.server.namenode.NNStorage.java
@Override // Storage
public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
    if (disablePreUpgradableLayoutCheck) {
        return false;
    }

    File oldImageDir = new File(sd.getRoot(), "image");
    if (!oldImageDir.exists()) {
        return false;
    }
    // check the layout version inside the image file
    File oldF = new File(oldImageDir, "fsimage");
    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
    try {
        oldFile.seek(0);
        int oldVersion = oldFile.readInt();
        oldFile.close();
        oldFile = null;
        if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
            return false;
    } finally {
        IOUtils.cleanup(LOG, oldFile);
    }
    return true;
}
From source file:com.cerema.cloud2.lib.common.network.FileRequestEntity.java
@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    // globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getAbsolutePath());
                }
            }
        }
    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);
    } finally {
        channel.close();
        raf.close();
    }
}
From source file:GridFDock.DataDistribute.java
public Status uploadFile(String remoteFile, File localFile, FTPClient ftpClient, long remoteSize)
        throws IOException {
    long step = localFile.length() / 100;
    long process = 0;
    long localreadbytes = 0L;
    boolean tmp2 = true;
    Status result = null;
    ftpClient.setFileType(FTP.BINARY_FILE_TYPE);
    while (tmp2) {
        RandomAccessFile raf = new RandomAccessFile(localFile, "r");
        OutputStream out = ftpClient.appendFileStream(new String(remoteFile.getBytes(CODING_1), CODING_2));
        if (remoteSize > 0) {
            ftpClient.setRestartOffset(remoteSize);
            process = remoteSize / step;
            raf.seek(remoteSize);
            localreadbytes = remoteSize;
        }
        byte[] bytes = new byte[1024];
        int c;
        while ((c = raf.read(bytes)) != -1) {
            out.write(bytes, 0, c);
            localreadbytes += c;
            if (localreadbytes / step != process) {
                process = localreadbytes / step;
                // System.out.println("Upload Progress" + process);
            }
        }
        out.flush();
        raf.close();
        out.close();
        boolean judge = ftpClient.completePendingCommand();
        if (judge) {
            result = Status.Upload_From_Break_Success;
            tmp2 = false;
        } else {
            result = Status.Upload_New_File_Failed;
        }
    }
    return result;
}
From source file:org.artifactory.webapp.wicket.page.logs.SystemLogsViewPanel.java
/**
 * Attempts to continue reading the log file from the last position, and then updates the log path, size and
 * link according to the outcome.
 *
 * @param cleanPanel True if the text container should be cleaned of content. false if not
 * @return String - The newly read content
 */
protected String readLogAndUpdateSize(boolean cleanPanel) {
    if ((lastPointer > systemLogFile.length()) || cleanPanel) {
        lastPointer = 0;
    }
    long size = systemLogFile.length();
    setLogInfo();
    if (lastPointer == size) {
        return "";
    }
    StringBuilder sb = new StringBuilder();
    RandomAccessFile logRandomAccessFile = null;
    try {
        logRandomAccessFile = new RandomAccessFile(systemLogFile, "r");

        //If the log file is larger than 100K
        if (lastPointer == 0 && logRandomAccessFile.length() > FIRST_READ_BLOCK_SIZE) {
            //Point to the beginning of the last 100K
            lastPointer = logRandomAccessFile.length() - FIRST_READ_BLOCK_SIZE;
        }

        logRandomAccessFile.seek(lastPointer);

        String line;
        while ((line = logRandomAccessFile.readLine()) != null) {
            CharSequence escapedLine = Strings.escapeMarkup(line, false, false);
            sb.append("<div>").append(escapedLine).append("<br/></div>");
        }
        lastPointer = logRandomAccessFile.getFilePointer();
    } catch (IOException e) {
        throw new RuntimeException(e.getMessage());
    } finally {
        try {
            if (logRandomAccessFile != null) {
                logRandomAccessFile.close();
            }
        } catch (IOException ignore) {
        }
    }
    return sb.toString();
}
From source file:org.apache.activemq.store.kahadb.KahaDBStoreRecoveryBrokerTest.java
@Override
@SuppressWarnings("resource")
protected BrokerService createRestartedBroker() throws Exception {

    // corrupting index
    File index = new File(kahaDbDirectoryName + "/db.data");
    RandomAccessFile raf = new RandomAccessFile(index, "rw");
    switch (failTest) {
    case FailToLoad:
        index.delete();
        raf = new RandomAccessFile(index, "rw");
        raf.seek(index.length());
        raf.writeBytes("corrupt");
        break;
    case LoadInvalid:
        // page size 0
        raf.seek(0);
        raf.writeBytes("corrupt and cannot load metadata");
        break;
    case LoadCorrupt:
        // loadable but invalid metadata
        // location of order index low priority index for first destination...
        raf.seek(8 * 1024 + 57);
        raf.writeLong(Integer.MAX_VALUE - 10);
        break;
    case LoadOrderIndex0:
        // loadable but invalid metadata
        // location of order index default priority index size
        // so looks like there are no ids in the order index
        // picked up by setCheckForCorruptJournalFiles
        raf.seek(12 * 1024 + 21);
        raf.writeShort(0);
        raf.writeChar(0);
        raf.writeLong(-1);
        break;
    default:
    }
    raf.close();

    // starting broker
    BrokerService broker = new BrokerService();
    KahaDBStore kaha = new KahaDBStore();
    kaha.setCheckForCorruptJournalFiles(failTest == CorruptionType.LoadOrderIndex0);
    // uncomment if you want to test archiving
    //kaha.setArchiveCorruptedIndex(true);
    kaha.setDirectory(new File(kahaDbDirectoryName));
    broker.setPersistenceAdapter(kaha);
    return broker;
}