List of usage examples for java.io.FileInputStream.getChannel()
public FileChannel getChannel()
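Before the project examples below, a minimal sketch of the basics (the file name is a placeholder, not from any of the projects): getChannel() returns a FileChannel bound to the stream, so positioning the channel repositions the stream, and size() reports the underlying file length.

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class GetChannelBasics {
    public static void main(String[] args) throws IOException {
        // "data.bin" is a hypothetical path, for illustration only.
        try (FileInputStream fis = new FileInputStream("data.bin")) {
            FileChannel channel = fis.getChannel();
            System.out.println("file size: " + channel.size());
            channel.position(16);          // skip the first 16 bytes
            ByteBuffer buf = ByteBuffer.allocate(64);
            int read = channel.read(buf);  // channel reads advance the stream position too
            System.out.println("read " + read + " bytes starting at offset 16");
        } // closing the stream also closes the channel
    }
}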
From source file:org.apache.hadoop.hdfs.BlockReaderLocal.java
private BlockReaderLocal(Configuration conf, String hdfsfile, Block block, Token<BlockTokenIdentifier> token,
        long startOffset, long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
        boolean verifyChecksum, FileInputStream dataIn, FileInputStream checksumIn) throws IOException {
    super(new Path("/blk_" + block.getBlockId() + ":of:" + hdfsfile) /* too non path-like? */, 1, checksum,
            verifyChecksum);
    this.startOffset = startOffset;
    this.dataIn = dataIn;
    this.checksumIn = checksumIn;
    this.checksum = checksum;

    long blockLength = pathinfo.getNumBytes();

    /* If bytesPerChecksum is very large, then the metadata file
     * is mostly corrupted. For now just truncate bytesPerChecksum to
     * blockLength. */
    bytesPerChecksum = checksum.getBytesPerChecksum();
    if (bytesPerChecksum > 10 * 1024 * 1024 && bytesPerChecksum > blockLength) {
        checksum = DataChecksum.newDataChecksum(checksum.getChecksumType(),
                Math.max((int) blockLength, 10 * 1024 * 1024));
        bytesPerChecksum = checksum.getBytesPerChecksum();
    }

    checksumSize = checksum.getChecksumSize();

    long endOffset = blockLength;
    if (startOffset < 0 || startOffset > endOffset || (length + startOffset) > endOffset) {
        String msg = " Offset " + startOffset + " and length " + length + " don't match block " + block
                + " ( blockLen " + endOffset + " )";
        LOG.warn("BlockReaderLocal requested with incorrect offset: " + msg);
        throw new IOException(msg);
    }

    firstChunkOffset = (startOffset - (startOffset % bytesPerChecksum));

    if (length >= 0) {
        // Make sure endOffset points to the end of a checksummed chunk.
        long tmpLen = startOffset + length;
        if (tmpLen % bytesPerChecksum != 0) {
            tmpLen += (bytesPerChecksum - tmpLen % bytesPerChecksum);
        }
        if (tmpLen < endOffset) {
            endOffset = tmpLen;
        }
    }

    // Seek to the right offsets.
    if (firstChunkOffset > 0) {
        dataIn.getChannel().position(firstChunkOffset);

        long checksumSkip = (firstChunkOffset / bytesPerChecksum) * checksumSize;
        // Note: blockInStream is seeked when created below.
        if (checksumSkip > 0) {
            checksumIn.skip(checksumSkip);
        }
    }

    lastChunkOffset = firstChunkOffset;
    lastChunkLen = -1;
}
From source file:edu.harvard.iq.dvn.core.web.servlet.FileDownloadServlet.java
public void deliverContent(StudyFile file, FileDownloadObject fileDownload, HttpServletResponse res) {
    OutputStream out = null;
    try {
        out = res.getOutputStream();
    } catch (IOException ex) {
        // TODO: try to generate error response.
        return;
    }
    InputStream in = fileDownload.getInputStream();

    if (in == null) {
        // TODO: generate error response.
        fileDownload.releaseConnection();
        return;
    }

    // If we are streaming a TAB-delimited file, we will need to add the
    // variable header line:
    String varHeaderLine = null;
    if (!fileDownload.noVarHeader()) {
        varHeaderLine = fileDownload.getVarHeader();
    }

    for (int i = 0; i < fileDownload.getResponseHeaders().length; i++) {
        String headerName = fileDownload.getResponseHeaders()[i].getName();
        // The goal is to (re)use all the Content-* headers.
        // (If this is a remote file, we may be recycling the headers
        // we have received from the remote repository.)
        if (headerName.startsWith("Content")) {
            // Special treatment for remote HTML pages:
            // if it looks like HTML, we redirect to that page,
            // instead of trying to display it.
            // (This is for cases like the harvested HGL documents,
            // which contain URLs pointing to dynamic content pages,
            // not to static files.)
            if (headerName.equals("Content-Type") && file.isRemote()
                    && fileDownload.getResponseHeaders()[i].getValue() != null
                    && fileDownload.getResponseHeaders()[i].getValue().startsWith("text/html")) {
                createRedirectResponse(res, fileDownload.getRemoteUrl());
                fileDownload.releaseConnection();
                return;
            }
            String headerValue = fileDownload.getResponseHeaders()[i].getValue();
            if (fileDownload.isZippedStream()) {
                headerValue = headerValue.replace(".zip", "");
            }
            res.setHeader(headerName, headerValue);
        }
    }

    // TODO: should probably do an explicit res.setContentType(), if the
    // mime type is available.

    // And now send the incoming HTTP stream as the response body.
    if (fileDownload.isFile()) {
        // For files that we are reading off disk (as opposed to remote
        // streams we are reading through network sockets) it is more
        // efficient to use NIO channels.
        FileInputStream fis = (FileInputStream) in;
        FileChannel inChannel = fis.getChannel();
        WritableByteChannel outChannel = Channels.newChannel(out);
        streamData(inChannel, outChannel, varHeaderLine);
    } else {
        streamData(in, out, varHeaderLine);
    }
    fileDownload.releaseConnection();
}
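The core of the example above, in isolation: when the source is known to be a file on disk, wrapping the response's OutputStream in a WritableByteChannel and letting the FileChannel drive the copy avoids a manual byte[] read/write loop. A minimal sketch (class and method names are illustrative, not from the project):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;

public final class ChannelStreaming {
    // Copies a file-backed stream to an arbitrary OutputStream using NIO channels.
    static void streamFile(FileInputStream fis, OutputStream out) throws IOException {
        FileChannel in = fis.getChannel();
        WritableByteChannel target = Channels.newChannel(out);
        long position = 0;
        long size = in.size();
        while (position < size) {
            // transferTo may copy fewer bytes than requested, so loop until done.
            position += in.transferTo(position, size - position, target);
        }
    }
}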
From source file:com.joyent.manta.client.MantaClient.java
/**
 * Puts an object into Manta.
 *
 * @param rawPath The path to the Manta object.
 * @param source {@link InputStream} to copy object data from
 * @param contentLength the total length of the stream (-1 if unknown)
 * @param headers optional HTTP headers to include when copying the object
 * @param metadata optional user-supplied metadata for object
 * @return Manta response object
 * @throws IOException If an IO exception has occurred.
 * @throws MantaClientHttpResponseException If a http status code {@literal > 300} is returned.
 */
public MantaObjectResponse put(final String rawPath, final InputStream source, final long contentLength,
        final MantaHttpHeaders headers, final MantaMetadata metadata) throws IOException {
    Validate.notBlank(rawPath, "rawPath must not be blank");
    Validate.notNull(source, "Input stream must not be null");
    final String path = formatPath(rawPath);

    final ContentType contentType = ContentTypeLookup.findOrDefaultContentType(headers,
            ContentType.APPLICATION_OCTET_STREAM);
    final int preLoadSize = config.getUploadBufferSize();
    final HttpEntity entity;

    /* We don't know how big the stream is, so we read N bytes from it and
     * see if it ends. If it ended, then we just convert that buffer into
     * an entity and pass it. If it didn't end, then we create a new stream
     * that concatenates the bytes read with the source stream.
     * Unfortunately, this will put us in a chunked transfer encoding and
     * it will affect performance. */
    if (contentLength < 0) {
        // If our stream is a FileInputStream, then we can pull the size off of it.
        if (source.getClass().equals(FileInputStream.class)) {
            FileInputStream fsin = (FileInputStream) source;
            entity = new InputStreamEntity(fsin, fsin.getChannel().size(), contentType);
        } else {
            byte[] preLoad = new byte[preLoadSize];
            int read = IOUtils.read(source, preLoad);

            // The total amount of bytes read was less than the preload size,
            // so we can just return an in-memory non-streaming entity.
            if (read < preLoadSize) {
                entity = new ExposedByteArrayEntity(preLoad, 0, read, contentType);
            } else {
                ByteArrayInputStream bin = new ByteArrayInputStream(preLoad);
                SequenceInputStream sin = new SequenceInputStream(bin, source);
                entity = new InputStreamEntity(sin, contentType);
            }
        }
    /* We know how big the stream is, so we can decide if it is within our
     * preload threshold and load it into memory, or if it isn't within the
     * threshold, pass it on as a streamed entity in non-chunked mode. */
    } else {
        if (contentLength <= preLoadSize && contentLength <= Integer.MAX_VALUE) {
            byte[] preLoad = new byte[(int) contentLength];
            IOUtils.read(source, preLoad);
            entity = new ExposedByteArrayEntity(preLoad, contentType);
        } else {
            entity = new InputStreamEntity(source, contentLength, contentType);
        }
    }

    return httpHelper.httpPut(path, headers, entity, metadata);
}
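The size probe used above, extracted: if a caller hands you a bare InputStream that happens to be a FileInputStream, the channel reveals the total length without consuming any bytes. A sketch under that assumption (the class and method are hypothetical; note it also subtracts any bytes already read, which the Manta code does not need to do since its stream is fresh):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

final class StreamLength {
    // Returns the remaining byte count if the stream is file-backed, -1 otherwise.
    static long lengthOf(InputStream source) throws IOException {
        if (source instanceof FileInputStream) {
            FileInputStream fis = (FileInputStream) source;
            // size() is the whole file; subtract whatever has already been consumed.
            return fis.getChannel().size() - fis.getChannel().position();
        }
        return -1; // unknown length; caller falls back to chunked transfer
    }
}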
From source file:org.sakaiproject.search.index.impl.JDBCClusterIndexStore.java
/**
 * Update the DB by saving this local patch segment.
 *
 * @param connection
 */
protected void updateDBPatchFilesystem(Connection connection) throws SQLException, IOException {
    PreparedStatement segmentUpdate = null;
    PreparedStatement segmentInsert = null;
    FileChannel packetStream = null;
    FileInputStream packetFIS = null;
    FileChannel sharedStream = null;
    FileOutputStream sharedFOS = null;
    File packetFile = null;
    File sharedFinalFile = null;
    File sharedTempFile = null;
    long newVersion = System.currentTimeMillis();
    try {
        sharedTempFile = new File(getSharedTempFileName(INDEX_PATCHNAME));
        sharedFinalFile = new File(getSharedFileName(INDEX_PATCHNAME, sharedStructuredStorage));
        packetFile = clusterStorage.packPatch();
        if (packetFile.exists()) {
            packetFIS = new FileInputStream(packetFile);
            packetStream = packetFIS.getChannel();
            File sharedTempFileParent = sharedTempFile.getParentFile();
            if (!sharedTempFileParent.exists() && !sharedTempFileParent.mkdirs()) {
                log.warn("couldn't create " + sharedTempFileParent.getPath());
            }
            sharedFOS = new FileOutputStream(sharedTempFile);
            sharedStream = sharedFOS.getChannel();

            doBlockedStream(packetStream, sharedStream);

            packetStream.close();
            sharedStream.close();

            segmentUpdate = connection
                    .prepareStatement("update search_segments set version_ = ?, size_ = ? where name_ = ? ");
            segmentInsert = connection
                    .prepareStatement("insert into search_segments ( name_, version_, size_ ) values ( ?,?,?)");

            segmentUpdate.clearParameters();
            segmentUpdate.setLong(1, newVersion);
            segmentUpdate.setLong(2, packetFile.length());
            segmentUpdate.setString(3, INDEX_PATCHNAME);
            if (segmentUpdate.executeUpdate() != 1) {
                segmentInsert.clearParameters();
                segmentInsert.setString(1, INDEX_PATCHNAME);
                segmentInsert.setLong(2, newVersion);
                segmentInsert.setLong(3, packetFile.length());
                if (segmentInsert.executeUpdate() != 1) {
                    throw new SQLException(" Failed to add patch packet ");
                }
            }
            long st = System.currentTimeMillis();
            if (!sharedTempFile.renameTo(sharedFinalFile)) {
                log.warn("Couldn't rename file " + sharedTempFile.getPath() + " to "
                        + sharedFinalFile.getPath());
            }
            if (searchService.hasDiagnostics()) {
                log.info("Renamed " + sharedTempFile.getPath() + " to " + sharedFinalFile.getPath() + " in "
                        + (System.currentTimeMillis() - st) + "ms");
            }
        } else {
            log.warn("Packet file does not exist " + packetFile.getPath());
        }
    } finally {
        try {
            if (packetStream != null) {
                packetStream.close();
                packetFIS.close();
            }
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            packetFile.delete();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            if (sharedStream != null) {
                sharedStream.close();
                sharedFOS.close();
            }
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            sharedTempFile.delete();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentUpdate.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentInsert.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
    }
}
From source file:org.sakaiproject.search.index.impl.JDBCClusterIndexStore.java
/**
 * Update the DB by saving this local segment.
 *
 * @param connection
 * @param addsi
 */
protected void updateDBSegmentFilesystem(Connection connection, SegmentInfo addsi)
        throws SQLException, IOException {
    PreparedStatement segmentUpdate = null;
    PreparedStatement segmentInsert = null;
    FileChannel packetStream = null;
    FileInputStream packetFIS = null;
    FileChannel sharedStream = null;
    FileOutputStream sharedFOS = null;
    File packetFile = null;
    File sharedFinalFile = null;
    File sharedTempFile = null;
    long newVersion = System.currentTimeMillis();
    try {
        sharedTempFile = new File(getSharedTempFileName(addsi.getName()));
        sharedFinalFile = new File(getSharedFileName(addsi.getName(), sharedStructuredStorage));
        packetFile = clusterStorage.packSegment(addsi, newVersion);
        if (packetFile.exists()) {
            packetFIS = new FileInputStream(packetFile);
            packetStream = packetFIS.getChannel();
            File parentFile = sharedTempFile.getParentFile();
            if (!parentFile.exists() && !parentFile.mkdirs()) {
                log.warn("Unable to create directory " + sharedTempFile.getParentFile().getPath());
            }
            sharedFOS = new FileOutputStream(sharedTempFile);
            sharedStream = sharedFOS.getChannel();

            // Copy file contents from source to destination.
            doBlockedStream(packetStream, sharedStream);

            packetStream.close();
            sharedStream.close();

            segmentUpdate = connection.prepareStatement(
                    "update search_segments set version_ = ?, size_ = ? where name_ = ? and version_ = ?");
            segmentInsert = connection
                    .prepareStatement("insert into search_segments ( name_, version_, size_ ) values ( ?,?,?)");

            if (addsi.isInDb()) {
                segmentUpdate.clearParameters();
                segmentUpdate.setLong(1, newVersion);
                segmentUpdate.setLong(2, packetFile.length());
                segmentUpdate.setString(3, addsi.getName());
                segmentUpdate.setLong(4, addsi.getVersion());
                if (segmentUpdate.executeUpdate() != 1) {
                    throw new SQLException("Can't find packet to update " + addsi);
                }
            } else {
                segmentInsert.clearParameters();
                segmentInsert.setString(1, addsi.getName());
                segmentInsert.setLong(2, newVersion);
                segmentInsert.setLong(3, packetFile.length());
                if (segmentInsert.executeUpdate() != 1) {
                    throw new SQLException(" Failed to insert packet " + addsi);
                }
            }
            addsi.setVersion(newVersion);

            File sharedParentFile = sharedFinalFile.getParentFile();
            if (!sharedParentFile.exists() && !sharedParentFile.mkdirs()) {
                log.warn("Couldn't create directory " + sharedParentFile.getPath());
            }
            long st = System.currentTimeMillis();
            if (!sharedTempFile.renameTo(sharedFinalFile)) {
                log.warn("Couldn't rename " + sharedTempFile.getPath() + " to " + sharedFinalFile.getPath());
            }
            if (searchService.hasDiagnostics()) {
                log.info("Renamed " + sharedTempFile.getPath() + " to " + sharedFinalFile.getPath() + " in "
                        + (System.currentTimeMillis() - st) + "ms");
            }
            log.info("DB Updated " + addsi);
        } else {
            log.warn("Packet file does not exist " + packetFile.getPath());
        }
    } finally {
        try {
            packetStream.close();
            packetFIS.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            packetFile.delete();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            sharedStream.close();
            sharedFOS.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            sharedTempFile.delete();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentUpdate.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentInsert.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
    }
}
From source file:com.linkedin.pinot.core.segment.creator.impl.inv.OffHeapBitmapInvertedIndexCreator.java
@Override
public void seal() throws IOException {
    FileOutputStream fos = null;
    FileInputStream fisOffsets = null;
    FileInputStream fisBitmaps = null;
    final DataOutputStream bitmapsOut;
    final DataOutputStream offsetsOut;
    String tempOffsetsFile = invertedIndexFile + ".offsets";
    String tempBitmapsFile = invertedIndexFile + ".binary";

    try {
        // Build the posting lists.
        constructPostingLists();

        // We need two separate streams, one to write the offsets and another to write the
        // serialized bitmap data. We need two because we don't know the serialized length
        // of each bitmap without constructing it.
        offsetsOut = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempOffsetsFile)));
        bitmapsOut = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempBitmapsFile)));

        // Write out the offsets of the bitmaps. The information can be used to access a
        // certain bitmap directly.
        // In total (cardinality + 1) offsets will be written out; the last offset is used to
        // calculate the length of the last bitmap, which might be needed when accessing
        // bitmaps randomly.
        // If a bitmap's offset is k, then k bytes need to be skipped to reach the bitmap.
        int startOffset = 4 * (cardinality + 1);
        offsetsOut.writeInt(startOffset); // the first bitmap's offset
        MutableRoaringBitmap bitmap = new MutableRoaringBitmap();
        for (int i = 0; i < cardinality; i++) {
            bitmap.clear();
            int length = postingListLengths.get(i);
            for (int j = 0; j < length; j++) {
                int bufferOffset = postingListStartOffsets.get(i) + j;
                int value = postingListBuffer.get(bufferOffset);
                bitmap.add(value);
            }
            // Serialize the bitmap to the bitmapsOut stream.
            bitmap.serialize(bitmapsOut);
            startOffset += bitmap.serializedSizeInBytes();
            // Write the next offset.
            offsetsOut.writeInt(startOffset);
        }
        offsetsOut.close();
        bitmapsOut.close();

        // Merge the two files by simply writing the offsets data first and then the
        // serialized bitmap data.
        fos = new FileOutputStream(invertedIndexFile);
        fisOffsets = new FileInputStream(tempOffsetsFile);
        fisBitmaps = new FileInputStream(tempBitmapsFile);
        FileChannel channelOffsets = fisOffsets.getChannel();
        channelOffsets.transferTo(0, channelOffsets.size(), fos.getChannel());

        FileChannel channelBitmaps = fisBitmaps.getChannel();
        channelBitmaps.transferTo(0, channelBitmaps.size(), fos.getChannel());

        LOGGER.debug("persisted bitmap inverted index for column : " + spec.getName() + " in "
                + invertedIndexFile.getAbsolutePath());
    } catch (Exception e) {
        LOGGER.error("Exception while creating bitmap index for column:" + spec.getName(), e);
    } finally {
        IOUtils.closeQuietly(fos);
        IOUtils.closeQuietly(fisOffsets);
        IOUtils.closeQuietly(fisBitmaps);

        // MmapUtils handles the null checks for the buffers.
        MmapUtils.unloadByteBuffer(origValueBuffer);
        origValueBuffer = null;
        valueBuffer = null;
        if (origLengths != null) {
            MmapUtils.unloadByteBuffer(origLengths);
            origLengths = null;
            lengths = null;
        }
        MmapUtils.unloadByteBuffer(origPostingListBuffer);
        origPostingListBuffer = null;
        postingListBuffer = null;
        MmapUtils.unloadByteBuffer(origPostingListCurrentOffsets);
        origPostingListCurrentOffsets = null;
        postingListCurrentOffsets = null;
        MmapUtils.unloadByteBuffer(origPostingListLengths);
        origPostingListLengths = null;
        postingListLengths = null;
        MmapUtils.unloadByteBuffer(origPostingListStartOffsets);
        origPostingListStartOffsets = null;
        postingListStartOffsets = null;
        FileUtils.deleteQuietly(new File(tempOffsetsFile));
        FileUtils.deleteQuietly(new File(tempBitmapsFile));
    }
}
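The merge step above boils down to FileChannel.transferTo into the destination stream's channel. A standalone sketch of the same technique (file names hypothetical) that concatenates two files into a third:

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

final class ConcatFiles {
    static void concat(String first, String second, String dest) throws IOException {
        try (FileOutputStream fos = new FileOutputStream(dest);
             FileInputStream in1 = new FileInputStream(first);
             FileInputStream in2 = new FileInputStream(second)) {
            FileChannel out = fos.getChannel();
            // Like the Pinot code, this trusts a single transferTo call to move
            // everything; strictly speaking transferTo may transfer fewer bytes
            // and should be looped for very large files.
            FileChannel c1 = in1.getChannel();
            c1.transferTo(0, c1.size(), out);
            FileChannel c2 = in2.getChannel();
            c2.transferTo(0, c2.size(), out);
        }
    }
}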
From source file:com.androidquery.AbstractAQuery.java
/**
 * Create a temporary file on EXTERNAL storage (sdcard) that holds the cached content of the url.
 * Returns null if the url is not cached, or the system cannot create such a file (sdcard is absent,
 * such as in the emulator).
 *
 * The returned file is accessible to all apps, therefore it is ideal for sharing content (such as a photo)
 * via the intent mechanism.
 *
 * <br>
 * <br>
 * Example Usage:
 *
 * <pre>
 * Intent intent = new Intent(Intent.ACTION_SEND);
 * intent.setType("image/jpeg");
 * intent.putExtra(Intent.EXTRA_STREAM, Uri.fromFile(file));
 * startActivityForResult(Intent.createChooser(intent, "Share via:"), 0);
 * </pre>
 *
 * <br>
 * The temp file will be deleted when AQUtility.cleanCacheAsync is invoked, or the file can be
 * explicitly deleted after use.
 *
 * @param url The url of the desired cached content.
 * @param filename The desired file name, which might be used by other apps to describe the content, such as an email attachment.
 * @return temp file
 */
public File makeSharedFile(String url, String filename) {
    File file = null;
    try {
        File cached = getCachedFile(url);
        if (cached != null) {
            File temp = AQUtility.getTempDir();
            if (temp != null) {
                file = new File(temp, filename);
                file.createNewFile();

                FileInputStream fis = new FileInputStream(cached);
                FileOutputStream fos = new FileOutputStream(file);
                FileChannel ic = fis.getChannel();
                FileChannel oc = fos.getChannel();
                try {
                    ic.transferTo(0, ic.size(), oc);
                } finally {
                    AQUtility.close(fis);
                    AQUtility.close(fos);
                    AQUtility.close(ic);
                    AQUtility.close(oc);
                }
            }
        }
    } catch (Exception e) {
        AQUtility.debug(e);
    }
    return file;
}
From source file:MyZone.Settings.java
public byte[] readXML(String filename) {
    byte[] readIn = null;
    FileChannel channel = null;
    FileLock lock = null;
    FileInputStream fis = null;
    ByteArrayOutputStream baos = null;
    try {
        File file = new File(filename);
        if (!file.exists()) {
            return null;
        }
        fis = new FileInputStream(file);
        channel = fis.getChannel();
        while ((lock = channel.tryLock(0L, Long.MAX_VALUE, true)) == null) {
            Thread.yield();
        }
        baos = new ByteArrayOutputStream();
        byte[] b = new byte[1024];
        ByteBuffer buf = ByteBuffer.wrap(b);
        int count = 0;
        long fileLength = file.length();
        while (fileLength > 0) {
            count = channel.read(buf);
            if (count >= 0) {
                fileLength -= count;
                baos.write(b, 0, count);
                buf.rewind();
            }
        }
        readIn = baos.toByteArray();
    } catch (Exception e) {
        if (DEBUG) {
            e.printStackTrace();
        }
        readIn = null;
    } finally {
        try {
            if (lock != null) {
                lock.release();
            }
            if (channel != null) {
                channel.close();
            }
            if (fis != null) {
                fis.close();
            }
            if (baos != null) {
                baos.close();
            }
        } catch (Exception e) {
            if (DEBUG) {
                e.printStackTrace();
            }
            readIn = null;
        }
    }
    return readIn;
}
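The locking idiom above, isolated: a FileChannel obtained from a FileInputStream is open for reading only, so it can only take shared locks (the third argument to tryLock must be true; requesting an exclusive lock throws NonWritableChannelException). A minimal sketch with a hypothetical class name:

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;

final class SharedLockRead {
    static void readWithSharedLock(String path) throws IOException {
        try (FileInputStream fis = new FileInputStream(path)) {
            FileChannel channel = fis.getChannel();
            // shared = true: multiple readers may hold this lock concurrently.
            FileLock lock = channel.tryLock(0L, Long.MAX_VALUE, true);
            if (lock == null) {
                // Another process holds a conflicting (exclusive) lock.
                return;
            }
            try {
                // ... read through the channel while the shared lock is held ...
            } finally {
                lock.release();
            }
        }
    }
}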
From source file:com.datafibers.kafka.connect.SchemaedFileSourceTask.java
private void openFileStream() throws ConnectException, InterruptedException, FileNotFoundException, IOException {
    try {
        FileInputStream fstream = new FileInputStream(filename);
        String fdes = fstream.getFD().valid() ? fstream.getFD().toString() : "unknown";
        stream = fstream;
        log.trace("FileInputStream created for {}; fd={}", filename, fdes);

        Map<String, Object> topicOffset = context.offsetStorageReader().offset(offsetKey(filename));
        if (topicOffset != null) {
            Object lastRecordedOffset = topicOffset.get(TASK_OFFSET_VALUE);
            if (lastRecordedOffset != null) {
                if (!(lastRecordedOffset instanceof Long)) {
                    throw new ConnectException("Offset position is the incorrect type");
                }
                if (streamOffset != null) {
                    if (streamOffset > (Long) lastRecordedOffset) {
                        log.trace("streamOffset ({}) is greater than lastRecordedOffset ({})",
                                streamOffset.toString(), lastRecordedOffset.toString());
                        lastRecordedOffset = streamOffset;
                    }
                } else {
                    if (config.getReplishAllData()) {
                        log.trace("Ignoring committed offset ({}) to allow republication of existing data",
                                lastRecordedOffset);
                        lastRecordedOffset = 0L;
                    }
                }
                long skipLeft = (Long) lastRecordedOffset;
                while (skipLeft > 0) {
                    try {
                        long skipped = stream.skip(skipLeft);
                        skipLeft -= skipped;
                    } catch (IOException e) {
                        log.error("Error while trying to seek to previous offset in file: ", e);
                        throw new ConnectException(e);
                    }
                }
                log.debug("Skipped to offset {}", lastRecordedOffset);
            }
            if (streamOffset == null) {
                streamOffset = (lastRecordedOffset != null) ? (Long) lastRecordedOffset : 0L;
            } else if (lastRecordedOffset != null) {
                streamOffset = java.lang.Math.max(streamOffset, (Long) lastRecordedOffset);
            }
        } else {
            if (streamOffset == null) {
                // First time through.
                streamOffset = 0L;
            } else {
                // Re-opening the file ... make sure we skip over stuff we've already read.
                fstream.getChannel().position(streamOffset);
            }
        }
        log.debug("Opened {} for reading; current offset {}", logFilename(), streamOffset);
    } catch (FileNotFoundException e) {
        log.warn("Couldn't find file {} for SchemaedFileSourceTask, sleeping to wait for it to be created",
                logFilename());
        synchronized (this) {
            this.wait(1000);
        }
        throw e;
    } catch (IOException e) {
        log.warn("Unexpected IOException: {}", e.toString());
        synchronized (this) {
            this.wait(1000);
        }
        throw e;
    }
}
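The reopen-and-seek step above in miniature: rather than calling stream.skip(n) in a loop, positioning the channel moves the FileInputStream directly to a saved offset. A sketch with hypothetical names:

import java.io.FileInputStream;
import java.io.IOException;

final class ReopenAtOffset {
    static FileInputStream openAt(String path, long savedOffset) throws IOException {
        FileInputStream stream = new FileInputStream(path);
        // Positioning the channel also moves the stream's read position,
        // so the next stream.read() starts at savedOffset.
        stream.getChannel().position(savedOffset);
        return stream;
    }
}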
From source file:net.sf.firemox.deckbuilder.MdbLoader.java
/**
 * Load author, tbs name, ... Load the rules of this MDB and set them in the MP
 * environment, set the current offset to the beginning of the card section and
 * return its position. Load settings associated with this MDB. <br>
 *
 * @param dbFile
 *          the MDB file containing rules.
 * @return the opened stream as is; the current offset corresponds to
 *         the last byte read of the disclaimer/license section.
 * @throws IOException
 *           If some other I/O error occurs
 */
public static FileInputStream loadHeader(String dbFile) throws IOException {
    if (dbFile.equals(lastMdbFile)) {
        lastMdbStream.getChannel().position(endOfHeaderOffset);
        return lastMdbStream;
    }
    closeMdb();
    final FileInputStream dbStream = openMdb(dbFile, false);
    tbsFullName = MToolKit.readString(dbStream);
    version = MToolKit.readString(dbStream);
    author = MToolKit.readString(dbStream);
    moreInfo = MToolKit.readString(dbStream);

    // the database references
    DatabaseFactory.init(dbStream);
    artURL = MToolKit.readString(dbStream);
    backPicture = MToolKit.readString(dbStream);
    damagePicture = MToolKit.readString(dbStream);
    disclaimer = MToolKit.readText(dbStream).trim();

    // colored mana section
    coloredManaSmlURL = MToolKit.readString(dbStream);
    coloredManaBigURL = MToolKit.readString(dbStream);
    coloredBigManas = new String[IdCommonToken.COLOR_NAMES.length];
    coloredSmlManas = new String[IdCommonToken.PAYABLE_COLOR_NAMES.length];
    coloredSmlManasHtml = new String[coloredSmlManas.length];
    for (int i = IdCommonToken.COLOR_NAMES.length; i-- > 1;) {
        int index = dbStream.read();
        coloredSmlManas[index] = MToolKit.readString(dbStream);
        coloredBigManas[index] = MToolKit.readString(dbStream);
        coloredSmlManasHtml[index] = "<img src='file:///"
                + MToolKit.getTbsHtmlPicture("mana/colored/small/" + coloredSmlManas[index]) + "'> ";
    }

    // colorless mana section
    colorlessURL = MToolKit.readString(dbStream);
    colorlessBigURL = MToolKit.readString(dbStream);
    unknownSmlMana = MToolKit.readString(dbStream);
    unknownSmlManaHtml = "<img src='file:///"
            + MToolKit.getTbsHtmlPicture("mana/colorless/small/" + unknownSmlMana) + "'> ";
    colorlessSmlManas = new String[dbStream.read()];
    colorlessSmlManasHtml = new String[colorlessSmlManas.length];
    for (int i = colorlessSmlManas.length; i-- > 0;) {
        int index = dbStream.read();
        colorlessSmlManas[index] = MToolKit.readString(dbStream);
        colorlessSmlManasHtml[index] = "<img src='file:///"
                + MToolKit.getTbsHtmlPicture("mana/colorless/small/" + colorlessSmlManas[index]) + "'> ";
    }

    // hybrid mana section
    hybridManasURL = MToolKit.readString(dbStream);
    for (int i = 0; i < IdCommonToken.HYBRID_COLOR_NAMES.length; i++) {
        int index = dbStream.read();
        coloredSmlManas[index] = MToolKit.readString(dbStream);
        coloredSmlManasHtml[index] = "<img src='file:///"
                + MToolKit.getTbsHtmlPicture("mana/colored/small/" + coloredSmlManas[index]) + "'> ";
    }

    // phyrexian mana section
    phyrexianManasURL = MToolKit.readString(dbStream);
    for (int i = 0; i < IdCommonToken.PHYREXIAN_COLOR_NAMES.length; i++) {
        int index = dbStream.read();
        coloredSmlManas[index] = MToolKit.readString(dbStream);
        coloredSmlManasHtml[index] = "<img src='file:///"
                + MToolKit.getTbsHtmlPicture("mana/colored/small/" + coloredSmlManas[index]) + "'> ";
    }

    // Read the card bytes position
    firstCardsBytesOffset = MToolKit.readInt24(dbStream);

    // the deck constraints
    DeckConstraints.init(dbStream);

    // read additional zones
    ZoneManager.initTbs(dbStream);

    // the test references
    TestFactory.init(dbStream);

    // the action constraints and pictures
    ActionFactory.init(dbStream);

    // the objects
    ObjectFactory.init(dbStream);

    // the ability references
    AbilityFactory.init(dbStream);

    // read the damage type name export
    Damage.init(dbStream);

    // load state pictures of cards, tooltip filters, exported types
    CardFactory.init(dbStream);

    // Read the card names position
    endOfHeaderOffset = dbStream.getChannel().position();
    dbStream.getChannel().position(firstCardsBytesOffset);
    firstCardsNamesOffset = MToolKit.readInt24(dbStream);
    dbStream.getChannel().position(endOfHeaderOffset);
    lastMdbFile = dbFile;
    return dbStream;
}
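MdbLoader's closing trick, using getChannel().position() as both getter and setter, lets a reader bookmark its place, jump elsewhere in the file, and return. The same idea in isolation (a sketch; the class, method, and offset are hypothetical):

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

final class BookmarkedRead {
    static void peekAhead(FileInputStream in, long tableOffset) throws IOException {
        FileChannel ch = in.getChannel();
        long bookmark = ch.position();   // remember where we are
        ch.position(tableOffset);        // jump to a table elsewhere in the file
        int firstByte = in.read();       // stream reads happen at the new position
        System.out.println("peeked byte: " + firstByte);
        ch.position(bookmark);           // return to the saved spot
    }
}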