List of usage examples for java.nio ByteBuffer capacity
public final int capacity()
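Before the examples, a note on semantics: capacity() reports the fixed size of a buffer's backing store and never changes after allocation, while position and limit move with every read, write, flip(), and clear(). A minimal, self-contained sketch (class name chosen for illustration):

import java.nio.ByteBuffer;

public class CapacityDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        System.out.println(buf.capacity()); // 16, fixed at allocation
        buf.putInt(42).putLong(7L);         // position = 12
        buf.flip();                         // limit = 12, position = 0
        System.out.println(buf.limit());    // 12, bytes available to read
        buf.clear();                        // position = 0, limit = capacity
        System.out.println(buf.capacity()); // still 16: capacity never moves
    }
}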
From source file:com.gemstone.gemfire.internal.cache.OplogJUnitTest.java
/**
 * Tests whether an empty byte array is correctly written to disk as a
 * zero-value-length operation: the 4-byte field recording the value length is
 * absent, and since the value length is zero no bytes for the value itself
 * get added. Similarly, during recovery from the HTree as well as the Oplog,
 * the empty byte array should be read back correctly.
 *
 * @author Asif
 */
@Test
public void testEmptyByteArrayPutAndRecovery() {
    CacheObserver old = CacheObserverHolder.setInstance(new CacheObserverAdapter() {
        @Override
        public void afterConflation(ByteBuffer origBB, ByteBuffer conflatedBB) {
            if ((2 + 4 + 1 + EntryEventImpl.serialize("key1").length) != origBB.capacity()) {
                failureCause = "For a backup region, addition of an empty array should result in an offset of 6 bytes whereas the actual offset is " + origBB.capacity();
                testFailed = true;
            }
            Assert.assertTrue(
                    "For a backup region, addition of an empty array should result in an offset of 6 bytes whereas the actual offset is " + origBB.capacity(),
                    (2 + 4 + 1 + EntryEventImpl.serialize("key1").length) == origBB.capacity());
        }
    });
    try {
        final int MAX_OPLOG_SIZE = 2000;
        diskProps.setMaxOplogSize(MAX_OPLOG_SIZE);
        diskProps.setPersistBackup(true);
        // diskProps.setRolling(true);
        diskProps.setSynchronous(true);
        diskProps.setOverflow(false);
        diskProps.setDiskDirsAndSizes(new File[] { dirs[0] }, new int[] { 1400 });
        final byte[] val = new byte[0];
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
        region.put("key1", val);
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        region.close();
        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
        byte[] _val = (byte[]) region.get("key1");
        assertTrue(
                "value of key1 after restarting the region is not an empty byte array. This may indicate a problem in reading from the Oplog",
                _val.length == 0);
        if (this.logWriter.infoEnabled()) {
            this.logWriter.info(
                    "After the first region close & reopen no problems were encountered, hence the Oplog has been read successfully.");
            this.logWriter.info(
                    "Closing the region again without any operation done would indicate that next time data will be loaded from the HTree.");
        }
        region.close();
        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
        _val = (byte[]) region.get("key1");
        assertTrue(
                "value of key1 after restarting the region is not an empty byte array. This may indicate a problem in reading from the HTree",
                _val.length == 0);
        assertFalse(failureCause, testFailed);
        // region.close();
    } finally {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(old);
    }
}
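The observer above ties the conflated record's size to the buffer's capacity via the arithmetic 2 + 4 + 1 + key bytes. As a hedged illustration of that pattern (the field layout below mirrors the test's arithmetic only; it is not GemFire's actual on-disk format), an exactly-sized buffer lets capacity() double as the record length:

import java.nio.ByteBuffer;

// Hypothetical layout echoing the test's size arithmetic:
// 2-byte header + 4-byte key length + 1-byte (zero) value length + key bytes.
public class RecordSizeCheck {
    static ByteBuffer encode(byte[] key) {
        ByteBuffer buf = ByteBuffer.allocate(2 + 4 + 1 + key.length);
        buf.putShort((short) 1); // header (placeholder value)
        buf.putInt(key.length);  // key length
        buf.put((byte) 0);       // value length: empty value contributes nothing
        buf.put(key);            // key bytes
        buf.flip();
        return buf;
    }

    public static void main(String[] args) {
        byte[] key = "key1".getBytes();
        ByteBuffer record = encode(key);
        // Exactly-sized buffer: capacity doubles as the record length.
        assert record.capacity() == 2 + 4 + 1 + key.length;
        System.out.println("record bytes = " + record.capacity());
    }
}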
From source file:com.act.lcms.v2.fullindex.Builder.java
protected void extractTriples(Iterator<LCMSSpectrum> iter, List<MZWindow> windows)
        throws RocksDBException, IOException {
    /* Warning: this method makes heavy use of ByteBuffers to perform memory-efficient collection of values and
     * conversion of those values into byte arrays that RocksDB can consume. If you haven't already, go read this
     * tutorial on ByteBuffers: http://mindprod.com/jgloss/bytebuffer.html
     *
     * ByteBuffers are quite low-level structures, and they use some terms you need to watch out for:
     *   capacity: The total number of bytes in the array backing the buffer. Don't write more than this.
     *   position: The next index in the buffer to read or write a byte. Moves with each read or write op.
     *   limit:    A mark of where the final byte in the buffer was written. Don't read past this.
     *             The remaining() call is affected by the limit.
     *   mark:     Ignore this for now, we don't use it. (We'll always, always read buffers from 0.)
     *
     * And here are some methods that we'll use often:
     *   clear:     Set position = 0, limit = capacity. Pretend the buffer is empty, and is ready for more writes.
     *   flip:      Set limit = position, then position = 0. This remembers how many bytes were written to the buffer
     *              (as the current position), and then puts the position at the beginning.
     *              Always call this after the write before a read.
     *   rewind:    Set position = 0. Buffer is ready for reading, but unless the limit was set we might not know how
     *              many bytes there are to read. Always call flip() before rewind(). Can rewind many times to re-read
     *              the buffer repeatedly.
     *   remaining: How many bytes do we have left to read? Requires an accurate limit value to avoid garbage bytes.
     *   reset:     Don't use this. It uses the mark, which we don't need currently.
     *
     * Write/read patterns look like:
     *   buffer.clear(); // Clear out anything already in the buffer.
     *   buffer.put(thing1).put(thing2)... // write a bunch of stuff
     *   buffer.flip(); // Prep for reading. Call *once*!
     *
     *   while (buffer.hasRemaining()) { buffer.get(); } // Read a bunch of stuff.
     *   buffer.rewind(); // Ready for reading again!
     *   while (buffer.hasRemaining()) { buffer.get(); } // Etc.
     *   buffer.clear(); // Forget what was written previously, buffer is ready for reuse.
     *
     * We use byte buffers because they're fast, efficient, and offer incredibly convenient means of serializing a
     * stream of primitive types to their minimal binary representations. The same operations on objects + object
     * streams require significantly more CPU cycles, consume more memory, and tend to be brittle (i.e. if a class
     * definition changes slightly, serialization may break). Since the data we're dealing with is pretty simple, we
     * opt for the low-level approach. */

    /* Because we'll eventually use the window indices to map an m/z range to a list of triples that fall within
     * that range, verify that all of the indices are unique. If they're not, we'll end up overwriting the data in
     * and corrupting the structure of the index. */
    ensureUniqueMZWindowIndices(windows);

    // For every m/z window, allocate a buffer to hold the indices of the triples that fall in that window.
    ByteBuffer[] mzWindowTripleBuffers = new ByteBuffer[windows.size()];
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        /* Note: the mapping between these buffers and their respective mzWindows is purely positional. Specifically,
         * mzWindows.get(i).getIndex() != i, but mzWindowTripleBuffers[i] belongs to mzWindows.get(i). We'll map
         * window indices to the contents of mzWindowTripleBuffers at the very end of this function. */
        mzWindowTripleBuffers[i] = ByteBuffer.allocate(Long.BYTES * 4096); // Start with 4096 longs = 8 pages per window.
    }

    // Every TMzI gets an index which we'll use later when we're querying by m/z and time.
    long counter = -1; // We increment at the top of the loop.
    // Note: we could also write to an mmapped file and just track pointers, but then we might lose out on compression.

    // We allocate all the buffers strictly here, as we know how many bytes a long and a triple will take. Then reuse!
    ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);
    ByteBuffer valBuffer = ByteBuffer.allocate(TMzI.BYTES);

    List<Float> timepoints = new ArrayList<>(2000); // We can be sloppy here, as the count is small.

    /* We use a sweep-line approach to scanning through the m/z windows so that we can aggregate all intensities in
     * one pass over the current LCMSSpectrum (this saves us one inner loop in our extraction process). The m/z
     * values in the LCMSSpectrum become our "critical" or "interesting points" over which we sweep our m/z ranges.
     * The next window in m/z order is guaranteed to be the next one we want to consider since we address the points
     * in m/z order as well. As soon as we've passed out of the range of one of our windows, we discard it. It is
     * valid for a window to be added to and discarded from the working queue in one application of the work loop. */
    LinkedList<MZWindow> tbdQueueTemplate = new LinkedList<>(windows); // We can reuse this template to init the sweep.

    int spectrumCounter = 0;
    while (iter.hasNext()) {
        LCMSSpectrum spectrum = iter.next();
        float time = spectrum.getTimeVal().floatValue();

        // This will record all the m/z + intensity readings that correspond to this timepoint. Exactly sized too!
        ByteBuffer triplesForThisTime = ByteBuffer.allocate(Long.BYTES * spectrum.getIntensities().size());

        // Batch up all the triple writes to reduce the number of times we hit the disk in this loop.
        // Note: huge success!
        RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();

        // Initialize the sweep line lists. Windows flow as follows: tbd -> working -> done (nowhere).
        LinkedList<MZWindow> workingQueue = new LinkedList<>();
        LinkedList<MZWindow> tbdQueue = (LinkedList<MZWindow>) tbdQueueTemplate.clone(); // clone is in the docs, so okay!

        for (Pair<Double, Double> mzIntensity : spectrum.getIntensities()) {
            // Very important: increment the counter for every triple. Otherwise we'll overwrite triples = Very Bad (tm).
            counter++;

            // Brevity = soul of wit!
            Double mz = mzIntensity.getLeft();
            Double intensity = mzIntensity.getRight();

            // Reset the buffers so we end up re-using the few bytes we've allocated.
            counterBuffer.clear(); // Empty (virtually).
            counterBuffer.putLong(counter);
            counterBuffer.flip(); // Prep for reading.

            valBuffer.clear(); // Empty (virtually).
            TMzI.writeToByteBuffer(valBuffer, time, mz, intensity.floatValue());
            valBuffer.flip(); // Prep for reading.

            // First, shift any applicable ranges onto the working queue based on their minimum mz.
            while (!tbdQueue.isEmpty() && tbdQueue.peekFirst().getMin() <= mz) {
                workingQueue.add(tbdQueue.pop());
            }

            // Next, remove any ranges we've passed.
            while (!workingQueue.isEmpty() && workingQueue.peekFirst().getMax() < mz) {
                workingQueue.pop(); // TODO: add() this to a recovery queue which can then become the tbdQueue. Edge cases!
            }

            /* In the old indexed trace extractor world, we could bail here if there were no target m/z's in our
             * window set that matched with the m/z of our current mzIntensity. However, since we're now also
             * recording the links between timepoints and their (t, m/z, i) triples, we need to keep on keepin' on
             * regardless of whether we have any m/z windows in the working set right now. */

            // The working queue should now hold only ranges that include this m/z value. Sweep line swept!

            /* Now add this intensity to the buffers of all the windows in the working queue. Note that since we're
             * only storing the *index* of the triple, these buffers are going to consume less space than they would
             * if we stored everything together. */
            for (MZWindow window : workingQueue) {
                // TODO: count the number of times we add intensities to each window's accumulator for MS1-style warnings.
                counterBuffer.rewind(); // Already flipped.
                mzWindowTripleBuffers[window.getIndex()] = // Must assign when calling appendOrRealloc.
                        Utils.appendOrRealloc(mzWindowTripleBuffers[window.getIndex()], counterBuffer);
            }

            // We flipped after writing, so we should be good to rewind (to be safe) and read here.
            counterBuffer.rewind();
            valBuffer.rewind();
            writeBatch.put(ColumnFamilies.ID_TO_TRIPLE, Utils.toCompactArray(counterBuffer),
                    Utils.toCompactArray(valBuffer));

            // Rewind again for another read.
            counterBuffer.rewind();
            triplesForThisTime.put(counterBuffer);
        }

        writeBatch.write();
        assert (triplesForThisTime.position() == triplesForThisTime.capacity());

        ByteBuffer timeBuffer = ByteBuffer.allocate(Float.BYTES).putFloat(time);
        timeBuffer.flip(); // Prep both buffers for reading so they can be written to the DB.
        triplesForThisTime.flip();
        dbAndHandles.put(ColumnFamilies.TIMEPOINT_TO_TRIPLES, Utils.toCompactArray(timeBuffer),
                Utils.toCompactArray(triplesForThisTime));

        timepoints.add(time);

        spectrumCounter++;
        if (spectrumCounter % 1000 == 0) {
            LOGGER.info("Extracted %d time spectra", spectrumCounter);
        }
    }
    LOGGER.info("Extracted %d total time spectra", spectrumCounter);

    // Now write all the mzWindow to triple indexes.
    RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();
    ByteBuffer idBuffer = ByteBuffer.allocate(Integer.BYTES);
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        idBuffer.clear();
        idBuffer.putInt(windows.get(i).getIndex());
        idBuffer.flip();

        ByteBuffer triplesBuffer = mzWindowTripleBuffers[i];
        triplesBuffer.flip(); // Prep for read.

        writeBatch.put(ColumnFamilies.WINDOW_ID_TO_TRIPLES, Utils.toCompactArray(idBuffer),
                Utils.toCompactArray(triplesBuffer));
    }
    writeBatch.write();

    dbAndHandles.put(ColumnFamilies.TIMEPOINTS, TIMEPOINTS_KEY, Utils.floatListToByteArray(timepoints));
    dbAndHandles.flush(true);
}
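The helpers Utils.appendOrRealloc and Utils.toCompactArray are central to how capacity() is used here, but their bodies aren't shown. Below is a hedged sketch of what their contracts imply, inferred only from the call sites above; the doubling growth policy is an assumption:

import java.nio.ByteBuffer;

public final class BufferUtils {
    // Append src's remaining bytes to dst, doubling dst's capacity as needed.
    // Callers must use the return value: dst may be replaced by a larger copy
    // (which is why the code above reassigns mzWindowTripleBuffers[i]).
    static ByteBuffer appendOrRealloc(ByteBuffer dst, ByteBuffer src) {
        while (dst.remaining() < src.remaining()) {
            ByteBuffer bigger = ByteBuffer.allocate(dst.capacity() * 2);
            dst.flip();      // prepare old contents for reading
            bigger.put(dst); // copy [0, limit) into the new buffer
            dst = bigger;
        }
        return dst.put(src);
    }

    // Copy [0, limit) into an exactly-sized array; assumes a flipped buffer.
    static byte[] toCompactArray(ByteBuffer buf) {
        byte[] out = new byte[buf.limit()];
        buf.rewind();
        buf.get(out);
        return out;
    }

    public static void main(String[] args) {
        ByteBuffer dst = ByteBuffer.allocate(8);
        ByteBuffer src = ByteBuffer.allocate(Long.BYTES * 3);
        src.putLong(1).putLong(2).putLong(3).flip();
        dst = appendOrRealloc(dst, src);                // grows 8 -> 16 -> 32
        System.out.println(dst.capacity());             // 32
        dst.flip();
        System.out.println(toCompactArray(dst).length); // 24: only the written bytes
    }
}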
From source file:edu.brown.hstore.HStoreSite.java
/**
 * Send the transaction request to another node for execution. We will create
 * a TransactionRedirectCallback that will automatically send the ClientResponse
 * generated from the remote node for this txn back to the client.
 * @param catalog_proc
 * @param serializedRequest
 * @param base_partition
 * @param clientCallback
 */
public void transactionRedirect(Procedure catalog_proc, ByteBuffer serializedRequest, int base_partition,
        RpcCallback<ClientResponseImpl> clientCallback) {
    if (debug.val)
        LOG.debug(String.format("Forwarding %s request to partition %d [clientHandle=%d]",
                catalog_proc.getName(), base_partition,
                StoredProcedureInvocation.getClientHandle(serializedRequest)));

    // Make a wrapper for the original callback so that when the result comes back from the remote partition
    // we will just forward it back to the client. How sweet is that??
    RedirectCallback callback = null;
    try {
        callback = new RedirectCallback(this);
        // callback = (RedirectCallback)objectPools.CALLBACKS_TXN_REDIRECT_REQUEST.borrowObject();
        callback.init(clientCallback);
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get TransactionRedirectCallback", ex);
    }

    // Mark this request as having been redirected
    // XXX: This sucks because we have to copy the bytes, which will then
    // get copied again when we have to serialize it out to a ByteString
    serializedRequest.rewind();
    ByteBuffer copy = ByteBuffer.allocate(serializedRequest.capacity());
    copy.put(serializedRequest);
    StoredProcedureInvocation.setBasePartition(base_partition, copy);

    this.hstore_coordinator.transactionRedirect(copy.array(), callback, base_partition);
    if (hstore_conf.site.txn_counters)
        TransactionCounter.REDIRECTED.inc(catalog_proc);
}
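The copy idiom here (rewind(), allocate(capacity()), put()) clones the request's backing storage so the copy can be mutated, in this case by setBasePartition, without touching the original. Note that put(src) copies up to src's limit, which in this caller equals capacity because the request buffer was never truncated. A minimal sketch of the same idiom:

import java.nio.ByteBuffer;

public class BufferCopy {
    // Deep-copy a buffer's contents from position 0 up to its limit into a
    // new buffer with the same capacity.
    static ByteBuffer copyOf(ByteBuffer original) {
        original.rewind(); // read from the start
        ByteBuffer copy = ByteBuffer.allocate(original.capacity());
        copy.put(original); // copies [0, limit)
        copy.flip();
        return copy;
    }

    public static void main(String[] args) {
        ByteBuffer request = ByteBuffer.allocate(8).putLong(0xCAFEL);
        ByteBuffer copy = copyOf(request);
        copy.putLong(0, 0xBEEFL); // mutate the copy only
        System.out.println(request.getLong(0) + " vs " + copy.getLong(0));
    }
}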
From source file:com.linkedin.databus.core.DbusEventBuffer.java
private long remaining() {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Remaining query : head = " + _head.toString() + " tail =" + _tail.toString());
    }
    if (empty()) {
        long space = 0;
        for (ByteBuffer buf : _buffers) {
            space += buf.capacity();
        }
        return space;
    }
    if (_head.getRealPosition() < _tail.getRealPosition()) {
        long space = 0;
        for (int i = 0; i < _head.bufferIndex(); ++i) {
            space += _buffers[i].capacity();
        }
        space += _head.bufferOffset();
        space += _buffers[_tail.bufferIndex()].capacity() - _tail.bufferOffset();
        for (int i = _tail.bufferIndex() + 1; i < _buffers.length; ++i) {
            space += _buffers[i].capacity();
        }
        return space;
    }
    if (_head.getRealPosition() > _tail.getRealPosition()) {
        if (_head.bufferIndex() == _tail.bufferIndex()) {
            return (_head.getRealPosition() - _tail.getRealPosition());
        } else {
            long space = _buffers[_tail.bufferIndex()].capacity() - _tail.bufferOffset();
            space += _head.bufferOffset();
            for (int i = _tail.bufferIndex() + 1; i < _head.bufferIndex(); ++i) {
                space += _buffers[i].capacity();
            }
            return space;
        }
    }
    return 0;
}
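When the ring is empty, every byte of every segment is writable, so free space is simply the sum of the segments' capacities; the head/tail cases then subtract the occupied span. A compact sketch of the base case (names are illustrative):

import java.nio.ByteBuffer;

public class RingSpace {
    // Total writable space in an empty multi-segment ring buffer:
    // with no live data, free space is the sum of all segment capacities.
    static long freeSpaceWhenEmpty(ByteBuffer[] segments) {
        long space = 0;
        for (ByteBuffer segment : segments) {
            space += segment.capacity();
        }
        return space;
    }

    public static void main(String[] args) {
        ByteBuffer[] ring = { ByteBuffer.allocate(1024), ByteBuffer.allocate(512) };
        System.out.println(freeSpaceWhenEmpty(ring)); // 1536
    }
}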
From source file:com.linkedin.databus.core.DbusEventBuffer.java
private void saveDataBufferMetaInfo(boolean infoOnly) throws IOException {
    if (_allocationPolicy != AllocationPolicy.MMAPPED_MEMORY || !_bufferPersistenceEnabled) {
        _log.info("Not saving state metaInfoFile, because allocation policy is " + _allocationPolicy
                + "; bufferPersistenceEnabled:" + _bufferPersistenceEnabled);
        return;
    }

    String fileName = metaFileName() + (infoOnly ? MMAP_META_INFO_SUFFIX : "");
    DbusEventBufferMetaInfo mi = new DbusEventBufferMetaInfo(new File(_mmapDirectory, fileName));
    _log.info("about to save DbusEventBuffer for PP " + _physicalPartition + " state into " + mi.toString());

    // record session id - to figure out directory for the buffers
    mi.setSessionId(_sessionId);

    // write buffer-specific info - the number of buffers, plus the position, limit, and capacity of each one
    mi.setVal(DbusEventBufferMetaInfo.NUM_BYTE_BUFFER, Integer.toString(_buffers.length));
    StringBuilder bufferInfo = new StringBuilder("");
    for (ByteBuffer b : _buffers) {
        DbusEventBufferMetaInfo.BufferInfo bi = new DbusEventBufferMetaInfo.BufferInfo(b.position(), b.limit(),
                b.capacity());
        bufferInfo.append(bi.toString());
        bufferInfo.append(" ");
    }
    mi.setVal(DbusEventBufferMetaInfo.BYTE_BUFFER_INFO, bufferInfo.toString());

    String currentWritePosition = Long.toString(_currentWritePosition.getPosition());
    mi.setVal(DbusEventBufferMetaInfo.CURRENT_WRITE_POSITION, currentWritePosition);

    // _maxBufferSize
    mi.setVal(DbusEventBufferMetaInfo.MAX_BUFFER_SIZE, Integer.toString(_maxBufferSize));

    // NOTE: no need to save readBuffer and rwChannel
    String head = Long.toString(_head.getPosition());
    mi.setVal(DbusEventBufferMetaInfo.BUFFER_HEAD, head);
    String tail = Long.toString(_tail.getPosition());
    mi.setVal(DbusEventBufferMetaInfo.BUFFER_TAIL, tail);

    String empty = Boolean.toString(_empty);
    mi.setVal(DbusEventBufferMetaInfo.BUFFER_EMPTY, empty);

    mi.setVal(DbusEventBufferMetaInfo.ALLOCATED_SIZE, Long.toString(_allocatedSize));
    mi.setVal(DbusEventBufferMetaInfo.EVENT_START_INDEX, Long.toString(_eventStartIndex.getPosition()));

    // _numEventsInWindow
    mi.setVal(DbusEventBufferMetaInfo.NUM_EVENTS_IN_WINDOW, Integer.toString(_numEventsInWindow));
    // _lastWrittenSequence
    mi.setVal(DbusEventBufferMetaInfo.LAST_WRITTEN_SEQUENCE, Long.toString(_lastWrittenSequence));
    mi.setVal(DbusEventBufferMetaInfo.SEEN_END_OF_PERIOD_SCN, Long.toString(_seenEndOfPeriodScn));
    // _prevScn
    mi.setVal(DbusEventBufferMetaInfo.PREV_SCN, Long.toString(_prevScn));
    // _timestampOfFirstEvent
    mi.setVal(DbusEventBufferMetaInfo.TIMESTAMP_OF_FIRST_EVENT, Long.toString(_timestampOfFirstEvent));
    // _timestampOfLatestDataEvent
    mi.setVal(DbusEventBufferMetaInfo.TIMESTAMP_OF_LATEST_DATA_EVENT, Long.toString(_timestampOfLatestDataEvent));
    // eventState
    mi.setVal(DbusEventBufferMetaInfo.EVENT_STATE, _eventState.toString());

    mi.saveAndClose();
}
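Restoring an mmapped buffer later needs exactly the three numbers recorded per segment: position, limit, and capacity, with capacity doubling as a consistency check since a remapped file must come back at the same size. A hedged sketch of the round trip (BufferInfo below is a stand-in for DbusEventBufferMetaInfo.BufferInfo, not the real class):

import java.nio.ByteBuffer;

public class BufferStateRoundTrip {
    // Stand-in for DbusEventBufferMetaInfo.BufferInfo: the three values that
    // fully describe a buffer's read/write state.
    record BufferInfo(int pos, int limit, int capacity) {}

    static BufferInfo save(ByteBuffer buf) {
        return new BufferInfo(buf.position(), buf.limit(), buf.capacity());
    }

    static void restore(ByteBuffer buf, BufferInfo info) {
        if (buf.capacity() != info.capacity()) { // consistency check
            throw new IllegalStateException(
                    "capacity changed: " + buf.capacity() + " != " + info.capacity());
        }
        buf.limit(info.limit()); // set limit before position
        buf.position(info.pos());
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        buf.putInt(1).putInt(2);
        BufferInfo info = save(buf);
        buf.clear();
        restore(buf, info);
        System.out.println(buf.position() + "/" + buf.limit() + "/" + buf.capacity()); // 8/64/64
    }
}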
From source file:org.bimserver.geometry.GeometryRunner.java
@Override
public void run() {
    Thread.currentThread().setName("GeometryRunner");
    long start = System.nanoTime();
    job.setStartNanos(start);
    try {
        HashMapVirtualObject next = objectProvider.next();
        Query query = new Query("Double buffer query " + eClass.getName(),
                this.streamingGeometryGenerator.packageMetaData);
        QueryPart queryPart = query.createQueryPart();
        while (next != null) {
            long oid = next.getOid();
            queryPart.addOid(oid);
            if (eClass.isSuperTypeOf(next.eClass())) {
                if (originalQuery.getQueryParts().get(0).getOids().contains(oid)) {
                    job.addObject(next.getOid(), next.eClass().getName());
                }
            }
            next = objectProvider.next();
        }
        objectProvider = new QueryObjectProvider(databaseSession, this.streamingGeometryGenerator.bimServer,
                query, Collections.singleton(queryContext.getRoid()),
                this.streamingGeometryGenerator.packageMetaData);
        StreamingSerializer serializer = ifcSerializerPlugin.createSerializer(new PluginConfiguration());
        RenderEngine renderEngine = null;
        byte[] bytes = null;
        try {
            final Set<HashMapVirtualObject> objects = new LinkedHashSet<>();
            ObjectProviderProxy proxy = new ObjectProviderProxy(objectProvider, new ObjectListener() {
                @Override
                public void newObject(HashMapVirtualObject next) {
                    if (eClass.isSuperTypeOf(next.eClass())) {
                        if (next.eGet(GeometryRunner.this.streamingGeometryGenerator.representationFeature) != null) {
                            if (originalQuery.getQueryParts().get(0).getOids().contains(next.getOid())) {
                                objects.add(next);
                            }
                        }
                    }
                }
            });
            serializer.init(proxy, null, null, this.streamingGeometryGenerator.bimServer.getPluginManager(),
                    this.streamingGeometryGenerator.packageMetaData);
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            IOUtils.copy(serializer.getInputStream(), baos);
            bytes = baos.toByteArray();
            InputStream in = new ByteArrayInputStream(bytes);
            Map<Integer, HashMapVirtualObject> notFoundObjects = new HashMap<>();
            Set<Range> reusableGeometryData = new HashSet<>();
            Map<Long, TemporaryGeometryData> productToData = new HashMap<>();
            try {
                if (!objects.isEmpty()) {
                    renderEngine = renderEnginePool.borrowObject();
                    try (RenderEngineModel renderEngineModel = renderEngine.openModel(in, bytes.length)) {
                        renderEngineModel.setSettings(renderEngineSettings);
                        renderEngineModel.setFilter(renderEngineFilter);
                        try {
                            renderEngineModel.generateGeneralGeometry();
                        } catch (RenderEngineException e) {
                            if (e.getCause() instanceof java.io.EOFException) {
                                if (objects.isEmpty() || eClass.getName().equals("IfcAnnotation")) {
                                    // SKIP
                                } else {
                                    StreamingGeometryGenerator.LOGGER.error("Error in " + eClass.getName(), e);
                                }
                            }
                        }
                        OidConvertingSerializer oidConvertingSerializer = (OidConvertingSerializer) serializer;
                        Map<Long, Integer> oidToEid = oidConvertingSerializer.getOidToEid();
                        Map<Long, DebuggingInfo> debuggingInfo = new HashMap<>();
                        for (HashMapVirtualObject ifcProduct : objects) {
                            if (!this.streamingGeometryGenerator.running) {
                                return;
                            }
                            Integer expressId = oidToEid.get(ifcProduct.getOid());
                            try {
                                RenderEngineInstance renderEngineInstance = renderEngineModel
                                        .getInstanceFromExpressId(expressId);
                                RenderEngineGeometry geometry = renderEngineInstance.generateGeometry();
                                boolean translate = true;
                                // if (geometry == null || geometry.getIndices().length == 0) {
                                //     LOGGER.info("Running again...");
                                //     renderEngineModel.setFilter(renderEngineFilterTransformed);
                                //     geometry = renderEngineInstance.generateGeometry();
                                //     if (geometry != null) {
                                //         translate = false;
                                //     }
                                //     renderEngineModel.setFilter(renderEngineFilter);
                                // }
                                if (geometry != null && geometry.getNrIndices() > 0) {
HashMapVirtualObject geometryInfo = new HashMapVirtualObject(queryContext, GeometryPackage.eINSTANCE.getGeometryInfo()); HashMapWrappedVirtualObject bounds = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getBounds()); HashMapWrappedVirtualObject minBounds = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); HashMapWrappedVirtualObject maxBounds = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); minBounds.set("x", Double.POSITIVE_INFINITY); minBounds.set("y", Double.POSITIVE_INFINITY); minBounds.set("z", Double.POSITIVE_INFINITY); maxBounds.set("x", -Double.POSITIVE_INFINITY); maxBounds.set("y", -Double.POSITIVE_INFINITY); maxBounds.set("z", -Double.POSITIVE_INFINITY); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_IfcProductOid(), ifcProduct.getOid()); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_Bounds(), bounds); bounds.setAttribute(GeometryPackage.eINSTANCE.getBounds_Min(), minBounds); bounds.setAttribute(GeometryPackage.eINSTANCE.getBounds_Max(), maxBounds); HashMapWrappedVirtualObject boundsUntransformed = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getBounds()); WrappedVirtualObject minBoundsUntranslated = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); WrappedVirtualObject maxBoundsUntranslated = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); minBoundsUntranslated.set("x", Double.POSITIVE_INFINITY); minBoundsUntranslated.set("y", Double.POSITIVE_INFINITY); minBoundsUntranslated.set("z", Double.POSITIVE_INFINITY); maxBoundsUntranslated.set("x", -Double.POSITIVE_INFINITY); maxBoundsUntranslated.set("y", -Double.POSITIVE_INFINITY); maxBoundsUntranslated.set("z", -Double.POSITIVE_INFINITY); boundsUntransformed.setAttribute(GeometryPackage.eINSTANCE.getBounds_Min(), minBoundsUntranslated); boundsUntransformed.setAttribute(GeometryPackage.eINSTANCE.getBounds_Max(), maxBoundsUntranslated); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_BoundsUntransformed(), boundsUntransformed); double volume = 0; ObjectNode additionalData = renderEngineInstance.getAdditionalData(); if (streamingGeometryGenerator.isCalculateQuantities()) { if (additionalData != null) { geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_AdditionalData(), additionalData.toString()); if (additionalData.has("TOTAL_SURFACE_AREA")) { geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_Area(), additionalData.get("TOTAL_SURFACE_AREA").asDouble()); } if (additionalData.has("TOTAL_SHAPE_VOLUME")) { volume = additionalData.get("TOTAL_SHAPE_VOLUME").asDouble(); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_Volume(), volume); } } } HashMapVirtualObject geometryData = new HashMapVirtualObject(queryContext, GeometryPackage.eINSTANCE.getGeometryData()); geometryData.set("type", databaseSession.getCid(eClass)); ByteBuffer indices = geometry.getIndices(); IntBuffer indicesAsInt = indices.order(ByteOrder.LITTLE_ENDIAN) .asIntBuffer(); geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_Reused(), 1); geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_Indices(), createBuffer(queryContext, indices)); geometryData.set("nrIndices", indicesAsInt.capacity()); ByteBuffer vertices = geometry.getVertices(); DoubleBuffer verticesAsDouble = vertices.order(ByteOrder.LITTLE_ENDIAN) .asDoubleBuffer(); geometryData.set("nrVertices", verticesAsDouble.capacity()); 
geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_Vertices(), createBuffer(queryContext, vertices)); ByteBuffer normals = geometry.getNormals(); FloatBuffer normalsAsFloat = normals.order(ByteOrder.LITTLE_ENDIAN) .asFloatBuffer(); geometryData.set("nrNormals", normalsAsFloat.capacity()); geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_Normals(), createBuffer(queryContext, normals)); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_PrimitiveCount(), indicesAsInt.capacity() / 3); job.setTrianglesGenerated(indicesAsInt.capacity() / 3); job.getReport().incrementTriangles(indicesAsInt.capacity() / 3); streamingGeometryGenerator.cacheGeometryData(geometryData, vertices); ColorMap colorMap = new ColorMap(); ByteBuffer colors = ByteBuffer.wrap(new byte[0]); IntBuffer materialIndices = geometry.getMaterialIndices() .order(ByteOrder.LITTLE_ENDIAN).asIntBuffer(); if (materialIndices != null && materialIndices.capacity() > 0) { FloatBuffer materialsAsFloat = geometry.getMaterials() .order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer(); boolean hasMaterial = false; colors = ByteBuffer.allocate((verticesAsDouble.capacity() / 3) * 4); double[] triangle = new double[9]; for (int i = 0; i < materialIndices.capacity(); ++i) { int c = materialIndices.get(i); if (c > -1) { Color4f color = new Color4f(); for (int l = 0; l < 4; ++l) { float val = fixColor(materialsAsFloat.get(4 * c + l)); color.set(l, val); } for (int j = 0; j < 3; ++j) { int k = indicesAsInt.get(i * 3 + j); triangle[j * 3 + 0] = verticesAsDouble.get(3 * k); triangle[j * 3 + 1] = verticesAsDouble.get(3 * k + 1); triangle[j * 3 + 2] = verticesAsDouble.get(3 * k + 2); hasMaterial = true; for (int l = 0; l < 4; ++l) { float val = fixColor(materialsAsFloat.get(4 * c + l)); colors.put(4 * k + l, UnsignedBytes.checkedCast((int) (val * 255))); } } colorMap.addTriangle(triangle, color); } } if (hasMaterial) { ColorMap2 colorMap2 = new ColorMap2(); byte[] colorB = new byte[4]; for (int i = 0; i < colors.capacity(); i += 4) { colors.get(colorB); colorMap2.addColor(colorB); } HashMapVirtualObject colorPack = new HashMapVirtualObject( queryContext, GeometryPackage.eINSTANCE.getColorPack()); colorPack.set(GeometryPackage.eINSTANCE.getColorPack_Data(), colorMap2.toByteArray()); colorPack.save(); geometryData.setReference( GeometryPackage.eINSTANCE.getGeometryData_ColorPack(), colorPack.getOid(), 0); } if (colorMap.usedColors() == 0) { } else if (colorMap.usedColors() == 1) { WrappedVirtualObject color = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector4f()); Color4f firstColor = colorMap.getFirstColor(); color.set("x", firstColor.getR()); color.set("y", firstColor.getG()); color.set("z", firstColor.getB()); color.set("w", firstColor.getA()); geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_Color(), color); // This tells the code further on to not store this geometry, as it can be easily generated hasMaterial = false; } else { Color4f mostUsed = colorMap.getMostUsedColor(); WrappedVirtualObject color = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector4f()); color.set("x", mostUsed.getR()); color.set("y", mostUsed.getG()); color.set("z", mostUsed.getB()); color.set("w", mostUsed.getA()); geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_MostUsedColor(), color); } if (hasMaterial) { geometryData.set("nrColors", colors.capacity()); geometryData.set( GeometryPackage.eINSTANCE.getGeometryData_ColorsQuantized(), 
createBuffer(queryContext, colors)); } else { geometryData.set("nrColors", 0); } } else { geometryData.set("nrColors", 0); } boolean hasTransparency = colorMap.hasTransparency(); double[] productTranformationMatrix = new double[16]; if (translate && renderEngineInstance.getTransformationMatrix() != null) { productTranformationMatrix = renderEngineInstance .getTransformationMatrix(); } else { Matrix.setIdentityM(productTranformationMatrix, 0); } geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_NrColors(), colors.capacity()); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_NrVertices(), verticesAsDouble.capacity()); geometryInfo.setReference(GeometryPackage.eINSTANCE.getGeometryInfo_Data(), geometryData.getOid(), 0); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_HasTransparency(), hasTransparency); geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_HasTransparency(), hasTransparency); long size = this.streamingGeometryGenerator.getSize(geometryData); for (int i = 0; i < indicesAsInt.capacity(); i++) { this.streamingGeometryGenerator.processExtends(minBounds, maxBounds, productTranformationMatrix, verticesAsDouble, indicesAsInt.get(i) * 3, generateGeometryResult); this.streamingGeometryGenerator.processExtendsUntranslated(geometryInfo, verticesAsDouble, indicesAsInt.get(i) * 3, generateGeometryResult); } HashMapWrappedVirtualObject boundsUntransformedMm = createMmBounds( geometryInfo, boundsUntransformed, generateGeometryResult.getMultiplierToMm()); geometryInfo.set("boundsUntransformedMm", boundsUntransformedMm); HashMapWrappedVirtualObject boundsMm = createMmBounds(geometryInfo, bounds, generateGeometryResult.getMultiplierToMm()); geometryInfo.set("boundsMm", boundsMm); ByteBuffer normalsQuantized = quantizeNormals(normalsAsFloat); geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_NormalsQuantized(), createBuffer(queryContext, normalsQuantized)); HashMapWrappedVirtualObject geometryDataBounds = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getBounds()); WrappedVirtualObject geometryDataBoundsMin = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); WrappedVirtualObject geometryDataBoundsMax = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); geometryDataBoundsMin.set("x", ((HashMapWrappedVirtualObject) boundsMm.get("min")).get("x")); geometryDataBoundsMin.set("y", ((HashMapWrappedVirtualObject) boundsMm.get("min")).get("y")); geometryDataBoundsMin.set("z", ((HashMapWrappedVirtualObject) boundsMm.get("min")).get("z")); geometryDataBoundsMax.set("x", ((HashMapWrappedVirtualObject) boundsMm.get("max")).get("x")); geometryDataBoundsMax.set("y", ((HashMapWrappedVirtualObject) boundsMm.get("max")).get("y")); geometryDataBoundsMax.set("z", ((HashMapWrappedVirtualObject) boundsMm.get("max")).get("z")); geometryDataBounds.setAttribute(GeometryPackage.eINSTANCE.getBounds_Min(), geometryDataBoundsMin); geometryDataBounds.setAttribute(GeometryPackage.eINSTANCE.getBounds_Max(), geometryDataBoundsMax); geometryData.setAttribute( GeometryPackage.eINSTANCE.getGeometryData_BoundsMm(), geometryDataBounds); if (volume == 0) { volume = getVolumeFromBounds(boundsUntransformed); } float nrTriangles = geometry.getNrIndices() / 3; Density density = new Density(eClass.getName(), (float) volume, getBiggestFaceFromBounds(boundsUntransformedMm), (long) nrTriangles, geometryInfo.getOid()); geometryInfo.setAttribute( 
GeometryPackage.eINSTANCE.getGeometryInfo_Density(), density.getDensityValue()); generateGeometryResult.addDensity(density); double[] mibu = new double[] { (double) minBoundsUntranslated .eGet(GeometryPackage.eINSTANCE.getVector3f_X()), (double) minBoundsUntranslated .eGet(GeometryPackage.eINSTANCE.getVector3f_Y()), (double) minBoundsUntranslated .eGet(GeometryPackage.eINSTANCE.getVector3f_Z()), 1d }; double[] mabu = new double[] { (double) maxBoundsUntranslated .eGet(GeometryPackage.eINSTANCE.getVector3f_X()), (double) maxBoundsUntranslated .eGet(GeometryPackage.eINSTANCE.getVector3f_Y()), (double) maxBoundsUntranslated .eGet(GeometryPackage.eINSTANCE.getVector3f_Z()), 1d }; if (reuseGeometry) { /* TODO It still happens that geometry that should be reused is not reused, one of the reasons is still concurrency: * - When the same geometry is processed concurrently they could both do the hash check at a time when there is no cached version, then they both think it's non-reused geometry */ int hash = this.streamingGeometryGenerator.hash(indices, vertices, normals, colors); int firstIndex = indicesAsInt.get(0); int lastIndex = indicesAsInt.get(indicesAsInt.capacity() - 1); double[] firstVertex = new double[] { verticesAsDouble.get(firstIndex), verticesAsDouble.get(firstIndex + 1), verticesAsDouble.get(firstIndex + 2) }; double[] lastVertex = new double[] { verticesAsDouble.get(lastIndex * 3), verticesAsDouble.get(lastIndex * 3 + 1), verticesAsDouble.get(lastIndex * 3 + 2) }; Range range = new Range(firstVertex, lastVertex); Long referenceOid = this.streamingGeometryGenerator.hashes.get(hash); if (referenceOid != null) { HashMapVirtualObject referencedData = databaseSession .getFromCache(referenceOid); if (referencedData == null) { LOGGER.error("Object not found in cache: " + referenceOid + " (hash: " + hash + ")"); } synchronized (referencedData) { Integer currentValue = (Integer) referencedData.get("reused"); referencedData.set("reused", currentValue + 1); } HashMapWrappedVirtualObject dataBounds = (HashMapWrappedVirtualObject) referencedData .get("boundsMm"); extendBounds(boundsMm, dataBounds); referencedData.saveOverwrite(); geometryInfo.setReference( GeometryPackage.eINSTANCE.getGeometryInfo_Data(), referenceOid, 0); this.streamingGeometryGenerator.bytesSavedByHash.addAndGet(size); } else if (geometryReused) { // This is true when this geometry is part of a mapped item mapping (and used more than once) boolean found = false; // for (Range r : // reusableGeometryData) { // if (r.isSimilar(range)) { // geometryInfo.setReference(GeometryPackage.eINSTANCE.getGeometryInfo_Data(), // r.getGeometryDataOid(), 0); // float[] offset = // r.getOffset(range); // ProductDef productDef = // map.get(ifcProduct.getOid()); // double[] mappedItemMatrix = // null; // if (productDef != null && // productDef.getMatrix() != // null) { // mappedItemMatrix = // productDef.getMatrix(); // } else { // Matrix.translateM(mappedItemMatrix, // 0, offset[0], offset[1], // offset[2]); // } // double[] result = new // double[16]; // Matrix.multiplyMM(result, 0, // mappedItemMatrix, 0, // productTranformationMatrix, // 0); // setTransformationMatrix(geometryInfo, // result); // Overwritten? 
// bytesSavedByTransformation.addAndGet(size); // found = true; // break; // } // } if (!found) { range.setGeometryDataOid(geometryData.getOid()); reusableGeometryData.add(range); if (streamingGeometryGenerator.isCalculateQuantities()) { if (additionalData != null) { geometryInfo.setAttribute( GeometryPackage.eINSTANCE .getGeometryInfo_AdditionalData(), additionalData.toString()); if (additionalData.has("SURFACE_AREA_ALONG_Z")) { geometryInfo.setAttribute( GeometryPackage.eINSTANCE .getGeometryInfo_Area(), additionalData.get("SURFACE_AREA_ALONG_Z") .asDouble()); } if (additionalData.has("TOTAL_SHAPE_VOLUME")) { geometryInfo.setAttribute( GeometryPackage.eINSTANCE .getGeometryInfo_Volume(), additionalData.get("TOTAL_SHAPE_VOLUME") .asDouble()); } } } geometryInfo.setAttribute( GeometryPackage.eINSTANCE .getGeometryInfo_PrimitiveCount(), indicesAsInt.capacity() / 3); productToData.put(ifcProduct.getOid(), new TemporaryGeometryData(geometryData.getOid(), additionalData, indicesAsInt.capacity() / 3, size, mibu, mabu, indicesAsInt, verticesAsDouble, hasTransparency, colors.capacity())); geometryData.save(); databaseSession.cache((HashMapVirtualObject) geometryData); } } else { // if (sizes.containsKey(size) // && sizes.get(size).eClass() // == ifcProduct.eClass()) { // LOGGER.info("More reuse might // be possible " + size + " " + // ifcProduct.eClass().getName() // + ":" + ifcProduct.getOid() + // " / " + // sizes.get(size).eClass().getName() // + ":" + // sizes.get(size).getOid()); // } // if (geometryReused) { // range.setGeometryDataOid(geometryData.getOid()); // reusableGeometryData.add(range); // productToData.put(ifcProduct.getOid(), new TemporaryGeometryData(geometryData.getOid(), renderEngineInstance.getArea(), renderEngineInstance.getVolume(), indices.length / 3, size, mibu, mabu, indices, vertices)); // } // TODO else?? // So reuse is on, the data was not found by hash, and this item is not in a mapped item // By saving it before putting it in the cache/hashmap, we make sure we won't get a BimserverConcurrentModificationException geometryData.save(); // TODO Why?? 
databaseSession.cache((HashMapVirtualObject) geometryData); this.streamingGeometryGenerator.hashes.put(hash, geometryData.getOid()); // sizes.put(size, ifcProduct); } } else { geometryData.save(); databaseSession.cache((HashMapVirtualObject) geometryData); } this.streamingGeometryGenerator.setTransformationMatrix(geometryInfo, productTranformationMatrix); debuggingInfo.put(ifcProduct.getOid(), new DebuggingInfo(productTranformationMatrix, indices.asIntBuffer(), vertices.asFloatBuffer())); geometryInfo.save(); this.streamingGeometryGenerator.totalBytes.addAndGet(size); ifcProduct.setReference(this.streamingGeometryGenerator.geometryFeature, geometryInfo.getOid(), 0); ifcProduct.saveOverwrite(); // Doing a sync here because probably // writing large amounts of data, and db // only syncs every 100.000 writes by // default // databaseSession.getKeyValueStore().sync(); } else { // TODO } } catch (EntityNotFoundException e) { // e.printStackTrace(); // As soon as we find a representation that // is not Curve2D, then we should show a // "INFO" message in the log to indicate // there could be something wrong boolean ignoreNotFound = eClass.getName().equals("IfcAnnotation"); // for (Object rep : representations) { // if (rep instanceof // IfcShapeRepresentation) { // IfcShapeRepresentation // ifcShapeRepresentation = // (IfcShapeRepresentation)rep; // if // (!"Curve2D".equals(ifcShapeRepresentation.getRepresentationType())) // { // ignoreNotFound = false; // } // } // } if (!ignoreNotFound) { // LOGGER.warn("Entity not found " + // ifcProduct.eClass().getName() + " " + // (expressId) + "/" + // ifcProduct.getOid()); notFoundObjects.put(expressId, ifcProduct); } } catch (BimserverDatabaseException | RenderEngineException e) { StreamingGeometryGenerator.LOGGER.error("", e); } } if (geometryReused && map != null) { // We pick the first product and use that product to try and get the original data long firstKey = map.keySet().iterator().next(); ProductDef masterProductDef = map.get(firstKey); for (long key : map.keySet()) { if (key != firstKey) { ProductDef productDef = map.get(key); HashMapVirtualObject ifcProduct = productDef.getObject(); TemporaryGeometryData masterGeometryData = productToData .get(productDef.getMasterOid()); if (masterGeometryData != null) { HashMapVirtualObject geometryInfo = new HashMapVirtualObject( queryContext, GeometryPackage.eINSTANCE.getGeometryInfo()); HashMapWrappedVirtualObject bounds = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getBounds()); HashMapWrappedVirtualObject minBounds = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); HashMapWrappedVirtualObject maxBounds = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_Bounds(), bounds); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_HasTransparency(), masterGeometryData.hasTransparancy()); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_NrColors(), masterGeometryData.getNrColors()); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_NrVertices(), masterGeometryData.getNrVertices()); bounds.set("min", minBounds); bounds.set("max", maxBounds); minBounds.set("x", Double.POSITIVE_INFINITY); minBounds.set("y", Double.POSITIVE_INFINITY); minBounds.set("z", Double.POSITIVE_INFINITY); maxBounds.set("x", -Double.POSITIVE_INFINITY); maxBounds.set("y", -Double.POSITIVE_INFINITY); maxBounds.set("z", -Double.POSITIVE_INFINITY); double[] 
mibu = masterGeometryData.getMibu(); double[] mabu = masterGeometryData.getMabu(); HashMapWrappedVirtualObject boundsUntransformed = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getBounds()); WrappedVirtualObject minBoundsUntransformed = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); WrappedVirtualObject maxBoundsUntransformed = new HashMapWrappedVirtualObject( GeometryPackage.eINSTANCE.getVector3f()); minBoundsUntransformed.set("x", mibu[0]); minBoundsUntransformed.set("y", mibu[1]); minBoundsUntransformed.set("z", mibu[2]); maxBoundsUntransformed.set("x", mabu[0]); maxBoundsUntransformed.set("y", mabu[1]); maxBoundsUntransformed.set("z", mabu[2]); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_IfcProductOid(), ifcProduct.getOid()); boundsUntransformed.setAttribute( GeometryPackage.eINSTANCE.getBounds_Min(), minBoundsUntransformed); boundsUntransformed.setAttribute( GeometryPackage.eINSTANCE.getBounds_Max(), maxBoundsUntransformed); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_BoundsUntransformed(), boundsUntransformed); ObjectNode additionalData = masterGeometryData.getAdditionalData(); double volume = 0; if (additionalData != null) { geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_AdditionalData(), additionalData.toString()); if (additionalData.has("SURFACE_AREA_ALONG_Z")) { geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_Area(), additionalData.get("SURFACE_AREA_ALONG_Z").asDouble()); } if (additionalData.has("TOTAL_SHAPE_VOLUME")) { volume = additionalData.get("TOTAL_SHAPE_VOLUME").asDouble(); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_Volume(), volume); } } geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_PrimitiveCount(), masterGeometryData.getNrPrimitives()); job.getReport() .incrementTriangles(masterGeometryData.getNrPrimitives()); this.streamingGeometryGenerator.bytesSavedByMapping .addAndGet(masterGeometryData.getSize()); this.streamingGeometryGenerator.totalBytes .addAndGet(masterGeometryData.getSize()); // First, invert the master's mapping matrix double[] inverted = Matrix.identity(); if (!Matrix.invertM(inverted, 0, masterProductDef.getMappingMatrix(), 0)) { LOGGER.info( "No inverse, this should not be able to happen at this time, please report"); continue; } double[] finalMatrix = Matrix.identity(); double[] totalTranformationMatrix = Matrix.identity(); // Apply the mapping matrix of the product Matrix.multiplyMM(finalMatrix, 0, productDef.getMappingMatrix(), 0, inverted, 0); // Apply the product matrix of the product Matrix.multiplyMM(totalTranformationMatrix, 0, productDef.getProductMatrix(), 0, finalMatrix, 0); if (geometryGenerationDebugger != null) { // if (debuggingInfo.containsKey(ifcProduct.getOid())) { // DebuggingInfo debuggingInfo2 = debuggingInfo.get(ifcProduct.getOid()); // DebuggingInfo debuggingInfo3 = debuggingInfo.get(productDef.getMasterOid()); // // if (debuggingInfo2.getIndices().length != debuggingInfo3.getIndices().length) { // LOGGER.error("Different sizes for indices, weird..."); // LOGGER.error(ifcProduct.getOid() + " / " + productDef.getMasterOid()); // } else { // for (int i=0; i<debuggingInfo2.getIndices().length; i++) { // int index = debuggingInfo2.getIndices()[i]; // float[] vertex = new float[]{debuggingInfo2.getVertices()[index * 3], debuggingInfo2.getVertices()[index * 3 + 1], debuggingInfo2.getVertices()[index * 3 + 2], 1}; // float[] transformedOriginal = new 
float[4]; // Matrix.multiplyMV(transformedOriginal, 0, debuggingInfo2.getProductTranformationMatrix(), 0, vertex, 0); // float[] transformedNew = new float[4]; // int index2 = debuggingInfo3.getIndices()[i]; // float[] vertex2 = new float[]{debuggingInfo3.getVertices()[index2 * 3], debuggingInfo3.getVertices()[index2 * 3 + 1], debuggingInfo3.getVertices()[index2 * 3 + 2], 1}; // Matrix.multiplyMV(transformedNew, 0, totalTranformationMatrix, 0, vertex2, 0); // // // TODO margin should depend on bb of complete model // if (!almostTheSame((String)ifcProduct.get("GlobalId"), transformedNew, transformedOriginal, 0.05F)) { // geometryGenerationDebugger.transformedVertexNotMatching(ifcProduct, transformedOriginal, transformedNew, debuggingInfo2.getProductTranformationMatrix(), totalTranformationMatrix); // } // } // } // almostTheSame((String)ifcProduct.get("GlobalId"), debuggingInfo2.getProductTranformationMatrix(), totalTranformationMatrix, 0.01D); // } } IntBuffer indices = masterGeometryData.getIndices(); for (int i = 0; i < indices.capacity(); i++) { this.streamingGeometryGenerator.processExtends(minBounds, maxBounds, totalTranformationMatrix, masterGeometryData.getVertices(), indices.get(i) * 3, generateGeometryResult); } HashMapWrappedVirtualObject boundsUntransformedMm = createMmBounds( geometryInfo, boundsUntransformed, generateGeometryResult.getMultiplierToMm()); geometryInfo.set("boundsUntransformedMm", boundsUntransformedMm); HashMapWrappedVirtualObject boundsMm = createMmBounds(geometryInfo, bounds, generateGeometryResult.getMultiplierToMm()); geometryInfo.set("boundsMm", boundsMm); float nrTriangles = masterGeometryData.getNrPrimitives(); Density density = new Density(eClass.getName(), (float) volume, getBiggestFaceFromBounds(boundsUntransformedMm), (long) nrTriangles, geometryInfo.getOid()); geometryInfo.setAttribute( GeometryPackage.eINSTANCE.getGeometryInfo_Density(), density.getDensityValue()); generateGeometryResult.addDensity(density); HashMapVirtualObject referencedData = databaseSession .getFromCache(masterGeometryData.getOid()); Integer currentValue = (Integer) referencedData.get("reused"); referencedData.set("reused", currentValue + 1); HashMapWrappedVirtualObject dataBounds = (HashMapWrappedVirtualObject) referencedData .get("boundsMm"); extendBounds(boundsMm, dataBounds); // TODO this keeping track of the amount of reuse, takes it's toll on memory usage. Basically all geometry ends up in memory by the time the Geometry generation is done // We should try to see whether we can use BDB's mechanism to do partial retrievals/updates of a records here, because we only need to update just one value // Another, simpler option would be to introduce another layer between GeometryInfo and GeometryData, so we don't have to cache the actual data (vertices etc... the bulk) // In that case however the BinarySerializer would increase in complexity // This seems to have been partially solved now since GeometryData does not contain the bulk of the data anymore (the byte[]s are now in "Buffer"). 
referencedData.saveOverwrite(); geometryInfo.setReference( GeometryPackage.eINSTANCE.getGeometryInfo_Data(), masterGeometryData.getOid(), 0); // for (int i = 0; i < // indices.length; i++) { // processExtends(geometryInfo, // productTranformationMatrix, // vertices, indices[i] * 3, // generateGeometryResult); // processExtendsUntranslated(geometryInfo, // vertices, indices[i] * 3, // generateGeometryResult); // } // calculateObb(geometryInfo, // productTranformationMatrix, // indices, vertices, // generateGeometryResult); this.streamingGeometryGenerator.setTransformationMatrix(geometryInfo, totalTranformationMatrix); geometryInfo.save(); // totalBytes.addAndGet(size); ifcProduct.setReference(this.streamingGeometryGenerator.geometryFeature, geometryInfo.getOid(), 0); ifcProduct.saveOverwrite(); } } } } } } } finally { if (renderEngine != null) { renderEnginePool.returnObject(renderEngine); } try { if (!notFoundObjects.isEmpty()) { writeDebugFile(bytes, false, notFoundObjects); StringBuilder sb = new StringBuilder(); for (Integer key : notFoundObjects.keySet()) { sb.append(key + " (" + notFoundObjects.get(key).getOid() + ")"); sb.append(", "); } sb.delete(sb.length() - 2, sb.length()); job.setException(new Exception("Missing objects in model (" + sb.toString() + ")")); } else if (writeOutputFiles) { writeDebugFile(bytes, false, null); } in.close(); } catch (Throwable e) { } finally { } this.streamingGeometryGenerator.jobsDone.incrementAndGet(); this.streamingGeometryGenerator.updateProgress(); } } catch (Exception e) { StreamingGeometryGenerator.LOGGER.error("", e); writeDebugFile(bytes, true, null); job.setException(e); // LOGGER.error("Original query: " + originalQuery, e); } } catch (Exception e) { StreamingGeometryGenerator.LOGGER.error("", e); // LOGGER.error("Original query: " + originalQuery, e); } long end = System.nanoTime(); job.setEndNanos(end); }
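A recurring move in this generator is converting byte capacities into element counts through typed views: indices.asIntBuffer().capacity() yields the number of 32-bit indices, dividing by 3 yields triangles, and vertices.asDoubleBuffer().capacity() counts double-precision coordinates. A minimal sketch of that bookkeeping (buffer sizes are made up for illustration):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class GeometryCounts {
    public static void main(String[] args) {
        // Pretend these came from a render engine: 6 ints of indices,
        // and 4 vertices of 3 doubles each.
        ByteBuffer indices = ByteBuffer.allocate(6 * Integer.BYTES)
                .order(ByteOrder.LITTLE_ENDIAN);
        ByteBuffer vertices = ByteBuffer.allocate(4 * 3 * Double.BYTES)
                .order(ByteOrder.LITTLE_ENDIAN);

        // A typed view's capacity is measured in elements, not bytes.
        int nrIndices = indices.asIntBuffer().capacity();         // 6
        int nrTriangles = nrIndices / 3;                          // 2
        int nrCoordinates = vertices.asDoubleBuffer().capacity(); // 12

        System.out.printf("%d indices, %d triangles, %d coordinates%n",
                nrIndices, nrTriangles, nrCoordinates);
    }
}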
From source file:com.linkedin.databus.core.DbusEventBuffer.java
/**
 * Go over all the ByteBuffers and validate them.
 * @throws DbusEventBufferMetaInfo.DbusEventBufferMetaInfoException
 */
private void setAndValidateMMappedBuffers(DbusEventBufferMetaInfo mi)
        throws DbusEventBufferMetaInfo.DbusEventBufferMetaInfoException {
    // set buffer info - pos and limit
    DbusEventBufferMetaInfo.BufferInfo[] bufsInfo = null;
    bufsInfo = mi.getBuffersInfo();

    int i = 0;
    for (ByteBuffer buffer : _buffers) {
        DbusEventBufferMetaInfo.BufferInfo bi = bufsInfo[i];

        buffer.position(bi.getPos());
        buffer.limit(bi.getLimit());

        // validate
        if (buffer.position() > buffer.limit() || buffer.limit() > buffer.capacity()
                || buffer.capacity() != bi.getCapacity()) {
            String msg = "ByteBuffers don't match: i=" + i + "; pos=" + buffer.position() + "; limit="
                    + buffer.limit() + "; capacity=" + buffer.capacity() + "; miCapacity=" + bi.getCapacity();
            throw new DbusEventBufferMetaInfo.DbusEventBufferMetaInfoException(mi, msg);
        }
        i++;
    }
    _log.info("successfully validated all " + i + " mmapped buffers");
}
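The check above reduces to the ByteBuffer invariant position <= limit <= capacity, plus a comparison against the persisted capacity. The same predicate in isolation:

import java.nio.ByteBuffer;

public class BufferValidation {
    // The invariant every healthy ByteBuffer satisfies, plus a check that
    // the remapped buffer has the capacity recorded at save time.
    static boolean isConsistent(ByteBuffer buf, int savedCapacity) {
        return buf.position() <= buf.limit()
                && buf.limit() <= buf.capacity()
                && buf.capacity() == savedCapacity;
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(32);
        buf.position(8).limit(16);
        System.out.println(isConsistent(buf, 32)); // true
    }
}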
From source file:com.linkedin.databus.core.DbusEventBuffer.java
/**
 * Sets up the buffer state to prepare for appending an event.
 * This includes:
 * a) moving the head far enough so that the new event does not overwrite it
 *    - this also implies moving the head for the ScnIndex to keep it in lock-step with the buffer
 * b) moving the currentWritePosition to the correct location so that the entire event will fit
 *    into the selected ByteBuffer
 * @param dbusEventSize has the size of the event that will be appended.
 * @throws com.linkedin.databus.core.KeyTypeNotImplementedException
 */
private void prepareForAppend(final int dbusEventSize) throws KeyTypeNotImplementedException {
    boolean isDebugEnabled = LOG.isDebugEnabled();
    _queueLock.lock();
    try {
        ByteBuffer buffer = _buffers[_currentWritePosition.bufferIndex()];

        // Try to find a free ByteBuffer with enough space to fit the event.
        // We will make at most three attempts: 1) the current, possibly half-full, ByteBuffer;
        // 2) the next, possibly last and smaller, ByteBuffer; 3) this makes sure at least one
        // max-capacity ByteBuffer is checked.
        // When checking for available space at the end of a ByteBuffer, always leave one free byte
        // to distinguish between a finalized ByteBuffer (limit <= capacity - 1) and a ByteBuffer
        // being written to (limit == capacity).
        final int maxFindBufferIter = 3;
        int findBufferIter = 0;
        for (; findBufferIter < maxFindBufferIter
                && buffer.capacity() - 1 - _currentWritePosition.bufferOffset() < dbusEventSize;
                ++findBufferIter) {
            if (isDebugEnabled)
                _log.debug("skipping buffer " + _currentWritePosition.bufferIndex() + ": " + buffer
                        + ": insufficient capacity "
                        + (buffer.capacity() - _currentWritePosition.bufferOffset()) + " < " + dbusEventSize);
            final long newWritePos = _bufferPositionParser.incrementIndex(_currentWritePosition.getPosition(),
                    _buffers);

            // ensureFreeSpace will call moveHead, which also resets limit to capacity
            ensureFreeSpace(_currentWritePosition.getPosition(), newWritePos, isDebugEnabled);
            moveCurrentWritePosition(newWritePos);

            buffer = _buffers[_currentWritePosition.bufferIndex()];
        }

        if (maxFindBufferIter == findBufferIter)
            throw new DatabusRuntimeException(
                    "insufficient buffer capacity for event of size:" + dbusEventSize);

        // Passing true for noLimit, because we just need to make sure we don't go over capacity;
        // the limit will be reset in the next call.
        final long stopIndex = _bufferPositionParser.incrementOffset(_currentWritePosition.getPosition(),
                dbusEventSize, _buffers, true); // noLimit == true - see DDSDBUS-1515
        ensureFreeSpace(_currentWritePosition.getPosition(), stopIndex, isDebugEnabled);
        buffer.position(_currentWritePosition.bufferOffset());
    } finally {
        _queueLock.unlock();
    }
}
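The subtle point is the reserved final byte: limit == capacity marks a ByteBuffer still being written, while limit <= capacity - 1 marks a finalized one, so the fit test is capacity() - 1 - offset >= eventSize rather than capacity() - offset. A small sketch of just that check (names are illustrative):

import java.nio.ByteBuffer;

public class SegmentFit {
    // An event fits a segment only if it leaves the final byte unused; the
    // spare byte lets limit == capacity mean "still being written" while
    // limit <= capacity - 1 means "finalized".
    static boolean fits(ByteBuffer segment, int writeOffset, int eventSize) {
        return segment.capacity() - 1 - writeOffset >= eventSize;
    }

    public static void main(String[] args) {
        ByteBuffer segment = ByteBuffer.allocate(128);
        System.out.println(fits(segment, 0, 127)); // true: spare byte kept
        System.out.println(fits(segment, 0, 128)); // false: would consume it
    }
}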
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
private PendingLogTask[] journal(LogMessagingTask[] packets) {
    if (!ENABLE_JOURNALING)
        return new PendingLogTask[0]; // no error
    if (this.journaler.fos == null)
        return null; // error

    boolean amCoordinator = false, isAccept = false;
    PendingLogTask[] pending = new PendingLogTask[packets.length];
    for (int i = 0; i < packets.length; i++) {
        LogMessagingTask pkt = packets[i];
        amCoordinator = pkt.logMsg instanceof PValuePacket
                ? ((PValuePacket) pkt.logMsg).ballot.coordinatorID == myID
                : pkt.logMsg instanceof PreparePacket
                        ? ((PreparePacket) pkt.logMsg).ballot.coordinatorID == myID
                        : false;
        isAccept = pkt.logMsg.getType() == PaxosPacketType.ACCEPT;
        if (DONT_LOG_DECISIONS && !isAccept)
            continue;
        if (NON_COORD_ONLY && amCoordinator && !COORD_STRINGIFIES_WO_JOURNALING)
            continue;
        if (COORD_ONLY && !amCoordinator)
            continue;
        if (NON_COORD_DONT_LOG_DECISIONS && !amCoordinator && !isAccept)
            continue;
        if (COORD_DONT_LOG_DECISIONS && amCoordinator && !isAccept)
            continue;

        try {
            {
                byte[] bytes = !NO_STRINGIFY_JOURNALING && !(COORD_JOURNALS_WO_STRINGIFYING && amCoordinator)
                        ? toBytes(pkt.logMsg)
                        : Arrays.copyOf(testBytes, ((RequestPacket) pkt.logMsg).lengthEstimate());
                if (JOURNAL_COMPRESSION)
                    bytes = deflate(bytes);

                // format: <size><message>*
                ByteBuffer bbuf = ByteBuffer.allocate(4 + bytes.length);
                bbuf.putInt(bytes.length);
                bbuf.put(bytes);

                if (ALL_BUT_APPEND)
                    continue;

                if (STRINGIFY_WO_JOURNALING || (COORD_STRINGIFIES_WO_JOURNALING && amCoordinator))
                    continue;

                // else append to log file *after* creating pending task
                if (DB_INDEX_JOURNAL)
                    synchronized (this) {
                        SQLPaxosLogger.this.pendingLogMessages.add(pending[i] = new PendingLogTask(packets[i],
                                this.journaler.curLogfile, this.journaler.curLogfileSize, bytes.length));
                    }
                else if (PAUSABLE_INDEX_JOURNAL)
                    this.messageLog.add(packets[i].logMsg, this.journaler.curLogfile,
                            this.journaler.curLogfileSize, bytes.length);
                if (USE_MAP_DB && Util.oneIn(1000))
                    this.mapDB.dbMemory.commit();
                SQLPaxosLogger.this.journaler.appendToLogFile(bbuf.array(), pkt.logMsg.getPaxosID());
                assert (pending[i] == null
                        || this.journaler.curLogfileSize == pending[i].logfileOffset + bbuf.capacity());
            }
        } catch (IOException ioe) {
            ioe.printStackTrace();
            return null;
        }
    }

    if (this.journaler.curLogfileSize > MAX_LOG_FILE_SIZE) {
        // always commit pending before rolling log file
        log.log(Level.FINE, "{0} rolling log file {1}",
                new Object[] { SQLPaxosLogger.this.journaler, SQLPaxosLogger.this.journaler.curLogfile });
        // DelayProfiler.updateMovAvg("#fgsync", this.pendingLogMessages.size());
        // first sync, then roll log file
        SQLPaxosLogger.this.syncLogMessagesIndex();
        long t = System.currentTimeMillis();
        SQLPaxosLogger.this.journaler.rollLogFile();
        DelayProfiler.updateDelay("rolllog", t, 1.0);

        if (this.journaler.shouldGC()) {
            this.GC.submit(new TimerTask() {
                @Override
                public void run() {
                    try {
                        Thread.currentThread().setPriority(Thread.MIN_PRIORITY);
                        SQLPaxosLogger.this
                                .garbageCollectJournal(SQLPaxosLogger.this.journaler.getGCCandidates());
                    } catch (Exception | Error e) {
                        log.severe(this + " incurred exception "
                                + (e.getMessage() != null ? e.getMessage() : e)
                                + " while garbage collecting logfiles");
                        e.printStackTrace();
                    }
                }
            }, 0);
        }
    }
    if (!DB_INDEX_JOURNAL && Util.oneIn(Integer.MAX_VALUE)) // used only for testing
        SQLPaxosLogger.deleteOldCheckpoints(logDirectory, SQLPaxosLogger.this.journaler.logfilePrefix, 5, this);

    return pending;
}
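The journal record format noted in the code, <size><message>, is a length-prefixed frame: allocating exactly 4 + payload bytes means bbuf.capacity() is precisely the number of bytes appended to the log file, which is what the offset assertion relies on. A minimal encode/decode sketch:

import java.nio.ByteBuffer;

public class Framing {
    // <size><message>: an exactly-sized frame whose capacity is the record length.
    static ByteBuffer frame(byte[] payload) {
        ByteBuffer bbuf = ByteBuffer.allocate(Integer.BYTES + payload.length);
        bbuf.putInt(payload.length);
        bbuf.put(payload);
        bbuf.flip();
        return bbuf;
    }

    static byte[] unframe(ByteBuffer bbuf) {
        byte[] payload = new byte[bbuf.getInt()];
        bbuf.get(payload);
        return payload;
    }

    public static void main(String[] args) {
        ByteBuffer record = frame("accept:42".getBytes());
        System.out.println(record.capacity());           // 4 + 9 = 13 bytes on disk
        System.out.println(new String(unframe(record))); // accept:42
    }
}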
From source file:com.linkedin.databus.core.DbusEventBuffer.java
private int readEventsInternal(ReadableByteChannel readChannel,
        Iterable<InternalDatabusEventsListener> eventListeners,
        DbusEventsStatisticsCollector statsCollector) throws InvalidEventException {
    final boolean logDebugEnabled = _log.isDebugEnabled();

    ReadEventsReadPosition readPos = new ReadEventsReadPosition();
    ReadEventsWritePosition writePos = new ReadEventsWritePosition();

    _readBufferLock.lock();
    try {
        _eventState = WindowState.IN_READ;

        boolean mightHaveMoreData = true;
        // ensure the index is updated correctly if a control event of the preceding window
        // doesn't appear first (no start() called)
        if (_scnIndex.isEnabled() && _scnIndex.isEmpty()) {
            _scnIndex.setUpdateOnNext(true);
        }
        try {
            while (mightHaveMoreData) {
                final ByteBuffer readBuffer = readPos.getReadBuffer();
                boolean success = readEventsFromChannel(readChannel, readBuffer, logDebugEnabled);
                readPos.startIteration();

                final int numBytesRead = readPos.bytesRemaining();

                // if there is an error, we'll try to process whatever was read but stop after that
                mightHaveMoreData = success && (numBytesRead > 0)
                        && (readBuffer.position() == readBuffer.limit());

                if (numBytesRead > 0) {
                    _queueLock.lock();
                    try {
                        if (isClosed()) {
                            LOG.warn("stopping attempt to read more events into a buffer while it is closed. readPos="
                                    + readPos + "; buf=" + this.toString());
                            return 0;
                        }
                        try {
                            _scnIndex.assertHeadPosition(_head.getRealPosition());
                            _bufferPositionParser.assertSpan(_head.getPosition(),
                                    _currentWritePosition.getPosition(), logDebugEnabled);
                        } catch (RuntimeException re) {
                            _log.fatal("Got runtime Exception :", re);
                            _log.fatal("Event Buffer is :" + toString());
                            _scnIndex.printVerboseString(_log, Level.DEBUG);
                            throw re;
                        }

                        readBuffer.flip();
                        boolean hasMoreInStgBuffer = true;
                        while (hasMoreInStgBuffer && readPos.hasNext()) {
                            writePos.startNewIteration();

                            // figure out the boundary of events at which we can write;
                            // leave one byte at the end to distinguish between a finalized full
                            // ByteBuffer (limit <= capacity - 1) and a ByteBuffer that is still
                            // being written to (limit == capacity)
                            final int contiguousCapacity = writePos.getCurBuf().capacity()
                                    - writePos.getCurOfs() - 1;

                            final ReadEventsScanStatus eventScanStatus = readPos.startEventProcessing();
                            switch (eventScanStatus) {
                            case OK: {
                                final int curEventSize = readPos.getCurEvent().size();
                                if (readPos.bytesProcessed() + curEventSize > contiguousCapacity) {
                                    // not enough space to fit the event in the target buffer
                                    if (0 == writePos.getCurOfs()) {
                                        // event bigger than the ByteBuffer capacity
                                        throw new InvalidEventException("event too big to fit into buffer"
                                                + "; size:" + curEventSize + "; event:" + readPos.getCurEvent()
                                                + "; " + readPos + "; buffer.capacity:"
                                                + writePos.getCurBuf().capacity());
                                    } else {
                                        if (logDebugEnabled)
                                            _log.debug("unable to fit event with size "
                                                    + readPos.getCurEvent().size());
                                        // if we could not fit all the data in the destination ByteBuffer,
                                        // we should ensure that we clear up any remaining data in the
                                        // ByteBuffer
                                        long nextBufferPos = _bufferPositionParser
                                                .incrementIndex(writePos.getCurPos(), _buffers);
                                        boolean interrupted = ensureFreeSpace(writePos.getCurPos(),
                                                nextBufferPos, logDebugEnabled);
                                        if (interrupted) {
                                            _log.warn("ensureFreeSpace interrupted: " + readPos + " " + writePos);
                                            return readPos.getNumReadEvents();
                                        }
                                        assert assertBuffersLimits();

                                        writePos.moveToNextBuffer();
                                        _tail.copy(_currentWritePosition);
                                        assert assertBuffersLimits();
                                    }
                                } else {
                                    // we can fit the event in the target buffer
                                    readPos.eventAccepted(); // done with processing in the stg buffer

                                    // how are we on free space?
                                    boolean interrupted = ensureFreeSpace(writePos.getCurPos(),
                                            writePos.getCurPos() + curEventSize, logDebugEnabled);
                                    if (interrupted) {
                                        _log.warn("ensureFreeSpace interrupted:" + readPos + " " + writePos);
                                        return readPos.getNumReadEvents();
                                    }

                                    writePos.determineWriteEnd(readPos);

                                    // we are good on free space, about time to copy the data
                                    copyReadEventToEventBuffer(readPos, writePos, eventListeners,
                                            statsCollector, logDebugEnabled);
                                }
                                break;
                            }
                            case PARTIAL_EVENT: {
                                final int curCapacity = readBuffer.capacity();
                                if (logDebugEnabled)
                                    _log.debug("partial event at " + readPos);
                                if (0 != readPos.getReadStart()) {
                                    // compact the stg buffer and try to read more data from the network
                                    compactStgBuffer(readPos, logDebugEnabled);
                                    hasMoreInStgBuffer = false;
                                } else if (curCapacity >= getMaxReadBufferCapacity()) {
                                    // we couldn't read an entire event into the staging buffer and we are
                                    // already at the max allowed size of the read buffer
                                    throw new InvalidEventException(
                                            "event too big to fit in staging buffer with capacity : "
                                                    + curCapacity + "; readPos:" + readPos
                                                    + "; consider increasing connectionDefaults.eventBuffer.maxSize"
                                                    + " or connectionDefaults.eventBuffer.maxEventSize if set explicitly.");
                                } else {
                                    // grow the staging buffer faster for small sizes and slower for big sizes;
                                    // intuitively: <= 5K - 3x, 25K - 2x, 125K - 1.6x, 625K - 1.5x and so on
                                    final double growFactor = curCapacity <= 5 * 1024 ? 3.0
                                            : 1.0 + 2.0 * LN_5 / Math.log(curCapacity / 1024.0);
                                    final int newSize = Math.min(getMaxReadBufferCapacity(),
                                            (int) (growFactor * curCapacity));
                                    if (newSize < curCapacity) {
                                        throw new DatabusRuntimeException("unexpected readbuffer size: " + newSize
                                                + "; readBuffer=" + readBuffer + "; readBufferCapacity="
                                                + getMaxReadBufferCapacity());
                                    }
                                    readPos.growReadBuffer(newSize);
                                    hasMoreInStgBuffer = false;
                                }
                                break;
                            }
                            case SCN_REGRESSION: {
                                // events should be monotonically increasing, so skip the event and all
                                // the events before it (the same buffer should have only increasing events)
                                String errMsg = logSequenceErrorPackets(readPos);
                                _log.warn("got an old event: seq=" + readPos.getSeq() + ", " + errMsg);
                                readPos.eventSkipped();
                                break;
                            }
                            case INVALID_EVENT: {
                                if (null != statsCollector)
                                    statsCollector.registerEventError(DbusEventInternalReadable.EventScanStatus.ERR);
                                throw new InvalidEventException();
                            }
                            case MISSING_EOP: {
                                String errMsg = logSequenceErrorPackets(readPos);
                                _log.error("detected missing EOP: " + errMsg);
                                throw new InvalidEventException(errMsg);
                            }
                            default:
                                throw new IllegalStateException("unknown scan status: " + eventScanStatus);
                            }
                        }

                        if (!readPos.hasNext()) {
                            readBuffer.clear();
                        }
                    } finally {
                        _queueLock.unlock();
                    }
                }
            }
        } finally {
            if (null != statsCollector) {
                statsCollector.registerBufferMetrics(getMinScn(), this.lastWrittenScn(), this.getPrevScn(),
                        this.getBufferFreeSpace());
                statsCollector.registerTimestampOfFirstEvent(_timestampOfFirstEvent);
            }
            _eventState = WindowState.ENDED;
        }
    } catch (RuntimeException re) {
        _log.error("Got runtime exception in readEvents: " + re.getMessage(), re);
        _log.error("Buffer State: " + toString());
        throw re;
    } finally {
        _readBufferLock.unlock();
        writePos.close();
    }

    if (logDebugEnabled)
        _log.debug("readEvents result: " + readPos + " " + writePos);

    return readPos.getNumReadEvents();
}
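The PARTIAL_EVENT branch above grows the staging buffer by a factor that decays with capacity(): 3x up to 5K, then 1 + 2*ln(5)/ln(capacity/1024), giving 2x at 25K, ~1.67x at 125K, and 1.5x at 625K. Below is a standalone sketch of that heuristic; LN_5 and the max-capacity parameter are stand-ins for the corresponding DbusEventBuffer fields, and the class name is illustrative only.

// Sketch of the staging-buffer growth heuristic: the factor shrinks as the
// buffer grows and approaches 1x for very large buffers, while the result is
// always capped at the maximum read-buffer capacity.
public class GrowthFactorSketch {
    static final double LN_5 = Math.log(5.0);

    static int nextCapacity(int curCapacity, int maxReadBufferCapacity) {
        double growFactor = curCapacity <= 5 * 1024
                ? 3.0
                : 1.0 + 2.0 * LN_5 / Math.log(curCapacity / 1024.0);
        return Math.min(maxReadBufferCapacity, (int) (growFactor * curCapacity));
    }

    public static void main(String[] args) {
        int max = 1024 * 1024;
        System.out.println(nextCapacity(5 * 1024, max));   // 15360  (3.0x)
        System.out.println(nextCapacity(25 * 1024, max));  // 51200  (2.0x)
        System.out.println(nextCapacity(125 * 1024, max)); // 213333 (~1.67x)
    }
}

The guard in the original code (newSize < curCapacity) catches the degenerate case where the capped or rounded result fails to grow the buffer, which would otherwise loop forever on the same partial event.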