List of usage examples for java.lang.System.identityHashCode
@HotSpotIntrinsicCandidate
public static native int identityHashCode(Object x);
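identityHashCode returns the hash code the default Object.hashCode() would produce for the given object, whether or not the object's class overrides hashCode(); for the null reference it returns zero. Before the project examples, here is a minimal, self-contained sketch of that contract:

public class IdentityHashCodeDemo {
    public static void main(String[] args) {
        // Two equal but distinct String instances on the heap.
        String a = new String("hello");
        String b = new String("hello");

        // String overrides hashCode(), so equal values hash equally.
        System.out.println(a.hashCode() == b.hashCode()); // true

        // identityHashCode bypasses the override and reflects object identity;
        // distinct instances almost always differ (collisions remain possible).
        System.out.println(System.identityHashCode(a) == System.identityHashCode(b)); // almost always false

        // By specification, the identity hash of the null reference is 0.
        System.out.println(System.identityHashCode(null)); // 0
    }
}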
From source file:org.nightlabs.jfire.trade.Article.java
@Override
public String toString() {
    return this.getClass().getName()
            + '@' + Integer.toHexString(System.identityHashCode(this))
            + '[' + organisationID + ','
            + ObjectIDUtil.longObjectIDFieldToString(articleID) + ']'
            + "(version " + JDOHelper.getVersion(this) + ')';
}
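This reproduces the "ClassName@hex" prefix of the default Object.toString() while staying tied to object identity even when hashCode() is overridden with a value-based implementation. A minimal sketch of the same pattern (the Order class and its hash value are hypothetical):

class Order {
    @Override
    public int hashCode() {
        return 42; // value-based hash; the default toString() would print "...@2a" for every instance
    }

    @Override
    public String toString() {
        // Same "ClassName@hex" shape as Object.toString(), but based on identity.
        return getClass().getName() + '@' + Integer.toHexString(System.identityHashCode(this));
    }
}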
From source file:org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler.java
private static String addSingleFileToDistributedCache(PigContext pigContext, Configuration conf,
        String filename, String prefix) throws IOException {
    if (!pigContext.inIllustrator && !FileLocalizer.fileExists(filename, pigContext)) {
        throw new IOException("Internal error: skew join partition file " + filename + " does not exist");
    }

    String symlink = filename;
    // XXX Hadoop currently doesn't support distributed cache in local mode.
    // This line will be removed after the support is added by Hadoop team.
    if (!Utils.isLocal(pigContext, conf)) {
        symlink = prefix + "_" + Integer.toString(System.identityHashCode(filename)) + "_"
                + Long.toString(System.currentTimeMillis());
        filename = filename + "#" + symlink;
        setupDistributedCache(pigContext, conf, new String[] { filename }, false);
    }
    return symlink;
}
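Pig combines the identity hash of the filename String with the current time to build a symlink name that is unlikely to collide. A stripped-down sketch of that naming scheme (the method name is illustrative); note that neither component guarantees uniqueness on its own:

static String uniqueSymlinkName(String prefix, String filename) {
    // Identity hash of this particular String object, plus the wall clock:
    // collisions are unlikely, though not impossible.
    return prefix + "_" + System.identityHashCode(filename) + "_" + System.currentTimeMillis();
}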
From source file:org.pentaho.reporting.engine.classic.core.layout.output.AbstractReportProcessor.java
public void processReport() throws ReportProcessingException {
    PerformanceLoggingStopWatch swGlobal = getPerformanceMonitorContext()
            .createStopWatch(PerformanceTags.REPORT_PROCESSING);
    try {
        swGlobal.start();
        if (AbstractReportProcessor.logger.isDebugEnabled()) {
            AbstractReportProcessor.logger.debug(new MemoryUsageMessage(
                    System.identityHashCode(Thread.currentThread()) + ": Report processing time: Starting: "));
        }
        try {
            final long startTime = System.currentTimeMillis();
            fireProcessingStarted(new ReportProgressEvent(this));

            if (isPaginated() == false) {
                // Processes the whole report ..
                prepareReportProcessing();
            }

            PerformanceLoggingStopWatch sw = getPerformanceMonitorContext()
                    .createStopWatch(PerformanceTags.REPORT_GENERATE);
            try {
                sw.start();
                final long paginateTime = System.currentTimeMillis();
                if (AbstractReportProcessor.logger.isDebugEnabled()) {
                    AbstractReportProcessor.logger
                            .debug(new MemoryUsageMessage(System.identityHashCode(Thread.currentThread())
                                    + ": Report processing time: Pagination time: "
                                    + ((paginateTime - startTime) / 1000.0)));
                }
                if (getLogicalPageCount() == 0) {
                    throw new EmptyReportException("Report did not generate any content.");
                }

                // Start from scratch ...
                PageState state = getLogicalPageState(0);
                while (state != null) {
                    state = processPage(state, true);
                }

                final long endTime = System.currentTimeMillis();
                if (AbstractReportProcessor.logger.isDebugEnabled()) {
                    AbstractReportProcessor.logger
                            .debug(new MemoryUsageMessage(System.identityHashCode(Thread.currentThread())
                                    + ": Report processing time: " + ((endTime - startTime) / 1000.0)));
                }
            } finally {
                sw.close();
            }
        } catch (EmptyReportException re) {
            throw re;
        } catch (ReportInterruptedException interrupt) {
            // Log the interruption stacktrace at debug level only, since interruption is ok.
            if (AbstractReportProcessor.logger.isDebugEnabled()) {
                AbstractReportProcessor.logger
                        .debug("Report processing interrupted: " + this.getClass().getName(), interrupt);
            }
            throw interrupt;
        } catch (ReportProcessingException re) {
            AbstractReportProcessor.logger
                    .error(System.identityHashCode(Thread.currentThread()) + ": Report processing failed.", re);
            throw re;
        } catch (Exception e) {
            AbstractReportProcessor.logger
                    .error(System.identityHashCode(Thread.currentThread()) + ": Report processing failed.", e);
            throw new ReportProcessingException("Failed to process the report", e);
        }

        fireProcessingFinished(new ReportProgressEvent(this, getLogicalPageCount(), getLogicalPageCount()));
        if (AbstractReportProcessor.logger.isDebugEnabled()) {
            AbstractReportProcessor.logger
                    .debug(System.identityHashCode(Thread.currentThread()) + ": Report processing finished.");
        }
    } finally {
        swGlobal.close();
    }
}
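Here the identity hash of the current thread serves as a cheap tag that keeps interleaved debug output from concurrent report runs distinguishable. A minimal sketch of the same tagging idea, using System.out in place of the report engine's logger:

static void debugWithThreadTag(String message) {
    // Prefix each line with the current thread's identity hash so that
    // output from concurrent runs can be told apart.
    System.out.println(System.identityHashCode(Thread.currentThread()) + ": " + message);
}

In new code, Thread.currentThread().getName() usually makes a more readable tag; the identity hash merely guarantees a value without touching thread naming.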
From source file:org.apache.hadoop.hive.ql.exec.tez.WorkloadManager.java
@VisibleForTesting
/**
 * Adds a test event that's processed at the end of WM iteration.
 * This allows tests to wait for an iteration to finish without messing with the threading
 * logic (that is prone to races if we e.g. remember the state before and wait for it to change,
 * self-deadlocking when triggering things explicitly and calling a blocking API, and hanging
 * forever if we wait for "another iteration"). If addTestEvent is called after all the other
 * calls of interest, it is guaranteed that the events from those calls will be processed
 * fully when the future is triggered.
 */
Future<Boolean> addTestEvent() {
    SettableFuture<Boolean> testEvent = SettableFuture.create();
    currentLock.lock();
    try {
        LOG.info("Adding test event " + System.identityHashCode(testEvent));
        current.testEvents.add(testEvent);
        notifyWmThreadUnderLock();
    } finally {
        currentLock.unlock();
    }
    return testEvent;
}
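The identity hash gives the otherwise anonymous future a stable label that can be matched across later log lines. A condensed sketch of the idea, with java.util.concurrent.CompletableFuture standing in for Guava's SettableFuture and a plain concurrent queue for the manager state:

import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;

class TestEvents {
    private final Queue<CompletableFuture<Boolean>> events = new ConcurrentLinkedQueue<>();

    CompletableFuture<Boolean> addTestEvent() {
        CompletableFuture<Boolean> testEvent = new CompletableFuture<>();
        // The identity hash is the only readable handle on this future in the logs.
        System.out.println("Adding test event " + System.identityHashCode(testEvent));
        events.add(testEvent);
        return testEvent;
    }
}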
From source file:org.mule.transport.AbstractConnector.java
@Override
public String toString() {
    final StringBuffer sb = new StringBuffer(120);
    final String nl = System.getProperty("line.separator");

    sb.append(ClassUtils.getSimpleName(this.getClass()));
    // format message for multi-line output, single-line is not readable
    sb.append(nl);
    sb.append("{");
    sb.append(nl);
    sb.append("  name=").append(name);
    sb.append(nl);
    sb.append("  lifecycle=")
            .append(lifecycleManager == null ? "<not in lifecycle>" : lifecycleManager.getCurrentPhase());
    sb.append(nl);
    sb.append("  this=").append(Integer.toHexString(System.identityHashCode(this)));
    sb.append(nl);
    sb.append("  numberOfConcurrentTransactedReceivers=").append(numberOfConcurrentTransactedReceivers);
    sb.append(nl);
    sb.append("  createMultipleTransactedReceivers=").append(createMultipleTransactedReceivers);
    sb.append(nl);
    sb.append("  connected=").append(connected);
    sb.append(nl);
    sb.append("  supportedProtocols=").append(supportedProtocols);
    sb.append(nl);
    sb.append("  serviceOverrides=");
    if (serviceOverrides != null) {
        for (Map.Entry<Object, Object> entry : serviceOverrides.entrySet()) {
            sb.append(nl);
            sb.append("    ").append(String.format("%s=%s", entry.getKey(), entry.getValue()));
        }
    } else {
        sb.append("<none>");
    }
    sb.append(nl);
    sb.append('}');
    sb.append(nl);
    return sb.toString();
}
From source file:com.xpn.xwiki.api.Document.java
/**
 * Drop permissions for the remainder of the rendering cycle.
 * After this is called:
 * <ul>
 * <li>1. {@link com.xpn.xwiki.api.Api#hasProgrammingRights()} will always return false.</li>
 * <li>2. {@link com.xpn.xwiki.api.XWiki#getDocumentAsAuthor(org.xwiki.model.reference.DocumentReference)},
 * {@link com.xpn.xwiki.api.XWiki#getDocumentAsAuthor(String)}, {@link com.xpn.xwiki.api.Document#saveAsAuthor()},
 * {@link com.xpn.xwiki.api.Document#saveAsAuthor(String)},
 * {@link com.xpn.xwiki.api.Document#saveAsAuthor(String, boolean)}, and
 * {@link com.xpn.xwiki.api.Document#deleteAsAuthor()} will perform all of their actions as if the document's
 * content author was the guest user (XWiki.XWikiGuest).</li>
 * </ul>
 * <p>
 * This sandboxing will expire at the end of the rendering cycle and can be suspended by
 * beginning a new rendering cycle. A rendering cycle can be begun by calling
 * {@link #getRenderedContent(String)}, {@link #display(String)} (or variations thereof),
 * by invoking the include macro, or by using {@link com.xpn.xwiki.api.XWiki#includeTopic(String)}.
 * <p>
 * NOTE: Even if you include the same document, permissions will be regained.
 * What this does is sandbox the remainder of the code on the page: although the page
 * can temporarily suspend the permissions drop, it cannot get itself executed with
 * permissions, because if it calls itself it will hit the drop function first.
 * <p>
 * If you are interested in a more secure sandboxing method where code is guaranteed not
 * to have permissions for the remainder of the request, you should consider
 * {@link com.xpn.xwiki.api.Context#dropPermissions()}.
 *
 * @since 3.2M2
 */
public void dropPermissions() {
    // Set the droppedPermissions key on the context so that if the context is cloned and
    // pushed, it will return false until it is popped again.
    final ExecutionContext context = Utils.getComponent(Execution.class).getContext();
    context.setProperty(XWikiConstant.DROPPED_PERMISSIONS, System.identityHashCode(context));
}
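The trick is that the stored value is the identity hash of the context itself: a cloned-and-pushed context carries the property over, but has a different identity, so the marker no longer matches. A self-contained sketch of that mechanism (RenderingContext and its methods are illustrative, not the XWiki API):

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

class RenderingContext {
    private final Map<String, Object> properties = new HashMap<>();

    void dropPermissions() {
        // Store this context's own identity hash as the marker value.
        properties.put("droppedPermissions", System.identityHashCode(this));
    }

    boolean hasDroppedPermissions() {
        // Matches only on the exact instance that dropped its permissions;
        // a clone inherits the property but has a different identity hash.
        return Objects.equals(properties.get("droppedPermissions"), System.identityHashCode(this));
    }
}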
From source file:gdsc.smlm.ij.plugins.CreateData.java
/**
 * Get a random generator. The generators used in the simulation can be adjusted by changing this method.
 *
 * @param seedAddition
 *            Added to the seed generated from the system time
 * @return A random generator
 */
private RandomGenerator createRandomGenerator(int seedAddition) {
    return new Well44497b(System.currentTimeMillis() + System.identityHashCode(this) + seedAddition);
    //return new Well19937c(System.currentTimeMillis() + System.identityHashCode(this));
}
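Mixing the clock with the instance's identity hash decorrelates seeds when several simulation objects are constructed within the same millisecond. The same idea with java.util.Random standing in for Commons Math's Well44497b:

import java.util.Random;

class Simulation {
    private Random createRandomGenerator(int seedAddition) {
        // Two instances created in the same millisecond still get different
        // seeds, because their identity hashes almost certainly differ.
        return new Random(System.currentTimeMillis() + System.identityHashCode(this) + seedAddition);
    }
}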
From source file:org.apache.geode.internal.cache.Oplog.java
private void writeOneKeyEntryForKRF(KRFEntry ke) throws IOException {
    DiskEntry de = ke.getDiskEntry();
    long diskRegionId = ke.getDiskRegionView().getId();
    long oplogKeyId;
    byte userBits;
    long valueOffset;
    int valueLength;
    Object deKey;
    VersionHolder tag = ke.versionTag;

    synchronized (de) {
        DiskId di = de.getDiskId();
        if (di == null) {
            return;
        }
        if (de.isRemovedFromDisk()) {
            // the entry was concurrently removed
            return;
        }
        synchronized (di) {
            // Make sure each one is still in this oplog.
            if (di.getOplogId() != getOplogId()) {
                return;
            }
            userBits = di.getUserBits();
            oplogKeyId = di.getKeyId();
            valueOffset = di.getOffsetInOplog();
            valueLength = di.getValueLength();
            deKey = de.getKey();
            if (valueOffset < 0) {
                assert (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits));
            }
        }
        if (tag == null) {
            if (EntryBits.isWithVersions(userBits) && de.getVersionStamp() != null) {
                tag = de.getVersionStamp().asVersionTag();
            } else if (de.getVersionStamp() != null) {
                throw new AssertionError("No version bits on entry we're writing to the krf " + de);
            }
        }
    }

    if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
        logger.trace(LogMarker.PERSIST_WRITES,
                "krf oplogId={} key={} oplogKeyId={} de={} vo={} vl={} diskRegionId={} version tag={}",
                oplogId, deKey, oplogKeyId, System.identityHashCode(de), valueOffset, valueLength,
                diskRegionId, tag);
    }

    byte[] keyBytes = EntryEventImpl.serialize(deKey);

    // Skip the invalid entries; their valueOffset is -1.
    writeOneKeyEntryForKRF(keyBytes, userBits, valueLength, diskRegionId, oplogKeyId, valueOffset, tag);
}
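In the trace line, System.identityHashCode(de) identifies the disk entry, presumably so the log can pinpoint the object cheaply without invoking its toString() or a possibly overridden hashCode(). A one-method sketch of that logging choice:

static String entryTag(Object diskEntry) {
    // Identify the object in a trace line without calling its own
    // toString()/hashCode(), which may be overridden or costly.
    return "de=" + System.identityHashCode(diskEntry);
}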
From source file:android.app.Activity.java
void dumpInner(String prefix, FileDescriptor fd, PrintWriter writer, String[] args) {
    writer.print(prefix);
    writer.print("Local Activity ");
    writer.print(Integer.toHexString(System.identityHashCode(this)));
    writer.println(" State:");
    String innerPrefix = prefix + "  ";
    writer.print(innerPrefix);
    writer.print("mResumed=");
    writer.print(mResumed);
    writer.print(" mStopped=");
    writer.print(mStopped);
    writer.print(" mFinished=");
    writer.println(mFinished);
    writer.print(innerPrefix);
    writer.print("mLoadersStarted=");
    writer.println(mLoadersStarted);
    writer.print(innerPrefix);
    writer.print("mChangingConfigurations=");
    writer.println(mChangingConfigurations);
    writer.print(innerPrefix);
    writer.print("mCurrentConfig=");
    writer.println(mCurrentConfig);
    if (mLoaderManager != null) {
        writer.print(prefix);
        writer.print("Loader Manager ");
        writer.print(Integer.toHexString(System.identityHashCode(mLoaderManager)));
        writer.println(":");
        mLoaderManager.dump(prefix + "  ", fd, writer, args);
    }
    mFragments.dump(prefix, fd, writer, args);
    writer.print(prefix);
    writer.println("View Hierarchy:");
    dumpViewHierarchy(prefix + "  ", writer, getWindow().getDecorView());
}