Usage examples for java.lang.Thread.holdsLock
public static native boolean holdsLock(Object obj);
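Thread.holdsLock(obj) returns true only when the calling thread currently holds the monitor of obj. The examples below use it almost exclusively inside assert statements, either to verify that a caller has acquired a required lock or to verify that a lock is not held before a potentially blocking call. A minimal, self-contained sketch of the first pattern (the Counter class and its methods are hypothetical and not taken from any of the projects below):

    public class Counter {
        private long value;

        public synchronized void increment() {
            value = incrementInternal(value);
        }

        // Internal helper that must only be called with the monitor of "this" held.
        private long incrementInternal(long current) {
            assert Thread.holdsLock(this) : "caller must synchronize on this Counter";
            return current + 1;
        }
    }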
From source file:org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.java
private Edit getEditInstance(FSEditLogOp op) {
    final Edit edit;
    final Server.Call rpcCall = Server.getCurCall().get();
    // only rpc calls not explicitly sync'ed on the log will be async.
    if (rpcCall != null && !Thread.holdsLock(this)) {
        edit = new RpcEdit(this, op, rpcCall);
    } else {
        edit = new SyncEdit(this, op);
    }
    return edit;
}
From source file:org.apache.hadoop.hdfs.server.namenode.FSEditLog.java
private long beginTransaction() {
    assert Thread.holdsLock(this);
    // get a new transactionId
    txid++;

    //
    // record the transactionId when new data was written to the edits log
    //
    TransactionId id = myTransactionId.get();
    id.txid = txid;
    return now();
}
From source file:org.apache.hadoop.hdfs.server.namenode.FSEditLog.java
private void endTransaction(long start) {
    assert Thread.holdsLock(this);
    // update statistics
    long end = now();
    numTransactions++;
    totalTimeTransactions += (end - start);
    if (metrics != null) // Metrics is non-null only when used inside name node
        metrics.addTransaction(end - start);
}
From source file:org.apache.hadoop.hdfs.server.namenode.FSEditLog.java
/**
 * Start writing to the log segment with the given txid.
 * Transitions from BETWEEN_LOG_SEGMENTS state to IN_LOG_SEGMENT state.
 */
private void startLogSegment(final long segmentTxId) throws IOException {
    assert Thread.holdsLock(this);

    LOG.info("Starting log segment at " + segmentTxId);
    Preconditions.checkArgument(segmentTxId > 0, "Bad txid: %s", segmentTxId);
    Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS, "Bad state: %s", state);
    Preconditions.checkState(segmentTxId > curSegmentTxId,
        "Cannot start writing to log segment " + segmentTxId
            + " when previous log segment started at " + curSegmentTxId);
    Preconditions.checkArgument(segmentTxId == txid + 1,
        "Cannot start log segment at txid %s when next expected " + "txid is %s",
        segmentTxId, txid + 1);

    numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0;

    // TODO no need to link this back to storage anymore!
    // See HDFS-2174.
    storage.attemptRestoreRemovedStorage();

    try {
        editLogStream = journalSet.startLogSegment(segmentTxId);
    } catch (IOException ex) {
        throw new IOException(
            "Unable to start log segment " + segmentTxId + ": too few journals successfully started.", ex);
    }

    curSegmentTxId = segmentTxId;
    state = State.IN_SEGMENT;
}
From source file:org.apache.hadoop.hdfs.server.namenode.FSNamesystem.java
/**
 * Move a file that is being written to be immutable.
 * @param src The filename
 * @param lease The lease for the client creating the file
 */
void internalReleaseLeaseOne(Lease lease, String src) throws IOException {
    assert Thread.holdsLock(this);

    LOG.info("Recovering lease=" + lease + ", src=" + src);

    INodeFile iFile = dir.getFileINode(src);
    if (iFile == null) {
        final String message = "DIR* NameSystem.internalReleaseCreate: "
            + "attempt to release a create lock on " + src + " file does not exist.";
        NameNode.stateChangeLog.warn(message);
        throw new IOException(message);
    }
    if (!iFile.isUnderConstruction()) {
        final String message = "DIR* NameSystem.internalReleaseCreate: "
            + "attempt to release a create lock on " + src + " but file is already closed.";
        NameNode.stateChangeLog.warn(message);
        throw new IOException(message);
    }

    INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) iFile;

    // Initialize lease recovery for pendingFile. If there are no blocks
    // associated with this file, then reap lease immediately. Otherwise
    // renew the lease and trigger lease recovery.
    if (pendingFile.getTargets() == null || pendingFile.getTargets().length == 0) {
        if (pendingFile.getBlocks().length == 0) {
            finalizeINodeFileUnderConstruction(src, pendingFile);
            NameNode.stateChangeLog
                .warn("BLOCK*" + " internalReleaseLease: No blocks found, lease removed for " + src);
            return;
        }
        // setup the Inode.targets for the last block from the blocksMap
        //
        Block[] blocks = pendingFile.getBlocks();
        Block last = blocks[blocks.length - 1];
        DatanodeDescriptor[] targets = new DatanodeDescriptor[blocksMap.numNodes(last)];
        Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(last);
        for (int i = 0; it != null && it.hasNext(); i++) {
            targets[i] = it.next();
        }
        pendingFile.setTargets(targets);
    }
    // start lease recovery of the last block for this file.
    pendingFile.assignPrimaryDatanode();
    Lease reassignedLease = reassignLease(lease, src, HdfsConstants.NN_RECOVERY_LEASEHOLDER, pendingFile);
    leaseManager.renewLease(reassignedLease);
}
From source file:org.apache.hadoop.mapred.CreditScheduler.java
private void jobNoLongerRunning(JobInProgress job) {
    assert Thread.holdsLock(this);
    JobInfo info = infos.remove(job);
    if (info != null) {
        info.mapSchedulable.cleanupMetrics();
        info.reduceSchedulable.cleanupMetrics();
    }
    poolMgr.removeJob(job);
}
From source file:org.apache.ignite.internal.managers.deployment.GridDeploymentClassLoader.java
/** {@inheritDoc} */
@Override public Class<?> loadClass(String name) throws ClassNotFoundException {
    assert !Thread.holdsLock(mux);

    // Check if we have package name on list of P2P loaded.
    // ComputeJob must be always loaded locally to avoid
    // any possible class casting issues.
    Class<?> cls = null;

    try {
        if (!"org.apache.ignite.compute.ComputeJob".equals(name)) {
            if (isLocallyExcluded(name))
                // P2P loaded class.
                cls = p2pLoadClass(name, true);
        }

        if (cls == null)
            cls = loadClass(name, true);
    }
    catch (ClassNotFoundException e) {
        throw e;
    }
    // Catch Throwable to secure against any errors resulted from
    // corrupted class definitions or other user errors.
    catch (Exception e) {
        throw new ClassNotFoundException("Failed to load class due to unexpected error: " + name, e);
    }

    return cls;
}
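Note that the Ignite class loader uses the opposite assertion from the HDFS examples above: it checks that the internal mux monitor is not held before doing work that may block or take other locks, which would risk deadlock. A minimal sketch of that pattern (ResourceCache, mux, and fetchRemote are hypothetical names used only for illustration):

    public class ResourceCache {
        private final Object mux = new Object();

        public byte[] load(String path) {
            // Guard against calling a potentially blocking method while holding mux.
            assert !Thread.holdsLock(mux) : "must not hold mux while fetching remotely";
            return fetchRemote(path);
        }

        private byte[] fetchRemote(String path) {
            // Placeholder for a network call that must not run under the mux lock.
            return new byte[0];
        }
    }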
From source file:org.apache.ignite.internal.managers.deployment.GridDeploymentClassLoader.java
/**
 * Loads the class with the specified binary name. The
 * default implementation of this method searches for classes in the
 * following order:
 * <p>
 * <ol>
 * <li> Invoke {@link #findLoadedClass(String)} to check if the class
 * has already been loaded. </li>
 * <li>Invoke the {@link #findClass(String)} method to find the class.</li>
 * </ol>
 * <p> If the class was found using the above steps, and the
 * {@code resolve} flag is true, this method will then invoke the {@link
 * #resolveClass(Class)} method on the resulting {@code Class} object.
 *
 * @param name The binary name of the class.
 * @param resolve If {@code true} then resolve the class.
 * @return The resulting {@code Class} object.
 * @throws ClassNotFoundException If the class could not be found
 */
@Nullable private Class<?> p2pLoadClass(String name, boolean resolve) throws ClassNotFoundException {
    assert !Thread.holdsLock(mux);

    // First, check if the class has already been loaded.
    Class<?> cls = findLoadedClass(name);

    if (cls == null)
        cls = findClass(name);

    if (resolve)
        resolveClass(cls);

    return cls;
}
From source file:org.apache.ignite.internal.managers.deployment.GridDeploymentClassLoader.java
/** {@inheritDoc} */
@Nullable @Override protected Class<?> findClass(String name) throws ClassNotFoundException {
    assert !Thread.holdsLock(mux);

    if (!isLocallyExcluded(name)) {
        // This is done for URI deployment in which case the parent loader
        // does not have the requested resource, but it is still locally
        // available.
        GridDeployment dep = ctx.deploy().getLocalDeployment(name);

        if (dep != null) {
            if (log.isDebugEnabled())
                log.debug("Found class in local deployment [cls=" + name + ", dep=" + dep + ']');

            return dep.deployedClass(name);
        }
    }

    String path = U.classNameToResourceName(name);

    GridByteArrayList byteSrc = sendClassRequest(name, path);

    synchronized (this) {
        Class<?> cls = findLoadedClass(name);

        if (cls == null) {
            if (byteMap != null)
                byteMap.put(path, byteSrc.array());

            cls = defineClass(name, byteSrc.internalArray(), 0, byteSrc.size());

            /* Define package in classloader. See URLClassLoader.defineClass(). */
            int i = name.lastIndexOf('.');

            if (i != -1) {
                String pkgName = name.substring(0, i);

                if (getPackage(pkgName) == null)
                    // Too much nulls is normal because we don't have package's meta info.
                    definePackage(pkgName, null, null, null, null, null, null, null);
            }
        }

        if (log.isDebugEnabled())
            log.debug("Loaded class [cls=" + name + ", ldr=" + this + ']');

        return cls;
    }
}
From source file:org.apache.ignite.internal.managers.deployment.GridDeploymentClassLoader.java
/**
 * Sends class-loading request to all nodes associated with this class loader.
 *
 * @param name Class name.
 * @param path Class path.
 * @return Class byte source.
 * @throws ClassNotFoundException If class was not found.
 */
private GridByteArrayList sendClassRequest(String name, String path) throws ClassNotFoundException {
    assert !Thread.holdsLock(mux);

    long endTime = computeEndTime(p2pTimeout);

    Collection<UUID> nodeListCp;
    Map<UUID, IgniteUuid> nodeLdrMapCp;

    synchronized (mux) {
        // Skip requests for the previously missed classes.
        if (missedRsrcs != null && missedRsrcs.contains(path))
            throw new ClassNotFoundException("Failed to peer load class [class=" + name
                + ", nodeClsLdrIds=" + nodeLdrMap + ", parentClsLoader=" + getParent() + ']');

        // If single-node mode, then node cannot change and we simply reuse list and map.
        // Otherwise, make copies that can be used outside synchronization.
        nodeListCp = singleNode ? nodeList : new LinkedList<>(nodeList);
        nodeLdrMapCp = singleNode ? nodeLdrMap : new HashMap<>(nodeLdrMap);
    }

    IgniteCheckedException err = null;

    for (UUID nodeId : nodeListCp) {
        if (nodeId.equals(ctx.discovery().localNode().id()))
            // Skip local node as it is already used as parent class loader.
            continue;

        IgniteUuid ldrId = nodeLdrMapCp.get(nodeId);

        ClusterNode node = ctx.discovery().node(nodeId);

        if (node == null) {
            if (log.isDebugEnabled())
                log.debug("Found inactive node in class loader (will skip): " + nodeId);

            continue;
        }

        try {
            GridDeploymentResponse res = comm.sendResourceRequest(path, ldrId, node, endTime);

            if (res == null) {
                String msg = "Failed to send class-loading request to node (is node alive?) [node="
                    + node.id() + ", clsName=" + name + ", clsPath=" + path + ", clsLdrId=" + ldrId
                    + ", parentClsLdr=" + getParent() + ']';

                if (!quiet)
                    U.warn(log, msg);
                else if (log.isDebugEnabled())
                    log.debug(msg);

                err = new IgniteCheckedException(msg);

                continue;
            }

            if (res.success())
                return res.byteSource();

            // In case of shared resources/classes all nodes should have it.
            if (log.isDebugEnabled())
                log.debug("Failed to find class on remote node [class=" + name + ", nodeId=" + node.id()
                    + ", clsLdrId=" + ldrId + ", reason=" + res.errorMessage() + ']');

            synchronized (mux) {
                if (missedRsrcs != null)
                    missedRsrcs.add(path);
            }

            throw new ClassNotFoundException("Failed to peer load class [class=" + name
                + ", nodeClsLdrs=" + nodeLdrMapCp + ", parentClsLoader=" + getParent()
                + ", reason=" + res.errorMessage() + ']');
        }
        catch (IgniteCheckedException e) {
            // This thread should be interrupted again in communication if it
            // got interrupted. So we assume that thread can be interrupted
            // by processing cancellation request.
            if (Thread.currentThread().isInterrupted()) {
                if (!quiet)
                    U.error(log, "Failed to find class probably due to task/job cancellation: " + name, e);
                else if (log.isDebugEnabled())
                    log.debug("Failed to find class probably due to task/job cancellation [name=" + name
                        + ", err=" + e + ']');
            }
            else {
                if (!quiet)
                    U.warn(log, "Failed to send class-loading request to node (is node alive?) [node="
                        + node.id() + ", clsName=" + name + ", clsPath=" + path + ", clsLdrId=" + ldrId
                        + ", parentClsLdr=" + getParent() + ", err=" + e + ']');
                else if (log.isDebugEnabled())
                    log.debug("Failed to send class-loading request to node (is node alive?) [node="
                        + node.id() + ", clsName=" + name + ", clsPath=" + path + ", clsLdrId=" + ldrId
                        + ", parentClsLdr=" + getParent() + ", err=" + e + ']');

                err = e;
            }
        }
    }

    throw new ClassNotFoundException("Failed to peer load class [class=" + name + ", nodeClsLdrs="
        + nodeLdrMapCp + ", parentClsLoader=" + getParent() + ']', err);
}