List of usage examples for java.lang.Thread.holdsLock
public static native boolean holdsLock(Object obj);
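Thread.holdsLock(obj) returns true only when the calling thread currently owns the monitor of obj; it says nothing about other threads and does not apply to java.util.concurrent locks. As the examples below show, it is most often used inside assertions to document and verify that a method must be called with a particular lock held (remember that assert statements only run when assertions are enabled with -ea). A minimal sketch of that pattern, with hypothetical class and field names not taken from any example below:

public class Counter {
    private final Object lock = new Object();
    private int value;

    // Callers must hold 'lock'; the assert documents and checks that contract.
    private void incrementLocked() {
        assert Thread.holdsLock(lock);
        value++;
    }

    public void increment() {
        synchronized (lock) {
            incrementLocked();
        }
    }
}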
From source file:Model.RaptorUCIEngine.java
/**
 * The engine wants to send infos to the GUI. This should be done whenever
 * one of the infos has changed. The engine can send only selected infos and
 * multiple infos can be sent with one info command, e.g.
 * "info currmove e2e4 currmovenumber 1" or
 * "info depth 12 nodes 123456 nps 100000". Also all infos belonging to the
 * pv should be sent together, e.g.
 * "info depth 2 score cp 214 time 1242 nodes 2124 nps 34928 pv e2e4 e7e5 g1f3"
 * I suggest starting to send "currmove", "currmovenumber", "currline" and
 * "refutation" only after one second to avoid too much traffic.
 *
 * <pre>
 * Additional info:
 * depth <x>
 *    search depth in plies
 * seldepth <x>
 *    selective search depth in plies,
 *    if the engine sends seldepth, a "depth" must also be present in the same string.
 * time <x>
 *    the time searched in ms, this should be sent together with the pv.
 * nodes <x>
 *    x nodes searched, the engine should send this info regularly
 * pv <move1> ... <movei>
 *    the best line found
 * multipv <num>
 *    this is for the multi pv mode.
 *    for the best move/pv add "multipv 1" in the string when you send the pv.
 *    in k-best mode always send all k variants in k strings together.
 * score
 *    cp <x>
 *       the score from the engine's point of view in centipawns.
 *    mate <y>
 *       mate in y moves, not plies.
 *       If the engine is getting mated, use negative values for y.
 *    lowerbound
 *       the score is just a lower bound.
 *    upperbound
 *       the score is just an upper bound.
 * currmove <move>
 *    currently searching this move
 * currmovenumber <x>
 *    currently searching move number x, for the first move x should be 1 not 0.
 * hashfull <x>
 *    the hash is x permill full, the engine should send this info regularly
 * nps <x>
 *    x nodes per second searched, the engine should send this info regularly
 * tbhits <x>
 *    x positions were found in the endgame table bases
 * cpuload <x>
 *    the cpu usage of the engine is x permill.
 * string <str>
 *    any string str which will be displayed by the engine,
 *    if there is a string command the rest of the line will be interpreted as <str>.
 * refutation <move1> <move2> ... <movei>
 *    move <move1> is refuted by the line <move2> ... <movei>, i can be any number >= 1.
 *    Example: after move d1h5 is searched, the engine can send
 *    "info refutation d1h5 g6h5"
 *    if g6h5 is the best answer after d1h5 or if g6h5 refutes the move d1h5.
 *    if no refutation for d1h5 is found, the engine should just send
 *    "info refutation d1h5"
 *    The engine should only send this if the option "UCI_ShowRefutations" is set to true.
 * currline <cpunr> <move1> ... <movei>
 *    this is the current line the engine is calculating. <cpunr> is the number of the cpu if
 *    the engine is running on more than one cpu. <cpunr> = 1,2,3....
 *    if the engine is just using one cpu, <cpunr> can be omitted.
 *    If <cpunr> is greater than 1, always send all k lines in k strings together.
 *    The engine should only send this if the option "UCI_ShowCurrLine" is set to true.
 * </pre>
 *
 * Examples:
 *
 * <pre>
 * go infinite
 * info depth 1 seldepth 0 time 34 nodes 0 nps 151466 score cp 1 pv c7c5
 * info nps 151466 nodes 0 cpuload 0 hashfull 0 time 35
 * bestmove c7c5
 * stop
 * </pre>
 */
protected void parseInfoLine(String info, UCIInfoListener listener) {
    if (!isProcessingGo() || Thread.holdsLock(stopSynch)) {
        return;
    }
    RaptorStringTokenizer tok = new RaptorStringTokenizer(info, " ", true);
    tok.nextToken();

    int currentMoveNumber = 0;
    List<UCIInfo> infos = new ArrayList<UCIInfo>(10);
    String nextType = null;

    while (tok.hasMoreTokens()) {
        String type = null;
        if (nextType != null) {
            type = nextType;
            nextType = null;
        } else {
            type = tok.nextToken();
        }
        while (!isSupportedInfoType(type) && tok.hasMoreTokens()) {
            type = tok.nextToken();
        }
        if (!isSupportedInfoType(type)) {
            break;
        }

        if (type.equalsIgnoreCase("depth")) {
            DepthInfo depthInfo = new DepthInfo();
            depthInfo.setSearchDepthPlies(Integer.parseInt(tok.nextToken()));
            infos.add(depthInfo);
        } else if (type.equalsIgnoreCase("seldepth")) {
            SelectiveSearchDepthInfo ssDepthInfo = new SelectiveSearchDepthInfo();
            ssDepthInfo.setDepthInPlies(Integer.parseInt(tok.nextToken()));
            infos.add(ssDepthInfo);
        } else if (type.equalsIgnoreCase("time")) {
            TimeInfo timeInfo = new TimeInfo();
            timeInfo.setTimeMillis(Integer.parseInt(tok.nextToken()));
            infos.add(timeInfo);
        } else if (type.equalsIgnoreCase("nodes")) {
            NodesSearchedInfo nodesSearched = new NodesSearchedInfo();
            nodesSearched.setNodesSearched(Integer.parseInt(tok.nextToken()));
            infos.add(nodesSearched);
        } else if (type.equalsIgnoreCase("pv")) {
            BestLineFoundInfo bestLineFoundInfo = new BestLineFoundInfo();
            String currentMove = tok.nextToken();
            List<UCIMove> currentLine = new ArrayList<UCIMove>(10);
            while (true) {
                currentLine.add(new UCIMove(currentMove));
                if (tok.hasMoreTokens()) {
                    currentMove = tok.nextToken();
                    if (isSupportedInfoType(currentMove)) {
                        nextType = currentMove;
                        break;
                    }
                } else {
                    break;
                }
            }
            bestLineFoundInfo.setMoves(currentLine.toArray(new UCIMove[0]));
            infos.add(bestLineFoundInfo);
        } else if (type.equalsIgnoreCase("multipv")) {
            tok.nextToken();
        } else if (type.equalsIgnoreCase("score")) {
            ScoreInfo scoreInfo = new ScoreInfo();
            String nextToken = tok.nextToken();
            while (true) {
                if (nextToken.equalsIgnoreCase("cp")) {
                    scoreInfo.setValueInCentipawns(Integer.parseInt(tok.nextToken()));
                } else if (nextToken.equalsIgnoreCase("mate")) {
                    scoreInfo.setMateInMoves(Integer.parseInt(tok.nextToken()));
                } else if (nextToken.equalsIgnoreCase("lowerbound")) {
                    scoreInfo.setLowerBoundScore(true);
                } else if (nextToken.equalsIgnoreCase("upperbound")) {
                    scoreInfo.setUpperBoundScore(true);
                } else {
                    nextType = nextToken;
                    break;
                }
                if (tok.hasMoreTokens()) {
                    nextToken = tok.nextToken();
                } else {
                    break;
                }
            }
            infos.add(scoreInfo);
        } else if (type.equalsIgnoreCase("currmove")) {
            CurrentMoveInfo currentMoveInfo = new CurrentMoveInfo();
            currentMoveInfo.setMove(parseUCIMove(tok.nextToken()));
            currentMoveInfo.setMoveNumber(currentMoveNumber);
            infos.add(currentMoveInfo);
        } else if (type.equalsIgnoreCase("currentmovenumber")) {
            currentMoveNumber = Integer.parseInt(tok.nextToken());
        } else if (type.equalsIgnoreCase("hashfull")) {
            tok.nextToken();
        } else if (type.equalsIgnoreCase("nps")) {
            NodesPerSecondInfo nodesPerSecInfo = new NodesPerSecondInfo();
            nodesPerSecInfo.setNodesPerSecond(Integer.parseInt(tok.nextToken()));
            infos.add(nodesPerSecInfo);
        } else if (type.equalsIgnoreCase("tbhits")) {
            TableBaseHitsInfo tbInfo = new TableBaseHitsInfo();
            tbInfo.setNumberOfHits(Integer.parseInt(tok.nextToken()));
            infos.add(tbInfo);
        } else if (type.equalsIgnoreCase("cpuload")) {
            CPULoadInfo cpuInfo = new CPULoadInfo();
            cpuInfo.setCpuUsage(Integer.parseInt(tok.nextToken()));
            infos.add(cpuInfo);
        } else if (type.equalsIgnoreCase("string")) {
            StringInfo stringInfo = new StringInfo();
            stringInfo.setValue(tok.getWhatsLeft().trim());
            infos.add(stringInfo);
        }
    }
    listener.engineSentInfo(infos.toArray(new UCIInfo[infos.size()]));
}
From source file:com.clark.func.Functions.java
public static boolean isCurrentHoldsLock(Object lockObj) {
    return Thread.holdsLock(lockObj);
}
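A possible call site for this wrapper (hypothetical; not part of Functions.java): the result is true only while the calling thread is inside a synchronized block on the same object.

Object stateLock = new Object();
synchronized (stateLock) {
    boolean held = isCurrentHoldsLock(stateLock); // true: this thread owns stateLock's monitor
}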
From source file:com.vmware.aurora.vc.VcVirtualMachineImpl.java
private void waitForPowerStateToSync(VirtualMachine.PowerState state, int timeout) throws Exception {
    AuAssert.check(Thread.holdsLock(this));
    VirtualMachine vm = getManagedObject();
    try {
        while (timeout > 0) {
            runtime = checkReady(vm.getRuntime());
            // If runtime state matches the claimed power state, done.
            if (runtime.getPowerState() == state) {
                return;
            }
            logger.info("syncing power state " + state + " on " + this);
            timeout -= 10;
            wait(10 * 1000);
        }
    } catch (InterruptedException e) {
        // break out if the thread is interrupted
    }
}
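The assert matters here because the wait(10 * 1000) call also requires owning the monitor of this; without it, wait() would throw IllegalMonitorStateException. A hypothetical caller inside the same class (the method name and power-on step are illustrative, not from VcVirtualMachineImpl):

public synchronized void powerOnAndWaitForSync() throws Exception {
    // ... issue the power-on request here ...
    waitForPowerStateToSync(VirtualMachine.PowerState.poweredOn, 600);
}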
From source file:com.cloudera.impala.catalog.CatalogServiceCatalog.java
/**
 * Drops the partition specified in 'partitionSpec' from 'tbl'. Throws a
 * CatalogException if 'tbl' is not an HdfsTable. If the partition having the given
 * partition spec does not exist, null is returned. Otherwise, the modified table is
 * returned.
 */
public Table dropPartition(Table tbl, List<TPartitionKeyValue> partitionSpec) throws CatalogException {
    Preconditions.checkNotNull(tbl);
    Preconditions.checkNotNull(partitionSpec);
    Preconditions.checkState(Thread.holdsLock(tbl));
    if (!(tbl instanceof HdfsTable)) {
        throw new CatalogException("Table " + tbl.getFullName() + " is not an Hdfs table");
    }
    HdfsTable hdfsTable = (HdfsTable) tbl;
    if (hdfsTable.dropPartition(partitionSpec) == null) {
        return null;
    }
    return hdfsTable;
}
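Preconditions.checkState(Thread.holdsLock(tbl)) makes the caller's obligation explicit: the table's monitor must already be held when dropPartition runs. A hypothetical call site satisfying that precondition (the catalog variable is illustrative, not from CatalogServiceCatalog):

synchronized (tbl) {
    Table modified = catalog.dropPartition(tbl, partitionSpec);
    if (modified == null) {
        // the partition did not exist
    }
}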
From source file:com.zimbra.cs.db.DbMailItem.java
public static TypedIdList listItems(Folder folder, long messageSyncStart, MailItem.Type type,
        boolean descending, boolean older) throws ServiceException {
    Mailbox mbox = folder.getMailbox();
    assert Db.supports(Db.Capability.ROW_LEVEL_LOCKING) || Thread.holdsLock(mbox);
    TypedIdList result = new TypedIdList();
    DbConnection conn = mbox.getOperationConnection();
    PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        if (older) {
            stmt = conn.prepareStatement("SELECT id, type, uuid FROM " + getMailItemTableName(folder)
                    + " WHERE " + IN_THIS_MAILBOX_AND + " type = ? AND folder_id = ? AND date < ?"
                    + " ORDER BY date" + (descending ? " DESC" : ""));
        } else {
            stmt = conn.prepareStatement("SELECT id, type, uuid FROM " + getMailItemTableName(folder)
                    + " WHERE " + IN_THIS_MAILBOX_AND + " type = ? AND folder_id = ? AND date >= ?"
                    + " ORDER BY date" + (descending ? " DESC" : ""));
        }
        int pos = 1;
        pos = setMailboxId(stmt, mbox, pos);
        stmt.setByte(pos++, type.toByte());
        stmt.setInt(pos++, folder.getId());
        stmt.setLong(pos++, messageSyncStart);
        rs = stmt.executeQuery();
        while (rs.next()) {
            MailItem.Type dataType = MailItem.Type.of(rs.getByte(2));
            result.add(dataType, rs.getInt(1), rs.getString(3));
        }
        return result;
    } catch (SQLException e) {
        throw ServiceException.FAILURE("fetching item list for folder " + folder.getId(), e);
    } finally {
        DbPool.closeResults(rs);
        DbPool.closeStatement(stmt);
    }
}
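The assertion accepts either of two situations: the database supports row-level locking, or the calling thread already holds the mailbox monitor. A hypothetical caller that satisfies the second case (variable names are illustrative, not from DbMailItem):

synchronized (mbox) { // makes Thread.holdsLock(mbox) true inside listItems
    TypedIdList olderItems = DbMailItem.listItems(folder, cutoffMillis, MailItem.Type.MESSAGE, true, true);
}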
From source file:com.zimbra.cs.db.DbMailItem.java
/**
 * Returns the conversation ids corresponding to messages with the given cutoff time.
 *
 * @param folder
 * @param messageSyncStart
 * @param type
 * @param descending
 * @param older
 * @return
 * @throws ServiceException
 */
public static TypedIdList listConvItems(Folder folder, long messageSyncStart, MailItem.Type type,
        boolean descending, boolean older) throws ServiceException {
    Mailbox mbox = folder.getMailbox();
    assert Db.supports(Db.Capability.ROW_LEVEL_LOCKING) || Thread.holdsLock(mbox);
    TypedIdList result = new TypedIdList();
    DbConnection conn = mbox.getOperationConnection();
    PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        if (older) {
            stmt = conn.prepareStatement("SELECT parent_id FROM " + getMailItemTableName(folder)
                    + " WHERE " + IN_THIS_MAILBOX_AND + " type = ? AND date < ?"
                    + " ORDER BY date" + (descending ? " DESC" : ""));
        } else {
            stmt = conn.prepareStatement("SELECT parent_id FROM " + getMailItemTableName(folder)
                    + " WHERE " + IN_THIS_MAILBOX_AND + " type = ? AND date >= ?"
                    + " ORDER BY date" + (descending ? " DESC" : ""));
        }
        int pos = 1;
        pos = setMailboxId(stmt, mbox, pos);
        stmt.setByte(pos++, MailItem.Type.MESSAGE.toByte()); // a message's parent_id is always a conversation
        stmt.setLong(pos++, messageSyncStart);
        rs = stmt.executeQuery();
        while (rs.next()) {
            int id = rs.getInt(1);
            if (id != 0 && !result.contains(id)) {
                result.add(MailItem.Type.CONVERSATION, id, "");
            }
        }
        return result;
    } catch (SQLException e) {
        throw ServiceException.FAILURE("fetching item list for folder " + folder.getId(), e);
    } finally {
        DbPool.closeResults(rs);
        DbPool.closeStatement(stmt);
    }
}
From source file:org.apache.geode.distributed.internal.membership.gms.membership.GMSJoinLeave.java
/**
 * Transitions this member into the coordinator role. This must be invoked under a synch on
 * viewInstallationLock that was held at the time the decision was made to become coordinator so
 * that the decision is atomic with actually becoming coordinator.
 *
 * @param oldCoordinator may be null
 */
private void becomeCoordinator(InternalDistributedMember oldCoordinator) {
    assert Thread.holdsLock(viewInstallationLock);
    if (isCoordinator) {
        return;
    }
    logger.info("This member is becoming the membership coordinator with address {}", localAddress);
    isCoordinator = true;
    org.apache.geode.distributed.internal.membership.gms.interfaces.Locator locator = services.getLocator();
    if (locator != null) {
        locator.setIsCoordinator(true);
    }
    if (currentView == null) {
        // create the initial membership view
        NetView newView = new NetView(this.localAddress);
        newView.setFailureDetectionPort(localAddress, services.getHealthMonitor().getFailureDetectionPort());
        this.localAddress.setVmViewId(0);
        installView(newView);
        isJoined = true;
        createAndStartViewCreator(newView);
        startViewBroadcaster();
    } else {
        // create and send out a new view
        NetView newView = addMemberToNetView(oldCoordinator);
        createAndStartViewCreator(newView);
        startViewBroadcaster();
    }
}
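The javadoc's contract is that the same synchronized block that decides to become coordinator also performs the transition, so the assert simply verifies that the caller kept its end of the bargain. A hypothetical caller matching that contract (the decision helper is illustrative, not from GMSJoinLeave):

synchronized (viewInstallationLock) {
    if (shouldBecomeCoordinator()) { // hypothetical decision made under the same lock
        becomeCoordinator(oldCoordinator);
    }
}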
From source file:org.apache.hadoop.dfs.FSNamesystem.java
private void updateStats(DatanodeDescriptor node, boolean isAdded) {
    //
    // The statistics are protected by the heartbeat lock
    //
    assert (Thread.holdsLock(heartbeats));
    if (isAdded) {
        capacityTotal += node.getCapacity();
        capacityUsed += node.getDfsUsed();
        capacityRemaining += node.getRemaining();
        totalLoad += node.getXceiverCount();
    } else {
        capacityTotal -= node.getCapacity();
        capacityUsed -= node.getDfsUsed();
        capacityRemaining -= node.getRemaining();
        totalLoad -= node.getXceiverCount();
    }
}
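A hypothetical call site consistent with the comment that the statistics are protected by the heartbeat lock (illustrative only, not taken from FSNamesystem):

synchronized (heartbeats) {
    heartbeats.add(nodeDescriptor);
    updateStats(nodeDescriptor, true); // assert passes: the heartbeats monitor is held
}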
From source file:org.apache.hadoop.hdfs.server.namenode.FSEditLog.java
private long beginTransaction() {
    assert Thread.holdsLock(this);
    // get a new transactionId
    txid++;
    //
    // record the transactionId when new data was written to the edits log
    //
    TransactionId id = myTransactionId.get();
    id.txid = txid;
    return monotonicNow();
}
From source file:org.apache.hadoop.hdfs.server.namenode.FSEditLog.java
private void endTransaction(long start) {
    assert Thread.holdsLock(this);

    // update statistics
    long end = monotonicNow();
    numTransactions++;
    totalTimeTransactions += (end - start);
    if (metrics != null) // Metrics is non-null only when used inside name node
        metrics.addTransaction(end - start);
}
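Both beginTransaction and endTransaction assert Thread.holdsLock(this), so they are intended to bracket work performed while synchronized on the FSEditLog instance itself. A hypothetical sketch of that bracketing (logEdit here is illustrative, not the actual FSEditLog code):

void logEdit(FSEditLogOp op) {
    synchronized (this) {
        long start = beginTransaction();
        // ... write op to the edit log stream ...
        endTransaction(start);
    }
}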