List of usage examples for java.util.Stack.push
public E push(E item)
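Before the project examples below, a minimal self-contained sketch (not taken from any of the projects listed; the class name StackPushDemo is illustrative) of the push contract: the item becomes the new top of the stack and is also returned to the caller.

import java.util.Stack;

public class StackPushDemo {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<>();
        // push appends the item to the top of the stack and returns the item itself
        String pushed = stack.push("first");
        stack.push("second");
        stack.push("third");
        System.out.println(pushed);       // first
        System.out.println(stack.peek()); // third (last pushed, first out)
        System.out.println(stack.pop());  // third
        System.out.println(stack);        // [first, second]
    }
}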
From source file: cf.adriantodt.utils.HTML2Discord.java
public static String toPlainText(String discordFormatMessage) {
    String strippedContent;
    // all the formatting keys to keep track of
    String[] keys = new String[] { "*", "_", "`", "~~" };

    // find all tokens (formatting strings described above)
    TreeSet<FormatToken> tokens = new TreeSet<>((t1, t2) -> Integer.compare(t1.start, t2.start));
    for (String key : keys) {
        Matcher matcher = Pattern.compile(Pattern.quote(key)).matcher(discordFormatMessage);
        while (matcher.find()) {
            tokens.add(new FormatToken(key, matcher.start()));
        }
    }

    // iterate over all tokens, find all matching pairs, and add them to the list toRemove
    Stack<FormatToken> stack = new Stack<>();
    List<FormatToken> toRemove = new ArrayList<>();
    boolean inBlock = false;
    for (FormatToken token : tokens) {
        if (stack.empty() || !stack.peek().format.equals(token.format)
                || stack.peek().start + token.format.length() == token.start) {
            // we are at an opening tag
            if (!inBlock) {
                // we are outside of a block -> handle normally
                if (token.format.equals("`")) {
                    // block start... invalidate all previous tags
                    stack.clear();
                    inBlock = true;
                }
                stack.push(token);
            } else if (token.format.equals("`")) {
                // we are inside of a block -> handle only block tags
                stack.push(token);
            }
        } else if (!stack.empty()) {
            // we found a matching close tag
            toRemove.add(stack.pop());
            toRemove.add(token);
            if (token.format.equals("`") && stack.empty()) {
                // the close tag closed the block
                inBlock = false;
            }
        }
    }

    // sort the tags to remove by their start index and iteratively build the remaining string
    Collections.sort(toRemove, (t1, t2) -> Integer.compare(t1.start, t2.start));
    StringBuilder out = new StringBuilder();
    int currIndex = 0;
    for (FormatToken formatToken : toRemove) {
        if (currIndex < formatToken.start) {
            out.append(discordFormatMessage.substring(currIndex, formatToken.start));
        }
        currIndex = formatToken.start + formatToken.format.length();
    }
    if (currIndex < discordFormatMessage.length()) {
        out.append(discordFormatMessage.substring(currIndex));
    }

    // return the stripped text, escaping all remaining formatting characters
    // (those that had no matching open/close tag, or that were left/right of a block)
    strippedContent = out.toString().replace("*", "\\*").replace("_", "\\_").replace("~", "\\~");
    return strippedContent;
}
From source file: com.wavemaker.json.JSONMarshaller.java
/**
 * doMarshal() returns some status Objects.
 *
 * CYCLE_DETECTED_OBJECT will be returned if a cycle was detected at a lower level, and this level
 * must not be written.
 *
 * fieldDefinition should never be null; its enclosed typeDefinition may very well be null.
 */
protected static Object doMarshal(Writer writer, Object obj, Object root, JSONState js, boolean sort,
        boolean topLevel, Stack<Object> touchedObjects, Stack<String> propertyNames,
        FieldDefinition fieldDefinition, int arrayLevel, TypeState typeState, boolean prettyPrint,
        int level, Logger logger) throws IOException {
    if (fieldDefinition == null) {
        throw new NullArgumentException("fieldDefinition");
    }

    touchedObjects.push(obj);

    try {
        if (obj != null && fieldDefinition.getTypeDefinition() == null) {
            fieldDefinition = ReflectTypeUtils.getFieldDefinition(obj.getClass(), typeState, false, null);
            arrayLevel = 0;
        }

        // do value conversion
        if (js.getValueTransformer() != null) {
            Tuple.Three<Object, FieldDefinition, Integer> tuple = js.getValueTransformer().transformToJSON(obj,
                    fieldDefinition, arrayLevel, root, getPropertyName(propertyNames, js), js.getTypeState());
            if (tuple != null) {
                obj = tuple.v1;
                fieldDefinition = tuple.v2;
                arrayLevel = tuple.v3;
            }
        }

        if (arrayLevel == fieldDefinition.getDimensions() && fieldDefinition.getTypeDefinition() != null
                && fieldDefinition.getTypeDefinition() instanceof WriteObjectConverter) {
            ((WriteObjectConverter) fieldDefinition.getTypeDefinition()).writeObject(obj, root,
                    getPropertyName(propertyNames, js), writer);
        } else if (obj == null) {
            writer.write("null");
            // handle arrays & Collections
        } else if (arrayLevel < fieldDefinition.getDimensions() || obj.getClass().isArray()) {
            writer.write("[");
            boolean firstElement = true;
            if (obj instanceof Collection) {
                for (Object elem : (Collection<?>) obj) {
                    if (!firstElement) {
                        writer.write(",");
                        if (prettyPrint) {
                            writer.write(" ");
                        }
                    }
                    doMarshal(writer, elem, root, js, sort, false, touchedObjects, propertyNames,
                            fieldDefinition, arrayLevel + 1, typeState, prettyPrint, level, logger);
                    if (firstElement) {
                        firstElement = false;
                    }
                }
            } else if (obj.getClass().isArray()) {
                int length = Array.getLength(obj);
                Object elem;
                for (int i = 0; i < length; i++) {
                    elem = Array.get(obj, i);
                    if (!firstElement) {
                        writer.write(",");
                        if (prettyPrint) {
                            writer.write(" ");
                        }
                    }
                    doMarshal(writer, elem, root, js, sort, false, touchedObjects, propertyNames,
                            fieldDefinition, arrayLevel + 1, typeState, prettyPrint, level, logger);
                    if (firstElement) {
                        firstElement = false;
                    }
                }
            } else {
                throw new WMRuntimeException(MessageResource.JSON_UNKNOWN_COLL_OR_ARRAY, obj, obj.getClass());
            }
            writer.write("]");
            // check for primitives
        } else if (fieldDefinition.getTypeDefinition() != null
                && fieldDefinition.getTypeDefinition() instanceof PrimitiveTypeDefinition) {
            ((PrimitiveTypeDefinition) fieldDefinition.getTypeDefinition()).toJson(writer, obj);
            // handle maps & objects
        } else {
            handleObject(obj, root, js, writer, touchedObjects, propertyNames, sort, fieldDefinition,
                    arrayLevel, typeState, prettyPrint, level, logger);
        }
        return null;
    } finally {
        touchedObjects.pop();
    }
}
From source file: com.espertech.esper.rowregex.EventRowRegexHelper.java
private static RegexNFAStrand recursiveBuildStatesInternal(RowRegexExprNode node,
        Map<String, ExprNode> variableDefinitions, Map<String, Pair<Integer, Boolean>> variableStreams,
        Stack<Integer> nodeNumStack) {
    if (node instanceof RowRegexExprNodeAlteration) {
        int nodeNum = 0;

        List<RegexNFAStateBase> cumulativeStartStates = new ArrayList<RegexNFAStateBase>();
        List<RegexNFAStateBase> cumulativeStates = new ArrayList<RegexNFAStateBase>();
        List<RegexNFAStateBase> cumulativeEndStates = new ArrayList<RegexNFAStateBase>();

        boolean isPassthrough = false;
        for (RowRegexExprNode child : node.getChildNodes()) {
            nodeNumStack.push(nodeNum);
            RegexNFAStrand strand = recursiveBuildStatesInternal(child, variableDefinitions, variableStreams,
                    nodeNumStack);
            nodeNumStack.pop();

            cumulativeStartStates.addAll(strand.getStartStates());
            cumulativeStates.addAll(strand.getAllStates());
            cumulativeEndStates.addAll(strand.getEndStates());
            if (strand.isPassthrough()) {
                isPassthrough = true;
            }

            nodeNum++;
        }

        return new RegexNFAStrand(cumulativeStartStates, cumulativeEndStates, cumulativeStates, isPassthrough);
    } else if (node instanceof RowRegexExprNodeConcatenation) {
        int nodeNum = 0;

        boolean isPassthrough = true;
        List<RegexNFAStateBase> cumulativeStates = new ArrayList<RegexNFAStateBase>();
        RegexNFAStrand[] strands = new RegexNFAStrand[node.getChildNodes().size()];
        for (RowRegexExprNode child : node.getChildNodes()) {
            nodeNumStack.push(nodeNum);
            strands[nodeNum] = recursiveBuildStatesInternal(child, variableDefinitions, variableStreams,
                    nodeNumStack);
            nodeNumStack.pop();

            cumulativeStates.addAll(strands[nodeNum].getAllStates());
            if (!strands[nodeNum].isPassthrough()) {
                isPassthrough = false;
            }

            nodeNum++;
        }

        // determine start states: all states until the first non-passthrough start state
        List<RegexNFAStateBase> startStates = new ArrayList<RegexNFAStateBase>();
        for (int i = 0; i < strands.length; i++) {
            startStates.addAll(strands[i].getStartStates());
            if (!strands[i].isPassthrough()) {
                break;
            }
        }

        // determine end states: all states from the back until the last non-passthrough end state
        List<RegexNFAStateBase> endStates = new ArrayList<RegexNFAStateBase>();
        for (int i = strands.length - 1; i >= 0; i--) {
            endStates.addAll(strands[i].getEndStates());
            if (!strands[i].isPassthrough()) {
                break;
            }
        }

        // hook up the end state of each strand with the start states of each next strand
        for (int i = strands.length - 1; i >= 1; i--) {
            RegexNFAStrand current = strands[i];
            for (int j = i - 1; j >= 0; j--) {
                RegexNFAStrand prior = strands[j];

                for (RegexNFAStateBase endState : prior.getEndStates()) {
                    for (RegexNFAStateBase startState : current.getStartStates()) {
                        endState.addState(startState);
                    }
                }

                if (!prior.isPassthrough()) {
                    break;
                }
            }
        }

        return new RegexNFAStrand(startStates, endStates, cumulativeStates, isPassthrough);
    } else if (node instanceof RowRegexExprNodeNested) {
        RowRegexExprNodeNested nested = (RowRegexExprNodeNested) node;

        nodeNumStack.push(0);
        RegexNFAStrand strand = recursiveBuildStatesInternal(node.getChildNodes().get(0), variableDefinitions,
                variableStreams, nodeNumStack);
        nodeNumStack.pop();

        boolean isPassthrough = strand.isPassthrough() || nested.getType().isOptional();

        // if this is a repeating node then pipe back each end state to each begin state
        if (nested.getType().isMultipleMatches()) {
            for (RegexNFAStateBase endstate : strand.getEndStates()) {
                for (RegexNFAStateBase startstate : strand.getStartStates()) {
                    if (!endstate.getNextStates().contains(startstate)) {
                        endstate.getNextStates().add(startstate);
                    }
                }
            }
        }

        return new RegexNFAStrand(strand.getStartStates(), strand.getEndStates(), strand.getAllStates(),
                isPassthrough);
    } else {
        RowRegexExprNodeAtom atom = (RowRegexExprNodeAtom) node;

        // assign the stream number for single-variables for most direct expression eval;
        // multiple-variable gets -1
        int streamNum = variableStreams.get(atom.getTag()).getFirst();
        boolean multiple = variableStreams.get(atom.getTag()).getSecond();
        ExprNode expressionDef = variableDefinitions.get(atom.getTag());

        RegexNFAStateBase nextState;
        if ((atom.getType() == RegexNFATypeEnum.ZERO_TO_MANY)
                || (atom.getType() == RegexNFATypeEnum.ZERO_TO_MANY_RELUCTANT)) {
            nextState = new RegexNFAStateZeroToMany(toString(nodeNumStack), atom.getTag(), streamNum, multiple,
                    atom.getType().isGreedy(), expressionDef);
        } else if ((atom.getType() == RegexNFATypeEnum.ONE_TO_MANY)
                || (atom.getType() == RegexNFATypeEnum.ONE_TO_MANY_RELUCTANT)) {
            nextState = new RegexNFAStateOneToMany(toString(nodeNumStack), atom.getTag(), streamNum, multiple,
                    atom.getType().isGreedy(), expressionDef);
        } else if ((atom.getType() == RegexNFATypeEnum.ONE_OPTIONAL)
                || (atom.getType() == RegexNFATypeEnum.ONE_OPTIONAL_RELUCTANT)) {
            nextState = new RegexNFAStateOneOptional(toString(nodeNumStack), atom.getTag(), streamNum, multiple,
                    atom.getType().isGreedy(), expressionDef);
        } else if (expressionDef == null) {
            nextState = new RegexNFAStateAnyOne(toString(nodeNumStack), atom.getTag(), streamNum, multiple);
        } else {
            nextState = new RegexNFAStateFilter(toString(nodeNumStack), atom.getTag(), streamNum, multiple,
                    expressionDef);
        }

        return new RegexNFAStrand(Collections.singletonList(nextState), Collections.singletonList(nextState),
                Collections.singletonList(nextState), atom.getType().isOptional());
    }
}
From source file: org.alfresco.web.bean.wcm.AVMUtil.java
/**
 * Creates all directories for a path if they do not already exist.
 */
public static void makeAllDirectories(final String avmDirectoryPath) {
    final AVMService avmService = getAVMService();
    // LOGGER.debug("mkdir -p " + avmDirectoryPath);
    String s = avmDirectoryPath;
    final Stack<String[]> dirNames = new Stack<String[]>();
    while (s != null) {
        try {
            if (avmService.lookup(-1, s) != null) {
                // LOGGER.debug("path " + s + " exists");
                break;
            }
        } catch (AVMNotFoundException avmfe) {
        }
        final String[] sb = AVMNodeConverter.SplitBase(s);
        s = sb[0];
        // LOGGER.debug("pushing " + sb[1]);
        dirNames.push(sb);
    }

    while (!dirNames.isEmpty()) {
        final String[] sb = dirNames.pop();
        // LOGGER.debug("creating " + sb[1] + " in " + sb[0]);
        avmService.createDirectory(sb[0], sb[1]);
    }
}
From source file: com.amalto.core.storage.StorageMetadataUtils.java
private static void __paths(ComplexTypeMetadata type, FieldMetadata target, Stack<FieldMetadata> currentPath,
        Set<List<FieldMetadata>> foundPaths, Set<TypeMetadata> processedTypes) {
    if (Storage.PROJECTION_TYPE.equals(type.getName()) && type.hasField(target.getName())) {
        currentPath.push(type.getField(target.getName()));
    }
    Collection<FieldMetadata> fields = type.getFields();
    for (FieldMetadata current : fields) {
        currentPath.push(current);
        if (current.equals(target)) {
            foundPaths.add(new ArrayList<FieldMetadata>(currentPath));
        }
        if (current instanceof ContainedTypeFieldMetadata) {
            ComplexTypeMetadata containedType = ((ContainedTypeFieldMetadata) current).getContainedType();
            _paths(containedType, target, currentPath, foundPaths, processedTypes);
            for (ComplexTypeMetadata subType : containedType.getSubTypes()) {
                for (FieldMetadata field : subType.getFields()) {
                    if (field.getDeclaringType().equals(subType)) {
                        _paths(subType, target, currentPath, foundPaths, processedTypes);
                    }
                }
            }
        } else if (current instanceof ReferenceFieldMetadata) {
            ComplexTypeMetadata referencedType = ((ReferenceFieldMetadata) current).getReferencedType();
            if (!referencedType.isInstantiable()) {
                if (processedTypes.contains(referencedType)) {
                    Collection<FieldMetadata> tempFields = referencedType.getFields();
                    for (FieldMetadata tempCurrent : tempFields) {
                        if (tempCurrent.equals(target)) {
                            currentPath.push(tempCurrent);
                            foundPaths.add(new ArrayList<FieldMetadata>(currentPath));
                            currentPath.pop();
                        }
                    }
                }
                _paths(referencedType, target, currentPath, foundPaths, processedTypes);
                for (ComplexTypeMetadata subType : referencedType.getSubTypes()) {
                    for (FieldMetadata field : subType.getFields()) {
                        if (field.getDeclaringType() == subType) {
                            _paths(subType, target, currentPath, foundPaths, processedTypes);
                        }
                    }
                }
            }
        }
        currentPath.pop();
    }
}
From source file: com.runwaysdk.generation.loader.RunwayClassLoader.java
/**
 * Loads an array from the base component up. If an array is loaded without
 * its componentType already loaded then an error occurs. Thus it loads from
 * the inside out.
 *
 * @param arrayType
 */
public static Class<?> loadArray(String arrayType) {
    // Keep track of what types of array we have (an array of Integers and an
    // array of Business objects, for example)
    Stack<String> baseTypes = new Stack<String>();
    String baseType = arrayType;
    Class<?> arrayClass = null;

    // This loop strips the base type out of any n-dimensional array
    while (arrayPattern.matcher(baseType).matches()) {
        if (arrayPrefix.matcher(baseType).matches()) {
            baseType = baseType.replaceFirst("\\[L", "").replace(";", "").trim();
        } else {
            baseType = baseType.replaceFirst("\\[", "");
        }
        // Add the base type to the stack
        baseTypes.push(baseType);
    }

    // We must load all base types before we can try to load arrays of those types
    while (!baseTypes.isEmpty()) {
        String type = baseTypes.pop();
        Class<?> componentType;
        componentType = LoaderDecorator.load(type);
        arrayClass = Array.newInstance(componentType, 0).getClass();
    }

    return arrayClass;
}
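The method above peels JVM array descriptors such as "[[Ljava.lang.String;" one dimension at a time and stacks the results so the innermost component is handled first. A minimal sketch of that peel-and-push idea using only the JDK; the class name and sample descriptor here are illustrative, not from the Runway source:

import java.util.Stack;

public class ArrayDescriptorDemo {
    public static void main(String[] args) throws ClassNotFoundException {
        // Peel a JVM array descriptor one dimension at a time, pushing each
        // intermediate form so the innermost component ends up on top
        Stack<String> components = new Stack<>();
        String type = "[[Ljava.lang.String;";
        while (type.startsWith("[")) {
            type = type.startsWith("[L")
                    ? type.replaceFirst("\\[L", "").replace(";", "")
                    : type.substring(1);
            components.push(type);
        }
        System.out.println(components.pop()); // java.lang.String
        // Creating a zero-length array recovers the array class, as loadArray does
        System.out.println(java.lang.reflect.Array
                .newInstance(Class.forName("java.lang.String"), 0).getClass()); // class [Ljava.lang.String;
    }
}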
From source file: edu.umn.cs.spatialHadoop.operations.ConvexHull.java
/**
 * Computes the convex hull of a set of points using a divide and conquer
 * in-memory algorithm. This function implements Andrew's modification to
 * the Graham scan algorithm.
 *
 * @param points
 * @return
 */
public static <P extends Point> P[] convexHullInMemory(P[] points) {
    Stack<P> s1 = new Stack<P>();
    Stack<P> s2 = new Stack<P>();

    Arrays.sort(points);

    // Lower chain
    for (int i = 0; i < points.length; i++) {
        while (s1.size() > 1) {
            P p1 = s1.get(s1.size() - 2);
            P p2 = s1.get(s1.size() - 1);
            P p3 = points[i];
            double crossProduct = (p2.x - p1.x) * (p3.y - p1.y) - (p2.y - p1.y) * (p3.x - p1.x);
            if (crossProduct <= 0)
                s1.pop();
            else
                break;
        }
        s1.push(points[i]);
    }

    // Upper chain
    for (int i = points.length - 1; i >= 0; i--) {
        while (s2.size() > 1) {
            P p1 = s2.get(s2.size() - 2);
            P p2 = s2.get(s2.size() - 1);
            P p3 = points[i];
            double crossProduct = (p2.x - p1.x) * (p3.y - p1.y) - (p2.y - p1.y) * (p3.x - p1.x);
            if (crossProduct <= 0)
                s2.pop();
            else
                break;
        }
        s2.push(points[i]);
    }

    s1.pop();
    s2.pop();
    s1.addAll(s2);
    return s1.toArray((P[]) Array.newInstance(s1.firstElement().getClass(), s1.size()));
}
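As a quick illustration of the lower-chain loop above: the same pop-while-not-a-left-turn stack mechanics, with plain int[] {x, y} pairs standing in for the SpatialHadoop Point type. All names in this sketch are illustrative.

import java.util.Arrays;
import java.util.Comparator;
import java.util.Stack;

public class LowerChainDemo {
    static long cross(int[] p1, int[] p2, int[] p3) {
        // > 0 for a counter-clockwise (left) turn, <= 0 for clockwise or collinear
        return (long) (p2[0] - p1[0]) * (p3[1] - p1[1]) - (long) (p2[1] - p1[1]) * (p3[0] - p1[0]);
    }

    public static void main(String[] args) {
        int[][] points = { { 0, 0 }, { 1, 1 }, { 2, 0 }, { 1, -1 } };
        Arrays.sort(points, Comparator.comparingInt((int[] p) -> p[0]).thenComparingInt(p -> p[1]));

        Stack<int[]> hull = new Stack<>();
        for (int[] p : points) {
            // pop while the last two hull points and p do not make a strict left turn
            while (hull.size() > 1 && cross(hull.get(hull.size() - 2), hull.peek(), p) <= 0) {
                hull.pop();
            }
            hull.push(p);
        }

        for (int[] p : hull) {
            System.out.println(Arrays.toString(p)); // lower hull: [0, 0], [1, -1], [2, 0]
        }
    }
}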
From source file: com.nextep.designer.sqlclient.ui.helpers.SQLHelper.java
private static DMLParseResult parseSQL(String sql, int start) {
    final ISQLParser parser = GeneratorFactory.getSQLParser(DBGMHelper.getCurrentVendor());

    // Retrieving the corresponding statement start
    IDocument doc = new Document();
    doc.set(sql + " "); //$NON-NLS-1$
    FindReplaceDocumentAdapter finder = new FindReplaceDocumentAdapter(doc);
    try {
        IRegion lastSemicolonRegion = finder.find(start - 1, ";", false, false, false, false); //$NON-NLS-1$
        if (lastSemicolonRegion == null) {
            lastSemicolonRegion = new Region(0, 1);
        }
        IRegion selectRegion = finder.find(lastSemicolonRegion.getOffset(), "SELECT|INSERT|UPDATE|DELETE", true, //$NON-NLS-1$
                false, false, true);
        IRegion endSemicolonRegion = finder.find(start == doc.getLength() ? start - 1 : start, ";", true, false, //$NON-NLS-1$
                false, false);
        if (endSemicolonRegion == null) {
            endSemicolonRegion = new Region(doc.getLength() - 1, 0);
        }
        if (selectRegion == null || lastSemicolonRegion == null || endSemicolonRegion == null) {
            return null;
        }
        // The select must be found after the first semicolon, else it is not the
        // same SQL statement
        if (selectRegion.getOffset() >= lastSemicolonRegion.getOffset()
                && endSemicolonRegion.getOffset() >= selectRegion.getOffset()) {
            DMLScanner scanner = new DMLScanner(parser);
            scanner.setRange(doc, selectRegion.getOffset(),
                    endSemicolonRegion.getOffset() - selectRegion.getOffset());
            IToken token = scanner.nextToken();
            DMLParseResult result = new DMLParseResult();
            Stack<DMLParseResult> stack = new Stack<DMLParseResult>();
            Map<Segment, DMLParseResult> results = new HashMap<Segment, DMLParseResult>();
            while (!token.isEOF()) {
                // Counting parentheses
                if (token == DMLScanner.LEFTPAR_TOKEN) {
                    result.parCount++;
                } else if (token == DMLScanner.RIGHTPAR_TOKEN) {
                    result.parCount--;
                }
                if (token == DMLScanner.SELECT_TOKEN) {
                    // Nested SELECT: push the current parse result and start a new one
                    stack.push(result);
                    result = new DMLParseResult();
                    result.stackStart = scanner.getTokenOffset();
                } else if (token == DMLScanner.RIGHTPAR_TOKEN && result.parCount < 0) {
                    // Closing parenthesis of a nested SELECT: restore the outer result
                    results.put(new Segment(result.stackStart, scanner.getTokenOffset() - result.stackStart),
                            result);
                    result = stack.pop();
                } else if (token == DMLScanner.INSERT_TOKEN) {
                    result.ignoreInto = false;
                } else if (token == DMLScanner.FROM_TOKEN || token == DMLScanner.UPDATE_TOKEN
                        || (token == DMLScanner.INTO_TOKEN && !result.ignoreInto)) {
                    result.ignoreInto = true;
                    // We have a table segment start
                    result.tableSegStart = scanner.getTokenOffset();
                    result.tableStartToken = token;
                } else if (token == DMLScanner.WORD_TOKEN && result.tableSegStart > 0) {
                    // We are in a table segment, so we instantiate appropriate table references
                    // and aliases in the parse result
                    if (result.lastAlias == null) {
                        // This is a new table definition, we add it
                        result.lastAlias = new TableAlias(
                                doc.get(scanner.getTokenOffset(), scanner.getTokenLength()).toUpperCase());
                        result.addFromTable(result.lastAlias);
                    } else if (result.lastAlias.getTableAlias() == null) {
                        // This is an alias of a defined table
                        final String alias = doc.get(scanner.getTokenOffset(), scanner.getTokenLength());
                        final List<String> reservedWords = parser.getTypedTokens().get(ISQLParser.DML);
                        if (!reservedWords.contains(alias.toUpperCase())) {
                            result.lastAlias.setAlias(alias);
                        } else {
                            result.lastAlias = null;
                        }
                    }
                } else if (token == DMLScanner.COMMA_TOKEN) {
                    // On a comma, we reset any table reference
                    result.lastAlias = null;
                } else if (token == DMLScanner.DML_TOKEN) {
                    result.lastAlias = null;
                    if (result.tableSegStart != -1) {
                        int tableSegEnd = scanner.getTokenOffset();
                        result.addTableSegment(
                                new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart));
                        result.tableSegStart = -1;
                    }
                } else if (result.tableSegStart != -1
                        && ((result.tableStartToken == DMLScanner.FROM_TOKEN && token == DMLScanner.WHERE_TOKEN)
                                || (result.tableStartToken == DMLScanner.UPDATE_TOKEN
                                        && token == DMLScanner.SET_TOKEN)
                                || (result.tableStartToken == DMLScanner.INTO_TOKEN
                                        && token == DMLScanner.LEFTPAR_TOKEN))) {
                    // We have matched a table segment end, so we close the segment
                    // and we add it to the parse result's table segments
                    int tableSegEnd = scanner.getTokenOffset();
                    result.addTableSegment(
                            new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart));
                    result.tableSegStart = -1;
                    if (token == DMLScanner.WHERE_TOKEN) {
                        result.whereSegStart = scanner.getTokenOffset() + scanner.getTokenLength();
                    }
                }
                token = scanner.nextToken();
            }
            // If the table segment is still opened, we close it at the end of the SQL statement
            if (result.tableSegStart > -1) {
                int tableSegEnd = endSemicolonRegion.getOffset();
                result.addTableSegment(
                        new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart + 1));
            }
            // Locating the appropriate result
            for (Segment s : results.keySet()) {
                if (s.getOffset() <= start && s.getOffset() + s.getLength() > start) {
                    return results.get(s);
                }
            }
            return result;
        }
    } catch (BadLocationException e) {
        LOGGER.debug("Problems while retrieving SQL statement");
    }
    return null;
}
From source file: jp.terasoluna.fw.batch.util.BatchUtil.java
/**
 * Commits all transactions held in the given status map, in reverse order of registration.
 *
 * @param tranMap map of transaction names to PlatformTransactionManager instances
 * @param statMap map of transaction names to TransactionStatus instances
 * @param log logger used for debug output
 */
public static void commitTransactions(Map<?, ?> tranMap, Map<String, TransactionStatus> statMap, Log log) {
    Set<Entry<String, TransactionStatus>> statSet = statMap.entrySet();

    if (statSet.isEmpty()) {
        return;
    }

    Stack<Entry<String, TransactionStatus>> stack = new Stack<Entry<String, TransactionStatus>>();
    for (Entry<String, TransactionStatus> stat : statSet) {
        stack.push(stat);
    }

    while (!stack.isEmpty()) {
        // Retrieve the next transaction status, most recently registered first
        Entry<String, TransactionStatus> statEntry = stack.pop();
        String key = statEntry.getKey();
        TransactionStatus trnStat = statEntry.getValue();
        if (trnStat == null) {
            continue;
        }

        // Retrieve the matching transaction manager
        Object ptmObj = tranMap.get(key);
        if (ptmObj == null || !(ptmObj instanceof PlatformTransactionManager)) {
            continue;
        }
        PlatformTransactionManager ptm = (PlatformTransactionManager) ptmObj;

        if (log != null && log.isDebugEnabled()) {
            logDebug(log, LogId.DAL025038, trnStat);
        }

        // Commit the transaction
        ptm.commit(trnStat);
    }
}
From source file: jp.terasoluna.fw.batch.util.BatchUtil.java
/**
 * Ends (rolls back) any transactions in the given status map that have not yet completed,
 * in reverse order of registration.
 *
 * @param tranMap map of transaction names to PlatformTransactionManager instances
 * @param statMap map of transaction names to TransactionStatus instances
 * @param log Log
 * @return true if every incomplete transaction was rolled back without a TransactionException
 */
public static boolean endTransactions(Map<?, ?> tranMap, Map<String, TransactionStatus> statMap, Log log) {
    boolean isNormal = true;

    Set<Entry<String, TransactionStatus>> statSet = statMap.entrySet();
    if (statSet == null || statSet.isEmpty()) {
        return isNormal;
    }

    Stack<Entry<String, TransactionStatus>> stack = new Stack<Entry<String, TransactionStatus>>();
    for (Entry<String, TransactionStatus> stat : statSet) {
        stack.push(stat);
    }

    while (!stack.isEmpty()) {
        // Retrieve the next transaction status, most recently registered first
        Entry<String, TransactionStatus> statEntry = stack.pop();
        String key = statEntry.getKey();
        TransactionStatus trnStat = statEntry.getValue();
        if (trnStat == null) {
            continue;
        }

        // Retrieve the matching transaction manager
        Object ptmObj = tranMap.get(key);
        if (ptmObj == null || !(ptmObj instanceof PlatformTransactionManager)) {
            continue;
        }
        PlatformTransactionManager ptm = (PlatformTransactionManager) ptmObj;

        // Skip transactions that have already completed
        if (trnStat.isCompleted()) {
            continue;
        }

        if (log != null && log.isDebugEnabled()) {
            logDebug(log, LogId.DAL025041, trnStat);
        }

        // Roll back the transaction
        try {
            ptm.rollback(trnStat);
        } catch (TransactionException e) {
            if (log != null && log.isErrorEnabled()) {
                logError(log, LogId.EAL025045, e, key);
            }
            isNormal = false; // record the failure and continue with the remaining transactions
        }

        if (log != null && log.isDebugEnabled()) {
            logDebug(log, LogId.DAL025041, trnStat);
        }
    }
    return isNormal;
}
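Both BatchUtil methods above push map entries onto a Stack so that popping visits them in reverse registration order; note this only holds when the map's iteration order is defined (for example a LinkedHashMap), since a plain HashMap's entrySet order is unspecified. A minimal sketch of the pattern, with illustrative names:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Stack;

public class ReverseOrderDemo {
    public static void main(String[] args) {
        // LinkedHashMap preserves insertion order, so pushing its entries onto a
        // stack and popping yields the reverse of registration order, the same
        // trick used by commitTransactions/endTransactions above
        Map<String, String> tx = new LinkedHashMap<>();
        tx.put("tx1", "orders");
        tx.put("tx2", "inventory");
        tx.put("tx3", "audit");

        Stack<Map.Entry<String, String>> stack = new Stack<>();
        for (Map.Entry<String, String> e : tx.entrySet()) {
            stack.push(e);
        }
        while (!stack.isEmpty()) {
            System.out.println(stack.pop().getKey()); // tx3, tx2, tx1
        }
    }
}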