List of usage examples for java.util.ArrayList.indexOf
public int indexOf(Object o)
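Before the real-world examples below, a minimal, self-contained sketch of the method's contract (assuming nothing beyond the JDK): indexOf returns the index of the first element equal to its argument, comparing with equals (or matching null against null elements), and -1 when no such element exists.

import java.util.ArrayList;
import java.util.List;

public class IndexOfBasics {
    public static void main(String[] args) {
        List<String> fruits = new ArrayList<>();
        fruits.add("apple");
        fruits.add("banana");
        fruits.add("apple"); // duplicate: indexOf reports only the first occurrence

        System.out.println(fruits.indexOf("apple"));  // 0 (first match wins)
        System.out.println(fruits.indexOf("cherry")); // -1 (no equal element)
        System.out.println(fruits.indexOf(null));     // -1 (null is a legal argument; no null element here)
    }
}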
From source file:edu.mit.media.funf.probe.Probe.java
/**
 * Updates the request list with items in the queue, replacing duplicate pending intents for this probe.
 * @param removeRunOnce
 */
private void updateRequests(boolean removeRunOnce) {
    assert requestsIntent != null;
    boolean hasChanges = false;
    ArrayList<Intent> requests = requestsIntent.getParcelableArrayListExtra(INTERNAL_REQUESTS_KEY);
    if (requests == null) {
        hasChanges = true;
        requests = new ArrayList<Intent>();
    }
    // Remove run once requests
    Parameter periodParam = Parameter.getAvailableParameter(getAvailableParameters(), Parameter.Builtin.PERIOD);
    if (periodParam != null && removeRunOnce) {
        for (Intent request : requests) {
            ArrayList<Bundle> dataRequests = Utils.getArrayList(request.getExtras(), REQUESTS_KEY);
            List<Bundle> runOnceDataRequests = new ArrayList<Bundle>();
            for (Bundle dataRequest : dataRequests) {
                long periodValue = Utils.getLong(dataRequest, Parameter.Builtin.PERIOD.name,
                        (Long) periodParam.getValue());
                if (periodValue == 0L) {
                    Log.d(TAG, "Removing run once dataRequest: " + dataRequest);
                    runOnceDataRequests.add(dataRequest);
                }
            }
            dataRequests.removeAll(runOnceDataRequests);
            if (dataRequests.isEmpty()) {
                deadRequests.add(request);
            } else {
                request.putExtra(REQUESTS_KEY, dataRequests);
            }
        }
    }
    // Remove all requests that we aren't able to (or supposed to) send to anymore
    if (!deadRequests.isEmpty()) {
        hasChanges = true;
        for (Intent deadRequest = deadRequests.poll(); deadRequest != null; deadRequest = deadRequests.poll()) {
            Log.d(TAG, "Removing dead request: " + deadRequest);
            requests.remove(deadRequest);
        }
    }
    // Add any pending requests
    if (!pendingRequests.isEmpty()) {
        hasChanges = true;
        Map<PendingIntent, Intent> existingCallbacksToRequests = new HashMap<PendingIntent, Intent>();
        for (Intent existingRequest : requests) {
            PendingIntent callback = existingRequest.getParcelableExtra(CALLBACK_KEY);
            existingCallbacksToRequests.put(callback, existingRequest);
        }
        for (Intent request = pendingRequests.poll(); request != null; request = pendingRequests.poll()) {
            PendingIntent callback = request.getParcelableExtra(CALLBACK_KEY);
            if (packageHasRequiredPermissions(this, callback.getTargetPackage(), getRequiredPermissions())) {
                int existingRequestIndex = requests.indexOf(existingCallbacksToRequests.get(callback));
                ArrayList<Bundle> dataRequests = Utils.getArrayList(request.getExtras(), REQUESTS_KEY);
                Log.d(TAG, "Adding pending intent with data requests: " + dataRequests);
                if (existingRequestIndex >= 0) {
                    if (dataRequests == null || dataRequests.isEmpty()) {
                        Log.d(TAG, "Adding pending intent, removing because empty or null");
                        requests.remove(existingRequestIndex);
                    } else {
                        requests.set(existingRequestIndex, request);
                    }
                } else {
                    if (dataRequests != null && !dataRequests.isEmpty()) {
                        // Only add requests with nonempty data requests
                        Log.d(TAG, "Adding new pending intent: " + request);
                        requests.add(request);
                    }
                }
            } else {
                Log.w(TAG, "Package '" + callback.getTargetPackage()
                        + "' does not have the required permissions to get data from this probe.");
            }
        }
    }
    if (hasChanges) {
        requestsIntent.putExtra(INTERNAL_REQUESTS_KEY, requests);
        updateInternalRequestsPendingIntent();
    }
}
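The example above pairs indexOf with set and remove: a non-negative index means an existing request is replaced (or deleted) in place, while -1 means the request is new and gets appended. A minimal sketch of that upsert pattern, with plain strings standing in for the Intent objects (UpsertDemo and upsert are hypothetical names):

import java.util.ArrayList;
import java.util.List;

class UpsertDemo {
    // Replaces the first occurrence of oldValue with newValue, or appends newValue if absent.
    static void upsert(List<String> list, String oldValue, String newValue) {
        int i = list.indexOf(oldValue);
        if (i >= 0) {
            list.set(i, newValue);  // in-place replacement preserves ordering
        } else {
            list.add(newValue);     // not found: append
        }
    }

    public static void main(String[] args) {
        List<String> requests = new ArrayList<>(List.of("a", "b"));
        upsert(requests, "b", "B");   // replaces
        upsert(requests, "c", "C");   // appends
        System.out.println(requests); // [a, B, C]
    }
}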
From source file:mom.trd.opentheso.bdd.helper.ConceptHelper.java
/**
 * Recursive function to find the complete path of a concept, walking up from the
 * concept itself to the top term. Several top terms may be encountered on the way
 * up; in that case a complete path is built for each of them.
 *
 * @param ds
 * @param idConcept
 * @param idThesaurus
 * @param firstPath
 * @param path
 * @param tabId
 * @return a vector containing all the BT paths of a term id, for example
 *         (327,368,100,#,2251,5555,54544,8789,#); here two paths are available,
 *         and the path matching the current microthesaurus must be found so it
 *         can be displayed first
 */
public ArrayList<ArrayList<String>> getInvertPathOfConcept(HikariDataSource ds, String idConcept,
        String idThesaurus, ArrayList<String> firstPath, ArrayList<String> path,
        ArrayList<ArrayList<String>> tabId) {
    RelationsHelper relationsHelper = new RelationsHelper();
    ArrayList<String> resultat = relationsHelper.getListIdBT(ds, idConcept, idThesaurus);
    if (resultat.size() > 1) {
        for (String path1 : path) {
            firstPath.add(path1);
        }
    }
    if (resultat.isEmpty()) {
        path.add(getGroupIdOfConcept(ds, idConcept, idThesaurus));
        ArrayList<String> pathTemp = new ArrayList<>();
        for (String path2 : firstPath) {
            pathTemp.add(path2);
        }
        for (String path1 : path) {
            if (pathTemp.indexOf(path1) == -1) {
                pathTemp.add(path1);
            }
        }
        tabId.add(pathTemp);
        path.clear();
    }
    for (String resultat1 : resultat) {
        path.add(resultat1);
        getInvertPathOfConcept(ds, resultat1, idThesaurus, firstPath, path, tabId);
    }
    return tabId;
}
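Here indexOf serves purely as a membership test while copying: pathTemp.indexOf(path1) == -1 reads as "not yet in pathTemp", so each path element is appended at most once and insertion order is preserved. contains() expresses the same check more directly; a small sketch of the idiom (addMissing is a hypothetical helper):

import java.util.ArrayList;
import java.util.List;

public class DedupCopy {
    // Appends each element of src to dst unless dst already holds an equal element.
    static void addMissing(List<String> dst, List<String> src) {
        for (String s : src) {
            if (dst.indexOf(s) == -1) { // equivalent to !dst.contains(s)
                dst.add(s);
            }
        }
    }

    public static void main(String[] args) {
        List<String> path = new ArrayList<>(List.of("327", "368", "100"));
        addMissing(path, List.of("368", "#", "100", "2251"));
        System.out.println(path); // [327, 368, 100, #, 2251]
    }
}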
From source file:de.tudarmstadt.tk.statistics.importer.ExternalResultsReader.java
public static SampleData interpretCSV(StatsConfig config, List<String[]> rows, ReportTypes pipelineType,
        HashMap<String, Integer> pipelineMetadata) {
    HashMap<Integer, ArrayList<ArrayList<Double>>> samplesPerMeasure = new HashMap<Integer, ArrayList<ArrayList<Double>>>();
    // Only remove the first line if it is a header line
    if (rows.size() > 0 && rows.get(0)[6].equals("IsBaseline")) {
        rows.remove(0);
    }
    if (rows.size() > 1) {
        logger.log(Level.INFO, "Extracting samples and metadata from imported data.");
        int selectBestN = config.getSelectBestN();
        String selectByMeasure = config.getSelectByMeasure();

        // Preprocessing: Parse different models (classifier + feature set column) and measures
        ArrayList<String> measures = new ArrayList<String>();
        ArrayList<Pair<String, String>> datasets = new ArrayList<Pair<String, String>>();
        ArrayList<Pair<String, String>> models = new ArrayList<Pair<String, String>>();
        ArrayList<Pair<String, String>> baselineModels = new ArrayList<Pair<String, String>>();
        for (int i = 0; i < rows.size(); i++) {
            String[] columns = rows.get(i);
            String classifier = columns[2];
            if (classifier.equals("0")) {
                classifier = "Aggregated";
            }
            String featureSets = columns[3];
            Pair<String, String> model = Pair.of(classifier, featureSets);
            if (!models.contains(model)) {
                models.add(model);
                if (!baselineModels.contains(model) && Integer.parseInt(columns[6]) == 1) {
                    baselineModels.add(model);
                }
            }
            if (!measures.contains(columns[4])) {
                measures.add(columns[4]);
            }
        }

        // Check: a baseline is only allowed when more than two models are evaluated
        if (models.size() <= 2 && baselineModels.size() > 0) {
            logger.log(Level.WARN,
                    "At least three models are required to make an evaluation against a baseline meaningful. In the dataset, a baseline was specified for only two models. The baseline indicator will be ignored.");
            System.err.println(
                    "At least three models are required to make an evaluation against a baseline meaningful. In the dataset, a baseline was specified for only two models. The baseline indicator will be ignored.");
            baselineModels.clear();
        }

        // Now sort samples according to data
        Collections.sort(rows, new Helpers.LexicographicArrayComparator());
        for (int i = 0; i < rows.size(); i++) {
            String[] columns = rows.get(i);
            Pair<String, String> data = null;
            String trainData = columns[0].trim();
            String testData = columns[1].trim();
            // If this is a CV, numbers after a dot indicate fold UUIDs; they have to be
            // split off to retain the original dataset name
            if (pipelineType == ReportTypes.CV) {
                trainData = trainData.split("\\.")[0];
                testData = testData.split("\\.")[0];
            }
            if (trainData.equals(testData)) {
                data = Pair.of(trainData, null);
            } else {
                //columns[1] = columns[1].split(".")[0];
                data = Pair.of(trainData, testData);
            }
            if (!datasets.contains(data)) {
                datasets.add(data);
            }
        }

        // Preprocessing: Initialize sample container per measure/model
        for (int i = 0; i < measures.size(); i++) {
            ArrayList<ArrayList<Double>> samplesPerModel = new ArrayList<ArrayList<Double>>();
            for (int j = 0; j < models.size(); j++) {
                samplesPerModel.add(new ArrayList<Double>());
            }
            samplesPerMeasure.put(i, samplesPerModel);
        }

        // Assign samples to different models
        for (int i = 0; i < rows.size(); i++) {
            String[] columns = rows.get(i);
            String classifier = columns[2];
            if (classifier.equals("0")) {
                classifier = "Aggregated";
            }
            String featureSet = columns[3];
            String measure = columns[4];
            double value = Double.parseDouble(columns[5]);
            int measureIndex = measures.indexOf(measure);
            int modelIndex = models.indexOf(Pair.of(classifier, featureSet));
            ArrayList<ArrayList<Double>> sPMeasure = samplesPerMeasure.get(measureIndex);
            sPMeasure.get(modelIndex).add(value);
        }

        // Transform into the data format required by the statistical evaluation
        HashMap<String, ArrayList<ArrayList<Double>>> indexedSamples = new HashMap<String, ArrayList<ArrayList<Double>>>();
        HashMap<String, ArrayList<Double>> indexedSamplesAverage = new HashMap<String, ArrayList<Double>>();
        Iterator<Integer> it = samplesPerMeasure.keySet().iterator();
        while (it.hasNext()) {
            int measureIndex = it.next();
            ArrayList<ArrayList<Double>> samplesPerModel = samplesPerMeasure.get(measureIndex);
            ArrayList<Double> sampleAverages = new ArrayList<Double>(models.size());
            for (int modelIndex = 0; modelIndex < models.size(); modelIndex++) {
                ArrayList<Double> sample = samplesPerModel.get(modelIndex);
                double average = 0;
                for (int j = 0; j < sample.size(); j++) {
                    average += sample.get(j);
                }
                average /= sample.size();
                sampleAverages.add(average);
            }
            indexedSamplesAverage.put(measures.get(measureIndex), sampleAverages);
            indexedSamples.put(measures.get(measureIndex), samplesPerMeasure.get(measureIndex));
        }

        // Check if data fulfills general requirements: at least 5 samples for each model,
        // same number of samples per model
        it = samplesPerMeasure.keySet().iterator();
        while (it.hasNext()) {
            Integer measureIndex = it.next();
            ArrayList<ArrayList<Double>> samplesPerModel = samplesPerMeasure.get(measureIndex);
            int s = samplesPerModel.get(0).size();
            for (int i = 1; i < samplesPerModel.size(); i++) {
                if (samplesPerModel.get(i).size() < 5) {
                    logger.log(Level.ERROR, "More than 5 samples are needed per model and measure. Aborting.");
                    System.err.println("More than 5 samples are needed per model and measure. Aborting.");
                    System.exit(1);
                }
                if (samplesPerModel.get(i).size() != s) {
                    logger.log(Level.ERROR,
                            "Different models are not represented by the same number of samples. Aborting.");
                    System.err.println(
                            "Different models are not represented by the same number of samples. Aborting.");
                    System.exit(1);
                }
            }
        }

        // Collect remaining data required for creating a SampleData object.
        // Check if data fulfills requirements of the specific PipelineTypes.
        int nFolds = 1;
        int nRepetitions = 1;
        switch (pipelineType) {
        case CV:
            if (datasets.size() > 1) {
                System.err.println(
                        "Input data corrupted. More than one dataset specified for Single-Domain Cross-Validation.");
                logger.log(Level.ERROR,
                        "Input data corrupted. More than one dataset specified for Single-Domain Cross-Validation.");
                return null;
            } else if (datasets.get(0).getValue() != null) {
                System.err.println(
                        "Input data corrupted. Training and Test dataset must be same for Cross-Validation.");
                logger.log(Level.ERROR,
                        "Input data corrupted. Training and Test dataset must be same for Cross-Validation.");
                return null;
            }
            nFolds = indexedSamples.get(measures.get(0)).get(0).size();
            nRepetitions = 1;
            break;
        case MULTIPLE_CV:
            if (datasets.size() > 1) {
                System.err.println(
                        "Input data corrupted. More than one dataset specified for Single-Domain Cross-Validation.");
                logger.log(Level.ERROR,
                        "Input data corrupted. More than one dataset specified for Single-Domain Cross-Validation.");
                return null;
            } else if (datasets.get(0).getValue() != null) {
                System.err.println(
                        "Input data corrupted. Training and Test dataset must be same for Cross-Validation.");
                logger.log(Level.ERROR,
                        "Input data corrupted. Training and Test dataset must be same for Cross-Validation.");
                return null;
            }
            nFolds = pipelineMetadata.get("nFolds");
            nRepetitions = indexedSamples.get(measures.get(0)).get(0).size();
            break;
        case CV_DATASET_LVL:
            nFolds = pipelineMetadata.get("nFolds");
            nRepetitions = 1;
            break;
        case MULTIPLE_CV_DATASET_LVL:
            nFolds = pipelineMetadata.get("nFolds");
            nRepetitions = pipelineMetadata.get("nRepetitions");
            break;
        case TRAIN_TEST_DATASET_LVL:
            nFolds = 1;
            nRepetitions = 1;
            break;
        default:
            System.err.println("Unknown PipelineType. Aborting.");
            logger.log(Level.ERROR, "Unknown PipelineType. Aborting.");
            return null;
        }

        // Reorder data in case of a baseline evaluation (baseline first)
        if (baselineModels.size() == 1) {
            Pair<String, String> baselineModel = baselineModels.get(0);
            int modelIndex = models.indexOf(baselineModel);
            models.remove(modelIndex);
            models.add(0, baselineModel);
            for (String measure : indexedSamples.keySet()) {
                ArrayList<Double> s = indexedSamples.get(measure).get(modelIndex);
                indexedSamples.get(measure).remove(modelIndex);
                indexedSamples.get(measure).add(0, s);
                double a = indexedSamplesAverage.get(measure).get(modelIndex);
                indexedSamplesAverage.get(measure).remove(modelIndex);
                indexedSamplesAverage.get(measure).add(0, a);
            }
        }
        SampleData sampleData = new SampleData(null, indexedSamples, indexedSamplesAverage, datasets, models,
                baselineModels, pipelineType, nFolds, nRepetitions);
        sampleData = Helpers.truncateData(sampleData, selectBestN, selectByMeasure);
        return sampleData;
    }
    return null;
}
From source file:org.opencastproject.videoeditor.impl.VideoEditorServiceImpl.java
/**
 * Splices the segments given by the smil document for the given track into a new track.
 *
 * @param job
 *          processing job
 * @param smil
 *          smil document with media segments description
 * @param trackParamGroupId
 *          id of the param group describing the source track
 * @return processed track
 * @throws ProcessFailedException
 *           if an error occurred
 */
protected synchronized Track processSmil(Job job, Smil smil, String trackParamGroupId)
        throws ProcessFailedException {
    SmilMediaParamGroup trackParamGroup;
    ArrayList<String> inputfile = new ArrayList<String>();
    ArrayList<VideoClip> videoclips = new ArrayList<VideoClip>();
    try {
        trackParamGroup = (SmilMediaParamGroup) smil.get(trackParamGroupId);
    } catch (SmilException ex) {
        // can't be thrown, because we found the Id in processSmil(Smil)
        throw new ProcessFailedException(
                "Smil does not contain a paramGroup element with Id " + trackParamGroupId);
    }
    String sourceTrackId = null;
    MediaPackageElementFlavor sourceTrackFlavor = null;
    String sourceTrackUri = null;
    // get source track metadata
    for (SmilMediaParam param : trackParamGroup.getParams()) {
        if (SmilMediaParam.PARAM_NAME_TRACK_ID.equals(param.getName())) {
            sourceTrackId = param.getValue();
        } else if (SmilMediaParam.PARAM_NAME_TRACK_SRC.equals(param.getName())) {
            sourceTrackUri = param.getValue();
        } else if (SmilMediaParam.PARAM_NAME_TRACK_FLAVOR.equals(param.getName())) {
            sourceTrackFlavor = MediaPackageElementFlavor.parseFlavor(param.getValue());
        }
    }
    File sourceFile = null;
    try {
        sourceFile = workspace.get(new URI(sourceTrackUri));
    } catch (IOException ex) {
        throw new ProcessFailedException("Can't read " + sourceTrackUri);
    } catch (NotFoundException ex) {
        throw new ProcessFailedException("Workspace does not contain a track " + sourceTrackUri);
    } catch (URISyntaxException ex) {
        throw new ProcessFailedException("Source URI " + sourceTrackUri + " is not valid.");
    }
    // inspect input file to retrieve media information
    Job inspectionJob = null;
    Track sourceTrack = null;
    try {
        inspectionJob = inspect(job, new URI(sourceTrackUri));
        sourceTrack = (Track) MediaPackageElementParser.getFromXml(inspectionJob.getPayload());
    } catch (URISyntaxException e) {
        throw new ProcessFailedException("Source URI " + sourceTrackUri + " is not valid.");
    } catch (MediaInspectionException e) {
        throw new ProcessFailedException("Media inspection of " + sourceTrackUri + " failed", e);
    } catch (MediaPackageException e) {
        throw new ProcessFailedException("Deserialization of source track " + sourceTrackUri + " failed", e);
    }
    // get output file extension
    String outputFileExtension = properties.getProperty(VideoEditorProperties.DEFAULT_EXTENSION, ".mp4");
    outputFileExtension = properties.getProperty(VideoEditorProperties.OUTPUT_FILE_EXTENSION,
            outputFileExtension);
    if (!outputFileExtension.startsWith(".")) {
        outputFileExtension = '.' + outputFileExtension;
    }
    // create working directory
    File tempDirectory = FileSupport.getTempDirectory(Long.toString(job.getId()));
    File outputPath = new File(tempDirectory,
            sourceTrackFlavor + "_" + sourceFile.getName() + outputFileExtension);
    if (!outputPath.getParentFile().exists()) {
        outputPath.getParentFile().mkdirs();
    }
    URI newTrackURI = null;
    inputfile.add(sourceFile.getAbsolutePath()); // default source - add to source table as 0
    int srcIndex = inputfile.indexOf(sourceFile.getAbsolutePath()); // index = 0
    logger.info("Start processing srcfile {}", sourceFile.getAbsolutePath());
    try {
        // parse body elements
        for (SmilMediaObject element : smil.getBody().getMediaElements()) {
            // body should contain par elements
            if (element.isContainer()) {
                SmilMediaContainer container = (SmilMediaContainer) element;
                if (SmilMediaContainer.ContainerType.PAR == container.getContainerType()) {
                    // par element should contain media elements
                    for (SmilMediaObject elementChild : container.getElements()) {
                        if (!elementChild.isContainer()) {
                            SmilMediaElement media = (SmilMediaElement) elementChild;
                            //logger.debug("Start processing smilMedia {}", media.toString());
                            if (trackParamGroupId.equals(media.getParamGroup())) {
                                long begin = media.getClipBeginMS();
                                long end = media.getClipEndMS();
                                URI clipTrackURI = media.getSrc();
                                File clipSourceFile = null;
                                if (clipTrackURI != null) {
                                    try {
                                        clipSourceFile = workspace.get(clipTrackURI);
                                    } catch (IOException ex) {
                                        throw new ProcessFailedException("Can't read " + clipTrackURI);
                                    } catch (NotFoundException ex) {
                                        throw new ProcessFailedException(
                                                "Workspace does not contain a track " + clipTrackURI);
                                    }
                                }
                                int index = -1;
                                if (clipSourceFile != null) { // clip has a different source
                                    // look for known tracks
                                    index = inputfile.indexOf(clipSourceFile.getAbsolutePath());
                                    if (index == -1) {
                                        inputfile.add(clipSourceFile.getAbsolutePath()); // add new track
                                        // TODO: inspect each new video file, bad input will throw exc
                                    }
                                    index = inputfile.indexOf(clipSourceFile.getAbsolutePath());
                                } else {
                                    index = srcIndex; // default src
                                }
                                videoclips.add(new VideoClip(index, begin / 1000.0, end / 1000.0));
                            }
                        } else {
                            throw new ProcessFailedException("Smil container '"
                                    + ((SmilMediaContainer) elementChild).getContainerType().toString()
                                    + "' is not supported yet");
                        }
                    }
                } else {
                    throw new ProcessFailedException("Smil container '"
                            + container.getContainerType().toString() + "' is not supported yet");
                }
            }
        }
        List<VideoClip> cleanclips = sortSegments(videoclips); // remove very short cuts that will look bad
        String error = null;
        String outputResolution = ""; // TODO: fetch the largest output resolution from SMIL.head.layout.root-layout
        // When outputResolution is set to WxH, all clips are scaled to that size in the output video.
        // TODO: Each clip could have a region id, relative to the root-layout.
        // Then each clip is zoomed/panned/padded to WxH before concatenation.
        FFmpegEdit ffmpeg = new FFmpegEdit(properties);
        error = ffmpeg.processEdits(inputfile, outputPath.getAbsolutePath(), outputResolution, cleanclips,
                sourceTrack.hasAudio(), sourceTrack.hasVideo());
        if (error != null) {
            FileUtils.deleteQuietly(tempDirectory);
            throw new ProcessFailedException("Editing pipeline exited abnormally! Error: " + error);
        }
        // create Track for edited file
        String newTrackId = idBuilder.createNew().toString();
        InputStream in = new FileInputStream(outputPath);
        try {
            newTrackURI = workspace.putInCollection(COLLECTION_ID,
                    String.format("%s-%s%s", sourceTrackFlavor.getType(), newTrackId, outputFileExtension), in);
        } catch (IllegalArgumentException ex) {
            throw new ProcessFailedException("Copy track into workspace failed! " + ex.getMessage());
        } finally {
            IOUtils.closeQuietly(in);
            FileUtils.deleteQuietly(tempDirectory);
        }
        //logger.debug("Copied the edited file from " + outputPath.toString() + " to workspace at " + String.format("%s-%s%s", sourceTrackFlavor.getType(), newTrackId, outputFileExtension) + " returns " + newTrackURI.toString());
        // inspect new Track
        try {
            inspectionJob = inspect(job, newTrackURI);
        } catch (MediaInspectionException e) {
            throw new ProcessFailedException("Media inspection of " + newTrackURI + " failed", e);
        }
        Track editedTrack = (Track) MediaPackageElementParser.getFromXml(inspectionJob.getPayload());
        logger.info("edited FILE " + inspectionJob.getPayload());
        editedTrack.setIdentifier(newTrackId);
        editedTrack.setFlavor(new MediaPackageElementFlavor(sourceTrackFlavor.getType(), SINK_FLAVOR_SUBTYPE));
        return editedTrack;
    } catch (MediaInspectionException ex) {
        throw new ProcessFailedException("Inspecting encoded Track failed with: " + ex.getMessage());
    } catch (MediaPackageException ex) {
        throw new ProcessFailedException("Unable to serialize edited Track! " + ex.getMessage());
    } catch (Exception ex) {
        throw new ProcessFailedException(ex.getMessage());
    } finally {
        FileUtils.deleteQuietly(tempDirectory);
    }
}
From source file:org.lamsfoundation.lams.admin.service.ImportService.java
public List<List> parseV1UsersFile(FormFile fileItem, boolean includeIntegrated) throws IOException {
    ArrayList<V1UserDTO> users = new ArrayList<V1UserDTO>();
    ArrayList<V1OrganisationDTO> orgs = new ArrayList<V1OrganisationDTO>();
    ArrayList<List> results = new ArrayList<List>();
    ArrayList<String> integPrefixes = new ArrayList<String>();
    ArrayList<String> integOrgid = new ArrayList<String>();
    BufferedReader reader = new BufferedReader(new InputStreamReader(fileItem.getInputStream()));

    // get username prefixes, for integrations on the lams 1 server
    String line = reader.readLine();
    while (!line.startsWith("login\tpassword")) {
        if (!line.startsWith("prefix")) {
            String[] lineArray = line.split("\t");
            if (lineArray.length > 0) {
                integPrefixes.add(lineArray[0]);
            }
            if (lineArray.length > 1) {
                integOrgid.add(lineArray[1]);
            }
        }
        line = reader.readLine();
    }

    // get user details
    // login, password, fname, lname, email
    line = reader.readLine(); // skip line containing column headings
    while (!line.startsWith("sid\tname")) {
        String[] userDetails = line.split("\t");
        line = reader.readLine();
        if (!includeIntegrated && isIntegratedUser(integPrefixes, userDetails[0])) {
            continue;
        }
        V1UserDTO userDTO = new V1UserDTO(userDetails[0], userDetails[1], userDetails[2], userDetails[3]);
        if (userDetails.length > 4 && !StringUtils.equals(userDetails[4], "NULL")) {
            userDTO.setEmail(userDetails[4]);
        }
        users.add(userDTO);
    }

    // get organisations
    // sid, name, description, account_organisation
    line = reader.readLine();
    while (!line.startsWith("login\tg")) {
        String[] orgDetails = line.split("\t");
        line = reader.readLine();
        if (orgDetails.length != 4) {
            log.debug("LAMS 1 text file has troublesome organisation: ");
            for (int i = 0; i < orgDetails.length; i++) {
                log.debug("column: " + orgDetails[i]);
            }
            continue;
        }
        if (!includeIntegrated) {
            if (integOrgid.contains(orgDetails[0])) {
                continue;
            }
        }
        V1OrganisationDTO org = new V1OrganisationDTO(orgDetails[0], orgDetails[1], orgDetails[2],
                orgDetails[3]);
        orgs.add(org);
    }

    // gather user roles, which are 1 role per user per line, into a dto of roles for the user
    // login, role id
    line = reader.readLine();
    ArrayList<String> currentRoles = new ArrayList<String>();
    String currentLogin = "";
    while (!line.startsWith("login\tsid")) {
        String[] userRole = line.split("\t");
        line = reader.readLine();
        if (!includeIntegrated && isIntegratedUser(integPrefixes, userRole[0])) {
            continue;
        }
        if (!StringUtils.equals(userRole[0], currentLogin)) {
            if (!currentRoles.isEmpty()) {
                int index = users.indexOf(new V1UserDTO(currentLogin));
                V1UserDTO userDTO = users.get(index);
                userDTO.setRoleIds(new ArrayList<String>(currentRoles));
                users.set(index, userDTO);
            }
            currentLogin = userRole[0];
            currentRoles.clear();
        }
        currentRoles.add(userRole[1]);
    }
    int index = users.indexOf(new V1UserDTO(currentLogin));
    V1UserDTO userDTO = users.get(index);
    userDTO.setRoleIds(new ArrayList<String>(currentRoles));
    users.set(index, userDTO);

    // get user rights
    // login, org id, right id
    line = reader.readLine();
    while (line != null) {
        String[] userRight = line.split("\t");
        line = reader.readLine();
        if (!includeIntegrated && isIntegratedUser(integPrefixes, userRight[0])) {
            continue;
        }
        V1OrgRightDTO orgRightDTO = new V1OrgRightDTO(userRight[1], userRight[2]);
        index = users.indexOf(new V1UserDTO(userRight[0]));
        userDTO = users.get(index);
        List<V1OrgRightDTO> orgRights = userDTO.getOrgRights();
        if (orgRights == null) {
            orgRights = new ArrayList<V1OrgRightDTO>();
        }
        orgRights.add(orgRightDTO);
        userDTO.setOrgRights(orgRights);
        users.set(index, userDTO);
    }
    results.add(users);
    results.add(orgs);
    return results;
}
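The repeated users.indexOf(new V1UserDTO(login)) calls above work only because indexOf compares elements with equals, so a probe object carrying just the login can locate the fully populated record. A minimal sketch of the technique, with a hypothetical UserDTO whose equals considers the login alone (presumably what V1UserDTO does):

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

// Hypothetical DTO: equality is defined by login alone, mirroring the V1UserDTO lookup above.
class UserDTO {
    final String login;
    String email; // extra state, not part of equality

    UserDTO(String login) { this.login = login; }

    @Override
    public boolean equals(Object o) {
        return o instanceof UserDTO && ((UserDTO) o).login.equals(login);
    }

    @Override
    public int hashCode() { return Objects.hash(login); }
}

public class ProbeLookup {
    public static void main(String[] args) {
        List<UserDTO> users = new ArrayList<>();
        UserDTO alice = new UserDTO("alice");
        alice.email = "alice@example.org";
        users.add(alice);

        // A bare probe object is enough to find the fully populated record.
        int index = users.indexOf(new UserDTO("alice"));
        System.out.println(index);                  // 0
        System.out.println(users.get(index).email); // alice@example.org
    }
}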
From source file:org.apache.axis.encoding.SerializationContextImpl.java
/**
 * Writes (using the Writer) the start tag for element QName along with the
 * indicated attributes and namespace mappings.
 * @param qName is the name of the element
 * @param attributes are the attributes to write
 */
public void startElement(QName qName, Attributes attributes) throws IOException {
    java.util.ArrayList vecQNames = null;
    if (log.isDebugEnabled()) {
        log.debug(Messages.getMessage("startElem00",
                "[" + qName.getNamespaceURI() + "]:" + qName.getLocalPart()));
    }
    if (startOfDocument && sendXMLDecl) {
        writer.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
        startOfDocument = false;
    }
    if (writingStartTag) {
        writer.write('>');
        if (pretty) writer.write('\n');
        indent++;
    }
    if (pretty) for (int i = 0; i < indent; i++) writer.write(' ');
    String elementQName = qName2String(qName, true);
    writer.write('<');
    writer.write(elementQName);
    if (attributes != null) {
        for (int i = 0; i < attributes.getLength(); i++) {
            String qname = attributes.getQName(i);
            writer.write(' ');
            String prefix = "";
            String uri = attributes.getURI(i);
            if (uri != null && uri.length() > 0) {
                if (qname.length() == 0) {
                    // If qname isn't set, generate one
                    prefix = getPrefixForURI(uri);
                } else {
                    // If it is, make sure the prefix looks reasonable.
                    int idx = qname.indexOf(':');
                    if (idx > -1) {
                        prefix = qname.substring(0, idx);
                        prefix = getPrefixForURI(uri, prefix, true);
                    }
                }
                if (prefix.length() > 0) {
                    qname = prefix + ':' + attributes.getLocalName(i);
                } else {
                    qname = attributes.getLocalName(i);
                }
            } else {
                qname = attributes.getQName(i);
                if (qname.length() == 0)
                    qname = attributes.getLocalName(i);
            }
            if (qname.startsWith("xmlns")) {
                if (vecQNames == null) vecQNames = new ArrayList();
                vecQNames.add(qname);
            }
            writer.write(qname);
            writer.write("=\"");
            writer.write(XMLUtils.xmlEncodeString(attributes.getValue(i)));
            writer.write('"');
        }
    }
    if (noNamespaceMappings) {
        nsStack.push();
    } else {
        for (Mapping map = nsStack.topOfFrame(); map != null; map = nsStack.next()) {
            StringBuffer sb = new StringBuffer("xmlns");
            if (map.getPrefix().length() > 0) {
                sb.append(':');
                sb.append(map.getPrefix());
            }
            if ((vecQNames == null) || (vecQNames.indexOf(sb.toString()) == -1)) {
                writer.write(' ');
                sb.append("=\"");
                sb.append(map.getNamespaceURI());
                sb.append('"');
                writer.write(sb.toString());
            }
        }
        noNamespaceMappings = true;
    }
    writingStartTag = true;
    elementStack.push(elementQName);
    onlyXML = true;
}
From source file:com.edgenius.wiki.service.impl.ThemeServiceImpl.java
public List<Skin> getAvailableSkins() {
    /*
     * Get skins from the list of installed zip files first, then validate whether they are exploded...
     * I am just not very sure explode mode is a good way to implement skins. Maybe change to a
     * servlet that downloads theme resources rather than putting them into the web root directory.
     * However, ... so far, reading the zip is not quite necessary...
     */
    Map<Long, Skin> skinList = new TreeMap<Long, Skin>(new CompareToComparator<Long>(
            CompareToComparator.TYPE_KEEP_SAME_VALUE | CompareToComparator.DESCEND));
    ArrayList<Skin> skins = new ArrayList<Skin>();
    try {
        if (!skinResourcesRoot.getFile().isDirectory()) {
            log.error("The skin install root is not directory {}, no install skin detected",
                    skinResourcesRoot.getFile().getAbsolutePath());
            return skins;
        }
        File[] files = skinResourcesRoot.getFile()
                .listFiles((FileFilter) new SuffixFileFilter(INSTALL_EXT_NAME));
        String appliedSkin = Global.Skin;
        if (StringUtils.isBlank(appliedSkin)) {
            appliedSkin = Skin.DEFAULT_SKIN;
        }
        for (File zip : files) {
            String skinXML = getMetafile(zip, "skin.xml");
            InputStream sis = IOUtils.toInputStream(skinXML);
            Skin skin = Skin.fromXML(sis);
            IOUtils.closeQuietly(sis);
            if (StringUtils.isBlank(skin.getPreviewImageName())) {
                skin.setPreviewImageName(Skin.DEFAULT_PREVIEW_IMAGE);
            }
            // set initial status
            long factor = zip.lastModified();
            if (appliedSkin.equalsIgnoreCase(skin.getName())) {
                factor = Long.MAX_VALUE; // always first
                skin.setStatus(Skin.STATUS_APPLIED);
                // cannot remove the applied skin
                skin.setRemovable(false);
            } else {
                skin.setStatus(Skin.STATUS_CANDIDATE);
                skin.setRemovable(true);
                // we also assume the user may put the default skin zip into the install directory
                if (Skin.DEFAULT_SKIN.equalsIgnoreCase(skin.getName())) {
                    // cannot remove the default skin
                    skin.setRemovable(false);
                }
            }
            // put into list
            skinList.put(factor, skin);
        }
        skins = new ArrayList<Skin>(skinList.values());

        //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        // Append default skin and decide Skin.STATUS_DEPLOYED
        if (!skinExplosionRoot.getFile().isDirectory()) {
            log.error("The skin explosion root is not directory {}, no deployed skin detected",
                    skinExplosionRoot.getFile().getAbsolutePath());
        } else {
            File[] deployDirs = skinExplosionRoot.getFile()
                    .listFiles((FileFilter) DirectoryFileFilter.INSTANCE);
            for (File dir : deployDirs) {
                File file = getSkinXML(dir);
                if (!file.exists()) {
                    log.warn("Unable to find skin.xml on skin directory {}", dir.getAbsolutePath());
                    continue;
                }
                // Load the default value of this skin from the file system
                FileInputStream is = null;
                try {
                    is = new FileInputStream(file);
                    Skin skin = Skin.fromXML(is);
                    if (skin != null) {
                        int idx = skins.indexOf(skin);
                        if (idx != -1) {
                            skin = skins.get(idx);
                            if (skin.getStatus() < Skin.STATUS_APPLIED) {
                                skin.setStatus(Skin.STATUS_DEPLOYED);
                            }
                        } else {
                            // No zipped default skin in the install directory, so insert it into the return list.
                            if (Skin.DEFAULT_SKIN.equalsIgnoreCase(skin.getName())) {
                                // cannot remove the default skin
                                int ins;
                                // if it is applied, always first; otherwise, second
                                skin.setRemovable(false);
                                if (appliedSkin.equalsIgnoreCase(skin.getName())) {
                                    ins = 0;
                                    skin.setStatus(Skin.STATUS_APPLIED);
                                } else {
                                    ins = skins.size() > 0 ? 1 : 0;
                                    skin.setStatus(Skin.STATUS_DEPLOYED);
                                }
                                // insert default skin
                                skins.add(ins, skin);
                            }
                        }
                    }
                } catch (Exception e) {
                    log.error("Failed load skin " + file.getAbsolutePath(), e);
                } finally {
                    IOUtils.closeQuietly(is);
                }
            }
            for (Iterator<Skin> iter = skins.iterator(); iter.hasNext();) {
                Skin skin = iter.next();
                if (skin.getStatus() == Skin.STATUS_CANDIDATE) {
                    // remove it from the list
                    iter.remove();
                    log.warn("Skin {} is removed from visible list as it was not deployed.", skin.getName());
                }
            }
        }
    } catch (IOException e) {
        log.error("Unable retrieve skin home directory.", e);
    }
    return skins;
}
From source file:imitationNLG.SFX.java
public String chooseNextValue(String attribute, HashSet<String> attrValuesToBeMentioned,
        ArrayList<DatasetInstance> trainingData) {
    HashMap<String, Integer> relevantValues = new HashMap<>();
    for (String attrValue : attrValuesToBeMentioned) {
        String attr = attrValue.substring(0, attrValue.indexOf('='));
        String value = attrValue.substring(attrValue.indexOf('=') + 1);
        if (attr.equals(attribute)) {
            relevantValues.put(value, 0);
        }
    }
    if (!relevantValues.isEmpty()) {
        if (relevantValues.keySet().size() == 1) {
            for (String value : relevantValues.keySet()) {
                return value;
            }
        } else {
            String bestValue = "";
            int minIndex = Integer.MAX_VALUE;
            for (String value : relevantValues.keySet()) {
                if (value.startsWith("x")) {
                    int vI = Integer.parseInt(value.substring(1));
                    if (vI < minIndex) {
                        minIndex = vI;
                        bestValue = value;
                    }
                }
            }
            if (!bestValue.isEmpty()) {
                return bestValue;
            }
            for (DatasetInstance di : trainingData) {
                for (ArrayList<Action> ac : di.getEvalMentionedValueSequences().keySet()) {
                    ArrayList<String> mentionedValueSeq = di.getEvalMentionedValueSequences().get(ac);
                    boolean doesSeqContainValues = true;
                    minIndex = Integer.MAX_VALUE;
                    for (String value : relevantValues.keySet()) {
                        int index = mentionedValueSeq.indexOf(attribute + "=" + value);
                        if (index != -1 && index < minIndex) {
                            minIndex = index;
                            bestValue = value;
                        } else if (index == -1) {
                            doesSeqContainValues = false;
                        }
                    }
                    if (doesSeqContainValues) {
                        relevantValues.put(bestValue, relevantValues.get(bestValue) + 1);
                    }
                }
            }
            int max = -1;
            for (String value : relevantValues.keySet()) {
                if (relevantValues.get(value) > max) {
                    max = relevantValues.get(value);
                    bestValue = value;
                }
            }
            return bestValue;
        }
    }
    return "";
}
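chooseNextValue ranks candidate values by where they first appear in previously seen sequences: indexOf yields each candidate's position (or -1), and the smallest non-negative index wins. The core of that ordering step, distilled into a hypothetical earliest helper:

import java.util.List;

public class EarliestMention {
    // Returns the candidate that occurs earliest in seq, or "" if none occurs.
    static String earliest(List<String> seq, List<String> candidates) {
        String best = "";
        int minIndex = Integer.MAX_VALUE;
        for (String c : candidates) {
            int index = seq.indexOf(c); // -1 when c never occurs
            if (index != -1 && index < minIndex) {
                minIndex = index;
                best = c;
            }
        }
        return best;
    }

    public static void main(String[] args) {
        List<String> mentioned = List.of("area=centre", "food=thai", "pricerange=cheap");
        System.out.println(earliest(mentioned, List.of("pricerange=cheap", "food=thai"))); // food=thai
    }
}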
From source file:nl.systemsgenetics.cellTypeSpecificAlleleSpecificExpression.PhasedEntry.java
public PhasedEntry(String asLocations, String couplingLoc, String outputLocation, String cellPropLoc,
        String phasingLocation, String regionLocation) throws IOException, Exception {
    /**
     * This method will perform a binomial test for some test region.
     * Later, additional features will be added.
     *
     * Current flow of the program:
     * 1. read all SNPs from AS files and add overdispersion and cellprop to the files
     * 2. read phasing and assign alleles for these snps
     * 3. load test regions and determine test snps
     * 4. determine log likelihood for test snps (with some deduplication)
     */

    // 1. Read all SNPs from AS files
    ArrayList<String> allFiles = UtilityMethods.readFileIntoStringArrayList(asLocations);
    ReadAsLinesIntoIndividualSNPdata asReader = new ReadAsLinesIntoIndividualSNPdata(asLocations);
    HashMap<String, ArrayList<IndividualSnpData>> snpHashMap = new HashMap<String, ArrayList<IndividualSnpData>>();
    HashMap<String, String> posNameMap = new HashMap<String, String>();

    // first determine overdispersion values per sample
    ArrayList<BetaBinomOverdispInSample> dispersionParameters = new ArrayList<BetaBinomOverdispInSample>();
    String dispersionOutput = FilenameUtils.getFullPath(outputLocation)
            + FilenameUtils.getBaseName(outputLocation) + "_dispersionFile.txt";
    PrintWriter dispersionWriter = new PrintWriter(dispersionOutput, "UTF-8");
    dispersionWriter.write("Filename\tdispersion\n");
    int i = 0;
    for (String asLoc : allFiles) {
        dispersionParameters.add(new BetaBinomOverdispInSample(asLoc));
        dispersionWriter.printf("%s\t%.6f\n", dispersionParameters.get(i).getSampleName(),
                dispersionParameters.get(i).getOverdispersion()[0]);
        i++;
    }
    dispersionWriter.close();
    if (GlobalVariables.verbosity >= 10) {
        System.out.println("--------------------------------------------------");
        System.out.println("Finished dispersion estimates for all individuals.");
        System.out.println("--------------------------------------------------");
    }

    boolean hasCellProp = false;
    ArrayList<String> phenoString = new ArrayList<String>();
    if (cellPropLoc != null) {
        hasCellProp = true;
        phenoString = UtilityMethods.readFileIntoStringArrayList(cellPropLoc);
    }

    // second reading of the AS files
    while (true) {
        // read some stuff from the files
        ArrayList<IndividualSnpData> tempSNPdata;
        tempSNPdata = asReader.getIndividualsFromNextLine();
        if (tempSNPdata.isEmpty())
            break;
        // I can safely assume all snps are the same per line, based on
        // checks done in getIndividualsFromNextLine.
        String snpName = tempSNPdata.get(0).getSnpName();
        String chr = tempSNPdata.get(0).getChromosome();
        String posString = tempSNPdata.get(0).getPosition();

        // add dispersion values to the SNPs:
        for (int j = 0; j < tempSNPdata.size(); j++) {
            if (!tempSNPdata.get(j).getSampleName().equals(dispersionParameters.get(j).getSampleName())) {
                System.out.println(tempSNPdata.get(j).getSampleName());
                System.out.println(dispersionParameters.get(j).getSampleName());
                throw new IllegalDataException(
                        "the name of the individual in the dispersion data is not the same as the individual name in the SNP");
            }
            tempSNPdata.get(j).setDispersion(dispersionParameters.get(j).getOverdispersion()[0]);
            if (hasCellProp) {
                tempSNPdata.get(j).setCellTypeProp(Double.parseDouble(phenoString.get(j)));
            }
        }
        posNameMap.put(chr + ":" + posString, snpName);
        // take the SNP name and arraylist and put them in the hashmap
        snpHashMap.put(chr + ":" + posString, tempSNPdata);
    }
    if (GlobalVariables.verbosity >= 10) {
        System.out.println("all AS info Snps were read");
    }

    // 2. Load test regions and determine the snps in the region.
    if (GlobalVariables.verbosity >= 10) {
        System.out.println("Starting the assignment of snps to regions.");
    }
    ArrayList<GenomicRegion> allRegions;
    allRegions = ReadGenomicRegions(regionLocation);

    // 3. Read phasing info for these snps
    Pair<HashMap<String, ArrayList<IndividualSnpData>>, ArrayList<GenomicRegion>> phasedPair;
    phasedPair = addPhasingToSNPHashMap(snpHashMap, couplingLoc, allRegions, phasingLocation);
    snpHashMap = phasedPair.getLeft();
    allRegions = phasedPair.getRight();
    phasedPair = null;
    if (GlobalVariables.verbosity >= 10) {
        System.out.println("Added phasing information to AS values of snps.");
    }

    /**
     * 4. Start testing, per region:
     *
     * 4.1 Determine the test snp in the region; this will be the reference value.
     * 4.2 Determine the heterozygotes for the test snp.
     * 4.3 Switch alt and ref values of the heterozygotes in the test region
     *     with respect to the test snp. Add the new list to a binomial test.
     * 4.4 Do the test in BinomialTest.java and others in the future.
     */

    // write output to these files
    PrintWriter writerBinom = new PrintWriter(FilenameUtils.getFullPath(outputLocation)
            + FilenameUtils.getBaseName(outputLocation) + "_Binomial_results.txt", "UTF-8");
    PrintWriter writerBetaBinom = new PrintWriter(FilenameUtils.getFullPath(outputLocation)
            + FilenameUtils.getBaseName(outputLocation) + "_BetaBinomial_results.txt", "UTF-8");
    PrintWriter writerCTSBinom = new PrintWriter(FilenameUtils.getFullPath(outputLocation)
            + FilenameUtils.getBaseName(outputLocation) + "_CellTypeSpecificBinomial_results.txt", "UTF-8");
    PrintWriter writerCTSBetaBinom = new PrintWriter(FilenameUtils.getFullPath(outputLocation)
            + FilenameUtils.getBaseName(outputLocation) + "_CellTypeSpecificBetaBinomial_results.txt", "UTF-8");

    for (GenomicRegion iRegion : allRegions) {
        System.out.println(iRegion.getAnnotation());
        // I may want to change this into all test SNPs compared to all snps in the region;
        // needs to be implemented still.
        ArrayList<String> snpsInRegion = iRegion.getSnpInRegions();
        ArrayList<IndividualSnpData> allHetsInRegion = new ArrayList<IndividualSnpData>();
        // Don't want to do this in every iteration of the next loop.
        for (String regionSnp : snpsInRegion) {
            allHetsInRegion.addAll(
                    UtilityMethods.isolateHeterozygotesFromIndividualSnpData(snpHashMap.get(regionSnp)));
        }
        HashSet<String> combinationsDone = new HashSet<String>();
        HashMap<String, BinomialTest> storedBinomTests = new HashMap<String, BinomialTest>();
        HashMap<String, BetaBinomialTest> storedBetaBinomTests = new HashMap<String, BetaBinomialTest>();
        // PLEASE NOTE: CELL TYPE SPECIFIC FUNCTIONALITY HAS NOT YET BEEN IMPLEMENTED.
        // The plan is to use these in the future, so keeping them in.
        HashMap<String, CTSbinomialTest> storedCTSBinomTests = new HashMap<String, CTSbinomialTest>();
        HashMap<String, CTSBetaBinomialTest> storedCTSBetaBinomTests = new HashMap<String, CTSBetaBinomialTest>();

        for (String testSnp : snpsInRegion) {
            ArrayList<IndividualSnpData> hetTestSnps = UtilityMethods
                    .isolateHeterozygotesFromIndividualSnpData(snpHashMap.get(testSnp));
            // Check if the snp has phasing, but also see if there are heterozygous SNPs in the region.
            try {
                if (!hetTestSnps.get(0).hasPhasing()) {
                    System.out.println("\tno phasing");
                    continue;
                }
            } catch (Exception e) {
                continue;
            }
            StringBuilder inputIdA = new StringBuilder();
            StringBuilder inputIdB = new StringBuilder();
            ArrayList<String> hetTestNames = new ArrayList<String>();
            for (IndividualSnpData hetSample : hetTestSnps) {
                inputIdA.append(hetSample.sampleName);
                inputIdA.append(hetSample.getPhasingFirst());
                inputIdB.append(hetSample.sampleName);
                inputIdB.append(hetSample.getPhasingSecond());
                hetTestNames.add(hetSample.sampleName);
            }
            String refStringA = inputIdA.toString();
            String refStringB = inputIdB.toString();

            if (hetTestSnps.size() >= GlobalVariables.minHets) {
                // make sure we don't do the same test twice
                if (combinationsDone.contains(refStringA)) {
                    BinomialTest binomForAddition = storedBinomTests.get(refStringA);
                    BetaBinomialTest betaBinomForAddition = storedBetaBinomTests.get(refStringA);
                    // there is duplication here to make sure it is stored under the correct name
                    if (binomForAddition == null) {
                        binomForAddition = storedBinomTests.get(refStringB);
                        binomForAddition.addAdditionalSNP(hetTestSnps.get(0).snpName,
                                hetTestSnps.get(0).position);
                        betaBinomForAddition = storedBetaBinomTests.get(refStringB);
                        betaBinomForAddition.addAdditionalSNP(hetTestSnps.get(0).snpName,
                                hetTestSnps.get(0).position);
                        storedBinomTests.put(refStringB, binomForAddition);
                        storedBetaBinomTests.put(refStringB, betaBinomForAddition);
                    } else {
                        binomForAddition.addAdditionalSNP(hetTestSnps.get(0).snpName,
                                hetTestSnps.get(0).position);
                        storedBinomTests.put(refStringA, binomForAddition);
                        betaBinomForAddition.addAdditionalSNP(hetTestSnps.get(0).snpName,
                                hetTestSnps.get(0).position);
                        storedBetaBinomTests.put(refStringA, betaBinomForAddition);
                    }
                    continue;
                }

                ArrayList<IndividualSnpData> phasedSNPsForTest = new ArrayList<IndividualSnpData>();
                Set<String> uniqueGeneSNPnames = new HashSet<String>();

                // The following loop determines which SNPs will be used as test data.
                for (int j = 0; j < allHetsInRegion.size(); j++) {
                    IndividualSnpData thisHet = allHetsInRegion.get(j);
                    int snpPos = Integer.parseInt(thisHet.position);
                    // First check if the heterozygote is in the test region
                    if (snpPos < iRegion.getStartPosition() || snpPos > iRegion.getEndPosition()) {
                        continue;
                    }
                    String sampleName = thisHet.sampleName;
                    uniqueGeneSNPnames.add(thisHet.snpName);
                    if (!hetTestNames.contains(thisHet.sampleName) || !thisHet.hasPhasing()) {
                        continue;
                    }
                    // this is the heterozygote to compare to
                    IndividualSnpData hetToCompareTo = hetTestSnps.get(hetTestNames.indexOf(sampleName));
                    if (hetToCompareTo.getPhasingFirst() != thisHet.getPhasingFirst()) {
                        // Because it is a heterozygote, we can assume that
                        // first is 0 and second is 1, or the other way around.
                        // If the first allele in this snp doesn't match the
                        // first in the test snp, we have to switch the ref
                        // and alt alleles.
                        int temp = thisHet.refNum;
                        thisHet.refNum = thisHet.altNum;
                        thisHet.altNum = temp;
                    }
                    phasedSNPsForTest.add(thisHet);
                }

                if (GlobalVariables.verbosity >= 10) {
                    System.out.println("\n----------------------------------------");
                    System.out.println("Testing Region: " + iRegion.getAnnotation());
                    System.out.println("With the following test SNP: " + hetTestSnps.get(0).snpName);
                    System.out.println("Using the following gene SNPs: ");
                    int whatSNP = 0;
                    System.out.print("\t[ ");
                    for (String snpName : uniqueGeneSNPnames) {
                        System.out.print(snpName);
                        if ((whatSNP % 4 == 3) && (whatSNP != uniqueGeneSNPnames.size() - 1)) {
                            System.out.print(",\n\t ");
                        } else if (whatSNP != uniqueGeneSNPnames.size() - 1) {
                            System.out.print(", ");
                        }
                        whatSNP += 1;
                    }
                    System.out.println(" ]");
                    System.out.println("----------------------------------------\n");
                }

                BinomialTest thisBinomTest;
                thisBinomTest = BinomialTest.phasedBinomialTest(phasedSNPsForTest, iRegion, hetTestSnps.size());
                thisBinomTest.addAdditionalSNP(hetTestSnps.get(0).snpName, hetTestSnps.get(0).position);
                thisBinomTest.setGenotype(hetTestSnps.get(0).genotype);
                storedBinomTests.put(refStringA, thisBinomTest);

                BetaBinomialTest thisBetaBinomTest;
                thisBetaBinomTest = BetaBinomialTest.phasedBetaBinomialTest(phasedSNPsForTest, iRegion,
                        hetTestSnps.size());
                thisBetaBinomTest.addAdditionalSNP(hetTestSnps.get(0).snpName, hetTestSnps.get(0).position);
                thisBetaBinomTest.setGenotype(hetTestSnps.get(0).genotype);
                storedBetaBinomTests.put(refStringA, thisBetaBinomTest);

                // make sure we don't have to do the computationally intensive tests again
                combinationsDone.add(refStringA);
                combinationsDone.add(refStringB);
            }
        }

        for (String thisTestName : storedBinomTests.keySet()) {
            BinomialTest thisBinomTest = storedBinomTests.get(thisTestName);
            BetaBinomialTest thisBetaBinomTest = storedBetaBinomTests.get(thisTestName);
            if (thisBinomTest.isTestPerformed()) {
                writerBinom.println(writeBinomialTestOutput(thisBinomTest));
                writerBetaBinom.println(writeBetaBinomialTestOutput(thisBetaBinomTest));
            }
        }
    }
    // close the files
    writerBinom.close();
    writerBetaBinom.close();
    writerCTSBinom.close();
    writerCTSBetaBinom.close();
}
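hetTestNames and hetTestSnps above are parallel lists built in the same loop, so hetTestNames.indexOf(sampleName) yields the position at which the matching record sits in the sibling list. A tiny sketch of that parallel-list join:

import java.util.List;

public class ParallelLookup {
    public static void main(String[] args) {
        // Two lists kept in the same order, as with hetTestNames/hetTestSnps above.
        List<String> names = List.of("sample1", "sample2", "sample3");
        List<Integer> depth = List.of(40, 55, 23);

        int i = names.indexOf("sample2"); // position in the key list
        System.out.println(depth.get(i)); // 55, the value at the same position
    }
}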
From source file:org.apache.axis.encoding.SerializationContext.java
/**
 * Writes (using the Writer) the start tag for element QName along with the
 * indicated attributes and namespace mappings.
 * @param qName is the name of the element
 * @param attributes are the attributes to write
 */
public void startElement(QName qName, Attributes attributes) throws IOException {
    java.util.ArrayList vecQNames = null;
    if (debugEnabled) {
        log.debug(Messages.getMessage("startElem00",
                "[" + qName.getNamespaceURI() + "]:" + qName.getLocalPart()));
    }
    if (startOfDocument && sendXMLDecl) {
        writeXMLDeclaration();
    }
    if (writingStartTag) {
        writer.write('>');
        if (pretty) writer.write('\n');
        indent++;
    }
    if (pretty) for (int i = 0; i < indent; i++) writer.write(' ');
    String elementQName = qName2String(qName, true);
    writer.write('<');
    writer.write(elementQName);
    if (writeXMLType != null) {
        attributes = setTypeAttribute(attributes, writeXMLType);
        writeXMLType = null;
    }
    if (attributes != null) {
        for (int i = 0; i < attributes.getLength(); i++) {
            String qname = attributes.getQName(i);
            writer.write(' ');
            String prefix = "";
            String uri = attributes.getURI(i);
            if (uri != null && uri.length() > 0) {
                if (qname.length() == 0) {
                    // If qname isn't set, generate one
                    prefix = getPrefixForURI(uri);
                } else {
                    // If it is, make sure the prefix looks reasonable.
                    int idx = qname.indexOf(':');
                    if (idx > -1) {
                        prefix = qname.substring(0, idx);
                        prefix = getPrefixForURI(uri, prefix, true);
                    }
                }
                if (prefix.length() > 0) {
                    qname = prefix + ':' + attributes.getLocalName(i);
                } else {
                    qname = attributes.getLocalName(i);
                }
            } else {
                qname = attributes.getQName(i);
                if (qname.length() == 0)
                    qname = attributes.getLocalName(i);
            }
            if (qname.startsWith("xmlns")) {
                if (vecQNames == null) vecQNames = new ArrayList();
                vecQNames.add(qname);
            }
            writer.write(qname);
            writer.write("=\"");
            getEncoder().writeEncoded(writer, attributes.getValue(i));
            writer.write('"');
        }
    }
    if (noNamespaceMappings) {
        nsStack.push();
    } else {
        for (Mapping map = nsStack.topOfFrame(); map != null; map = nsStack.next()) {
            if (!(map.getNamespaceURI().equals(Constants.NS_URI_XMLNS) && map.getPrefix().equals("xmlns"))
                    && !(map.getNamespaceURI().equals(Constants.NS_URI_XML) && map.getPrefix().equals("xml"))) {
                StringBuffer sb = new StringBuffer("xmlns");
                if (map.getPrefix().length() > 0) {
                    sb.append(':');
                    sb.append(map.getPrefix());
                }
                if ((vecQNames == null) || (vecQNames.indexOf(sb.toString()) == -1)) {
                    writer.write(' ');
                    sb.append("=\"");
                    sb.append(map.getNamespaceURI());
                    sb.append('"');
                    writer.write(sb.toString());
                }
            }
        }
        noNamespaceMappings = true;
    }
    writingStartTag = true;
    elementStack.push(elementQName);
    onlyXML = true;
}