List of usage examples for java.util.LinkedHashSet.add
boolean add(E e);
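The method appends the given element to the set if it is not already present: it returns true when the set changed and false for a duplicate, and iteration later follows insertion order. A minimal sketch of that behavior (class and variable names are illustrative only):

import java.util.LinkedHashSet;

public class LinkedHashSetAddDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> tags = new LinkedHashSet<>();

        // add returns true when the element was not yet in the set
        System.out.println(tags.add("alpha")); // true
        System.out.println(tags.add("beta"));  // true

        // a duplicate is rejected and the return value is false
        System.out.println(tags.add("alpha")); // false

        // iteration follows insertion order
        System.out.println(tags);              // [alpha, beta]
    }
}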
From source file:net.sf.jabref.importer.fileformat.JSONEntryParser.java
/**
 * Convert a JSONObject containing a bibJSON entry to a BibEntry
 *
 * @param bibJsonEntry The JSONObject to convert
 * @return the converted BibEntry
 */
public BibEntry parseBibJSONtoBibtex(JSONObject bibJsonEntry) {
    // Fields that are directly accessible at the top level BibJson object
    String[] singleFieldStrings = { FieldName.YEAR, FieldName.TITLE, FieldName.ABSTRACT, FieldName.MONTH };

    // Fields that are accessible in the journal part of the BibJson object
    String[] journalSingleFieldStrings = { FieldName.PUBLISHER, FieldName.NUMBER, FieldName.VOLUME };

    BibEntry entry = new BibEntry();
    entry.setType("article");

    // Authors
    if (bibJsonEntry.has("author")) {
        JSONArray authors = bibJsonEntry.getJSONArray("author");
        List<String> authorList = new ArrayList<>();
        for (int i = 0; i < authors.length(); i++) {
            if (authors.getJSONObject(i).has("name")) {
                authorList.add(authors.getJSONObject(i).getString("name"));
            } else {
                LOGGER.info("Empty author name.");
            }
        }
        entry.setField(FieldName.AUTHOR, String.join(" and ", authorList));
    } else {
        LOGGER.info("No author found.");
    }

    // Directly accessible fields
    for (String field : singleFieldStrings) {
        if (bibJsonEntry.has(field)) {
            entry.setField(field, bibJsonEntry.getString(field));
        }
    }

    // Page numbers
    if (bibJsonEntry.has("start_page")) {
        if (bibJsonEntry.has("end_page")) {
            entry.setField(FieldName.PAGES,
                    bibJsonEntry.getString("start_page") + "--" + bibJsonEntry.getString("end_page"));
        } else {
            entry.setField(FieldName.PAGES, bibJsonEntry.getString("start_page"));
        }
    }

    // Journal
    if (bibJsonEntry.has("journal")) {
        JSONObject journal = bibJsonEntry.getJSONObject("journal");
        // Journal title
        if (journal.has("title")) {
            entry.setField(FieldName.JOURNAL, journal.getString("title"));
        } else {
            LOGGER.info("No journal title found.");
        }
        // Other journal related fields
        for (String field : journalSingleFieldStrings) {
            if (journal.has(field)) {
                entry.setField(field, journal.getString(field));
            }
        }
    } else {
        LOGGER.info("No journal information found.");
    }

    // Keywords
    if (bibJsonEntry.has("keywords")) {
        JSONArray keywords = bibJsonEntry.getJSONArray("keywords");
        LinkedHashSet<String> keywordList = new LinkedHashSet<>();
        for (int i = 0; i < keywords.length(); i++) {
            if (!keywords.isNull(i)) {
                keywordList.add(keywords.getString(i));
            }
        }
        entry.putKeywords(keywordList, Globals.prefs.get(JabRefPreferences.KEYWORD_SEPARATOR));
    }

    // Identifiers
    if (bibJsonEntry.has("identifier")) {
        JSONArray identifiers = bibJsonEntry.getJSONArray("identifier");
        for (int i = 0; i < identifiers.length(); i++) {
            String type = identifiers.getJSONObject(i).getString("type");
            if ("doi".equals(type)) {
                entry.setField(FieldName.DOI, identifiers.getJSONObject(i).getString("id"));
            } else if ("pissn".equals(type)) {
                entry.setField(FieldName.ISSN, identifiers.getJSONObject(i).getString("id"));
            } else if ("eissn".equals(type)) {
                entry.setField(FieldName.ISSN, identifiers.getJSONObject(i).getString("id"));
            }
        }
    }

    // Links
    if (bibJsonEntry.has("link")) {
        JSONArray links = bibJsonEntry.getJSONArray("link");
        for (int i = 0; i < links.length(); i++) {
            if (links.getJSONObject(i).has("type")) {
                String type = links.getJSONObject(i).getString("type");
                if ("fulltext".equals(type) && links.getJSONObject(i).has("url")) {
                    entry.setField(FieldName.URL, links.getJSONObject(i).getString("url"));
                }
            }
        }
    }

    return entry;
}
From source file:com.hp.autonomy.idol.parametricvalues.IdolParametricValuesService.java
@Override
public Set<QueryTagInfo> getAllParametricValues(final IdolParametricRequest idolParametricRequest)
        throws AciErrorException {
    final Collection<String> fieldNames = new HashSet<>();
    fieldNames.addAll(idolParametricRequest.getFieldNames());

    if (fieldNames.isEmpty()) {
        fieldNames.addAll(getTagNames());
    }

    final Set<QueryTagInfo> results;

    if (fieldNames.isEmpty()) {
        results = Collections.emptySet();
    } else {
        final AciParameters aciParameters = new AciParameters(TagActions.GetQueryTagValues.name());
        aciParameters.add(QueryParams.Combine.name(), CombineParam.Simple);
        aciParameters.add(QueryParams.Text.name(), idolParametricRequest.getQueryText());
        aciParameters.add(QueryParams.FieldText.name(), idolParametricRequest.getFieldText());
        aciParameters.add(QueryParams.DatabaseMatch.name(), new Databases(idolParametricRequest.getDatabases()));
        aciParameters.add(QueryParams.MinDate.name(), formatDate(idolParametricRequest.getMinDate()));
        aciParameters.add(QueryParams.MaxDate.name(), formatDate(idolParametricRequest.getMaxDate()));
        aciParameters.add(QueryParams.AnyLanguage.name(), true);
        aciParameters.add(GetQueryTagValuesParams.DocumentCount.name(), true);
        aciParameters.add(GetQueryTagValuesParams.MaxValues.name(), MAX_VALUES);
        aciParameters.add(GetQueryTagValuesParams.FieldName.name(), StringUtils.join(fieldNames.toArray(), ','));
        aciParameters.add(GetQueryTagValuesParams.Sort.name(), SortParam.DocumentCount.name());

        final GetQueryTagValuesResponseData responseData = contentAciService.executeAction(aciParameters,
                queryTagValuesResponseProcessor);

        final List<FlatField> fields = responseData.getField();
        results = new LinkedHashSet<>(fields.size());
        for (final FlatField field : fields) {
            final List<JAXBElement<? extends Serializable>> valueElements = field.getValueOrSubvalueOrValues();
            final LinkedHashSet<QueryTagCountInfo> values = new LinkedHashSet<>(valueElements.size());
            for (final JAXBElement<?> element : valueElements) {
                if (VALUE_NODE_NAME.equals(element.getName().getLocalPart())) {
                    final TagValue tagValue = (TagValue) element.getValue();
                    values.add(new QueryTagCountInfo(tagValue.getValue(), tagValue.getCount()));
                }
            }
            final String fieldName = getFieldNameFromPath(field.getName().get(0));
            if (!values.isEmpty()) {
                results.add(new QueryTagInfo(fieldName, values));
            }
        }
    }

    return results;
}
From source file:com.hp.autonomy.searchcomponents.idol.parametricvalues.IdolParametricValuesService.java
@Override
public Set<QueryTagInfo> getAllParametricValues(final IdolParametricRequest parametricRequest)
        throws AciErrorException {
    final Collection<String> fieldNames = new HashSet<>();
    fieldNames.addAll(parametricRequest.getFieldNames());

    if (fieldNames.isEmpty()) {
        fieldNames.addAll(fieldsService.getParametricFields(new IdolFieldsRequest.Builder().build()));
    }

    final Set<QueryTagInfo> results;

    if (fieldNames.isEmpty()) {
        results = Collections.emptySet();
    } else {
        final AciParameters aciParameters = new AciParameters(TagActions.GetQueryTagValues.name());
        parameterHandler.addSearchRestrictions(aciParameters, parametricRequest.getQueryRestrictions());
        if (parametricRequest.isModified()) {
            parameterHandler.addQmsParameters(aciParameters, parametricRequest.getQueryRestrictions());
        }
        aciParameters.add(GetQueryTagValuesParams.DocumentCount.name(), true);
        aciParameters.add(GetQueryTagValuesParams.MaxValues.name(), parametricRequest.getMaxValues());
        aciParameters.add(GetQueryTagValuesParams.FieldName.name(), StringUtils.join(fieldNames.toArray(), ','));
        aciParameters.add(GetQueryTagValuesParams.Sort.name(), SortParam.DocumentCount.name());

        final GetQueryTagValuesResponseData responseData = contentAciService.executeAction(aciParameters,
                queryTagValuesResponseProcessor);

        final List<FlatField> fields = responseData.getField();
        results = new LinkedHashSet<>(fields.size());
        for (final FlatField field : fields) {
            final List<JAXBElement<? extends Serializable>> valueElements = field.getValueOrSubvalueOrValues();
            final LinkedHashSet<QueryTagCountInfo> values = new LinkedHashSet<>(valueElements.size());
            for (final JAXBElement<?> element : valueElements) {
                if (VALUE_NODE_NAME.equals(element.getName().getLocalPart())) {
                    final TagValue tagValue = (TagValue) element.getValue();
                    values.add(new QueryTagCountInfo(tagValue.getValue(), tagValue.getCount()));
                }
            }
            final String fieldName = getFieldNameFromPath(field.getName().get(0));
            if (!values.isEmpty()) {
                results.add(new QueryTagInfo(fieldName, values));
            }
        }
    }

    return results;
}
From source file:com.redhat.rhn.frontend.action.configuration.BaseRankChannels.java
/**
 * Sets up the ranking widget.
 * @param context the request context of the current request
 * @param form the dynaform related to the current request.
 * @param set the rhnset holding the channel ids.
 */
protected void setupWidget(RequestContext context, DynaActionForm form, RhnSet set) {
    User user = context.getCurrentUser();
    LinkedHashSet labelValues = new LinkedHashSet();
    populateWidgetLabels(labelValues, context);
    for (Iterator itr = set.getElements().iterator(); itr.hasNext();) {
        Long ccid = ((RhnSetElement) itr.next()).getElement();
        ConfigChannel channel = ConfigurationManager.getInstance().lookupConfigChannel(user, ccid);
        labelValues.add(lv(channel.getName(), channel.getId().toString()));
    }

    // set the form variables for the widget to read.
    form.set(POSSIBLE_CHANNELS, labelValues);
    if (!labelValues.isEmpty()) {
        if (form.get(SELECTED_CHANNEL) == null) {
            String selected = ((LabelValueBean) labelValues.iterator().next()).getValue();
            form.set(SELECTED_CHANNEL, selected);
        }
    }
}
From source file:org.chromium.content_shell.Shell.java
private void updateHistory(String url) {
    String json = mPref.getString("history", null);
    JSONArray array = new JSONArray();
    if (json != null) {
        try {
            array = new JSONArray(json);
        } catch (JSONException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
    LinkedHashSet<String> history = new LinkedHashSet<String>();
    for (int i = 0; i < array.length(); i++) {
        try {
            history.add(array.getString(i));
        } catch (JSONException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
    if (history.contains(url)) {
        history.remove(url);
    }
    history.add(url);
    if (history.size() > 100) {
        String f = history.iterator().next();
        history.remove(f);
    }
    array = new JSONArray();
    for (String u : history) {
        array.put(u);
    }
    mPref.edit().putString("history", array.toString()).commit();
}
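The example above leans on LinkedHashSet's predictable iteration order to keep a bounded, most-recently-used history: removing and re-adding an existing URL moves it to the end, and once the cap is exceeded the oldest entry (the first one returned by the iterator) is evicted. A stripped-down sketch of that pattern, with hypothetical names and a small cap for illustration:

import java.util.LinkedHashSet;

public class RecentHistory {
    private static final int MAX_ENTRIES = 5; // illustrative cap; the example above uses 100
    private final LinkedHashSet<String> history = new LinkedHashSet<>();

    public void visit(String url) {
        // remove first so a repeat visit is re-added at the end (most recent position)
        history.remove(url);
        history.add(url);
        // evict the oldest entry, which a LinkedHashSet iterator returns first
        if (history.size() > MAX_ENTRIES) {
            history.remove(history.iterator().next());
        }
    }

    public Iterable<String> entriesOldestFirst() {
        return history;
    }
}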
From source file:org.apache.nifi.processors.standard.AttributesToCSV.java
private LinkedHashSet<String> attributeListStringToSet(String attributeList) {
    // take the user specified attribute list string and convert to list of strings.
    LinkedHashSet<String> result = new LinkedHashSet<>();
    if (StringUtils.isNotBlank(attributeList)) {
        String[] ats = attributeList.split(SPLIT_REGEX);
        for (String str : ats) {
            result.add(StringEscapeUtils.unescapeCsv(str.trim()));
        }
    }
    return result;
}
From source file:org.rhq.enterprise.server.sync.test.ExportingInputStreamTest.java
private <T> LinkedHashSet<T> asSet(T... ts) {
    LinkedHashSet<T> ret = new LinkedHashSet<T>();
    for (T t : ts) {
        ret.add(t);
    }
    return ret;
}
From source file:opennlp.tools.jsmlearning.JSMLearnerOnLatticeBase.java
public JSMDecision buildLearningModel(List<String> posTexts, List<String> negTexts, String unknown,
        String[] separationKeywords) {
    psPos = new LinguisticPatternStructure(0, 0);
    psNeg = new LinguisticPatternStructure(0, 0);
    if (separationKeywords != null) {
        // re-sort by occurrence of separation keyword
        Pair<List<String>, List<String>> pair = reGroupByOccurrenceOfSeparationKeyword(posTexts, negTexts,
                separationKeywords);
        posTexts = pair.getFirst();
        negTexts = pair.getSecond();
    }

    List<List<List<ParseTreeChunk>>> lingRepsPos = new ArrayList<List<List<ParseTreeChunk>>>(),
            lingRepsNeg = new ArrayList<List<List<ParseTreeChunk>>>();
    for (String text : posTexts)
        lingRepsPos.add(chunk_maker.formGroupedPhrasesFromChunksForPara(text));

    for (String text : negTexts)
        lingRepsNeg.add(chunk_maker.formGroupedPhrasesFromChunksForPara(text));

    LinkedHashSet<Integer> obj = null;
    int i = 0;
    for (List<List<ParseTreeChunk>> chunk : lingRepsPos) {
        obj = new LinkedHashSet<Integer>();
        obj.add(i);
        psPos.AddIntent(chunk, obj, 0);
        i++;
    }
    i = 0;
    for (List<List<ParseTreeChunk>> chunk : lingRepsNeg) {
        obj = new LinkedHashSet<Integer>();
        obj.add(i);
        psNeg.AddIntent(chunk, obj, 0);
        i++;
    }

    List<List<ParseTreeChunk>> chunksUnknown = chunk_maker.formGroupedPhrasesFromChunksForPara(unknown);

    List<List<List<ParseTreeChunk>>> posIntersections = new ArrayList<List<List<ParseTreeChunk>>>(),
            negIntersections = new ArrayList<List<List<ParseTreeChunk>>>();
    List<List<ParseTreeChunk>> intersection = null;
    for (int iConcept = 0; iConcept < psPos.conceptList.size(); iConcept++) {
        if (psPos.conceptList.get(iConcept).intent != null && psPos.conceptList.get(iConcept).intent.size() > 0) {
            intersection = computeIntersectionWithIntentExtendedByDeduction(psPos, iConcept, chunksUnknown);
            if (reduceList(intersection).size() > 0)
                posIntersections.add(reduceList(intersection));
        }
        if (psNeg.conceptList.get(iConcept).intent != null && psNeg.conceptList.get(iConcept).intent.size() > 0) {
            intersection = md.matchTwoSentencesGroupedChunksDeterministic(psNeg.conceptList.get(iConcept).intent,
                    chunksUnknown);
            if (reduceList(intersection).size() > 0)
                negIntersections.add(reduceList(intersection));
        }
    }

    Pair<List<List<List<ParseTreeChunk>>>, List<List<List<ParseTreeChunk>>>> pair =
            removeInconsistenciesFromPosNegIntersections(posIntersections, negIntersections);

    posIntersections = pair.getFirst();
    negIntersections = pair.getSecond();

    List<List<List<ParseTreeChunk>>> posIntersectionsUnderNeg = new ArrayList<List<List<ParseTreeChunk>>>(),
            negIntersectionsUnderPos = new ArrayList<List<List<ParseTreeChunk>>>();

    for (int iConcept = 0; iConcept < psNeg.conceptList.size(); iConcept++) {
        for (int iConceptJ = 0; iConceptJ < negIntersections.size(); iConceptJ++) {
            intersection = md.matchTwoSentencesGroupedChunksDeterministic(psNeg.conceptList.get(iConcept).intent,
                    negIntersections.get(iConceptJ));
            if (reduceList(intersection).size() > 0)
                posIntersectionsUnderNeg.add(reduceList(intersection));
        }
    }

    for (int iConcept = 0; iConcept < psPos.conceptList.size(); iConcept++) {
        for (int iConceptJ = 0; iConceptJ < posIntersections.size(); iConceptJ++) {
            intersection = md.matchTwoSentencesGroupedChunksDeterministic(psPos.conceptList.get(iConcept).intent,
                    posIntersections.get(iConceptJ));
            if (reduceList(intersection).size() > 0)
                negIntersectionsUnderPos.add(reduceList(intersection));
        }
    }

    List<ParseTreeChunk> posIntersectionsUnderNegLst = flattenParseTreeChunkLst(posIntersectionsUnderNeg);
    List<ParseTreeChunk> negIntersectionsUnderPosLst = flattenParseTreeChunkLst(negIntersectionsUnderPos);

    posIntersectionsUnderNegLst = subtract(posIntersectionsUnderNegLst, negIntersectionsUnderPosLst);
    negIntersectionsUnderPosLst = subtract(negIntersectionsUnderPosLst, posIntersectionsUnderNegLst);

    System.out.println("Pos - neg inters = " + posIntersectionsUnderNegLst);
    System.out.println("Neg - pos inters = " + negIntersectionsUnderPosLst);

    Boolean bPositiveClass = (float) posIntersectionsUnderNegLst.size()
            / (float) negIntersectionsUnderPosLst.size() > 1f;

    JSMDecision decision = new JSMDecision("keywordClassName", bPositiveClass, posIntersections,
            negIntersections, posIntersectionsUnderNeg, negIntersectionsUnderPos, separationKeywords);

    return decision;
}
From source file:org.pentaho.di.ui.trans.steps.annotation.OptionsResolver.java
public String[] resolveOrdinalFieldOptions(final TransMeta transMeta, final String stepName,
        ModelAnnotation modelAnnotation) {
    LinkedHashSet<String> names = new LinkedHashSet<String>();
    try {
        RowMetaInterface prevStepFields = transMeta.getPrevStepFields(stepName);
        for (ValueMetaInterface valueMetaInterface : prevStepFields.getValueMetaList()) {
            if (!StringUtils.equals(modelAnnotation.getAnnotation().getField(), valueMetaInterface.getName())) {
                names.add(valueMetaInterface.getName());
            }
        }
    } catch (Exception e) {
        logger.warning(e.getMessage());
    }
    return names.toArray(new String[names.size()]);
}
From source file:com.spotify.hamcrest.jackson.IsJsonObject.java
@Override
protected boolean matchesNode(ObjectNode node, Description mismatchDescription) {
    LinkedHashSet<String> mismatchedKeys = new LinkedHashSet<>();

    for (Map.Entry<String, Matcher<? super JsonNode>> entryMatcher : entryMatchers.entrySet()) {
        final String key = entryMatcher.getKey();
        final Matcher<? super JsonNode> valueMatcher = entryMatcher.getValue();

        final JsonNode value = node.path(key);

        if (!valueMatcher.matches(value)) {
            mismatchedKeys.add(key);
        }
    }

    if (!mismatchedKeys.isEmpty()) {
        describeMismatches(node, mismatchDescription, mismatchedKeys);
        return false;
    }

    return true;
}