List of usage examples for java.util.LinkedHashMap.values()
public Collection<V> values()
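values() returns a Collection view of the map's values, iterated in the map's insertion order; the view is backed by the map, so removals pass through and changes to the map show up in the view. Before the real-world examples below, a minimal self-contained sketch (class name and contents are illustrative, not from any of the projects below):

import java.util.Collection;
import java.util.LinkedHashMap;

public class ValuesDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
        map.put("one", 1);
        map.put("two", 2);
        map.put("three", 3);

        // values() is a live view in insertion order, not a copy
        Collection<Integer> values = map.values();
        System.out.println(values); // [1, 2, 3]

        // removing through the view removes the mapping from the map
        values.remove(2);
        System.out.println(map); // {one=1, three=3}
    }
}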
From source file:nzilbb.csv.CsvDeserializer.java
/**
 * Sets parameters for the deserializer as a whole. This might include database connection
 * parameters, locations of supporting files, etc.
 * <p>When the deserializer is installed, this method should be invoked with an empty parameter
 * set, to discover what (if any) general configuration is required. If parameters are
 * returned, and user interaction is possible, then the user may be presented with an
 * interface for setting/confirming these parameters.
 * @param configuration The configuration for the deserializer.
 * @param schema The layer schema, defining layers and the way they interrelate.
 * @return A list of configuration parameters that (still) must be set before
 *         {@link IDeserializer#setParameters(ParameterSet)} can be invoked. If this is an empty
 *         list, {@link IDeserializer#setParameters(ParameterSet)} can be invoked. If it's not an
 *         empty list, this method must be invoked again with the returned parameters' values set.
 */
public ParameterSet configure(ParameterSet configuration, Schema schema) {
    setSchema(schema);
    setParticipantLayer(schema.getParticipantLayer());
    setTurnLayer(schema.getTurnLayer());
    setUtteranceLayer(schema.getUtteranceLayer());
    setWordLayer(schema.getWordLayer());

    // set any values that have been passed in
    for (Parameter p : configuration.values()) {
        try { p.apply(this); } catch (Exception x) { }
    }

    // create a list of layers we need and possible matching layer names
    LinkedHashMap<Parameter, List<String>> layerToPossibilities = new LinkedHashMap<Parameter, List<String>>();
    HashMap<String, LinkedHashMap<String, Layer>> layerToCandidates = new HashMap<String, LinkedHashMap<String, Layer>>();

    // do we need to ask for participant/turn/utterance/word layers?
    LinkedHashMap<String, Layer> possibleParticipantLayers = new LinkedHashMap<String, Layer>();
    LinkedHashMap<String, Layer> possibleTurnLayers = new LinkedHashMap<String, Layer>();
    LinkedHashMap<String, Layer> possibleTurnChildLayers = new LinkedHashMap<String, Layer>();
    LinkedHashMap<String, Layer> wordTagLayers = new LinkedHashMap<String, Layer>();
    LinkedHashMap<String, Layer> participantTagLayers = new LinkedHashMap<String, Layer>();
    if (getParticipantLayer() == null || getTurnLayer() == null
            || getUtteranceLayer() == null || getWordLayer() == null) {
        for (Layer top : schema.getRoot().getChildren().values()) {
            if (top.getAlignment() == Constants.ALIGNMENT_NONE) {
                if (top.getChildren().size() == 0) {
                    // unaligned childless children of graph
                    participantTagLayers.put(top.getId(), top);
                } else {
                    // unaligned children of graph, with children of their own
                    possibleParticipantLayers.put(top.getId(), top);
                    for (Layer turn : top.getChildren().values()) {
                        if (turn.getAlignment() == Constants.ALIGNMENT_INTERVAL
                                && turn.getChildren().size() > 0) {
                            // aligned children of who with their own children
                            possibleTurnLayers.put(turn.getId(), turn);
                            for (Layer turnChild : turn.getChildren().values()) {
                                if (turnChild.getAlignment() == Constants.ALIGNMENT_INTERVAL) {
                                    // aligned children of turn
                                    possibleTurnChildLayers.put(turnChild.getId(), turnChild);
                                    for (Layer tag : turnChild.getChildren().values()) {
                                        if (tag.getAlignment() == Constants.ALIGNMENT_NONE) {
                                            // unaligned children of word
                                            wordTagLayers.put(tag.getId(), tag);
                                        }
                                    } // next possible word tag layer
                                }
                            } // next possible turn child layer
                        }
                    } // next possible turn layer
                } // with children
            } // unaligned
        } // next possible participant layer
    } // missing special layers
    else {
        for (Layer turnChild : getTurnLayer().getChildren().values()) {
            if (turnChild.getAlignment() == Constants.ALIGNMENT_INTERVAL) {
                possibleTurnChildLayers.put(turnChild.getId(), turnChild);
            }
        } // next possible turn child layer
        for (Layer tag : getWordLayer().getChildren().values()) {
            if (tag.getAlignment() == Constants.ALIGNMENT_NONE && tag.getChildren().size() == 0) {
                wordTagLayers.put(tag.getId(), tag);
            }
        } // next possible word tag layer
        for (Layer tag : getParticipantLayer().getChildren().values()) {
            if (tag.getAlignment() == Constants.ALIGNMENT_NONE && tag.getChildren().size() == 0) {
                participantTagLayers.put(tag.getId(), tag);
            }
        } // next possible participant tag layer
    }
    participantTagLayers.remove("main_participant");
    if (getParticipantLayer() == null) {
        layerToPossibilities.put(
                new Parameter("participantLayer", Layer.class, "Participant layer",
                        "Layer for speaker/participant identification", true),
                Arrays.asList("participant", "participants", "who", "speaker", "speakers"));
        layerToCandidates.put("participantLayer", possibleParticipantLayers);
    }
    if (getTurnLayer() == null) {
        layerToPossibilities.put(
                new Parameter("turnLayer", Layer.class, "Turn layer", "Layer for speaker turns", true),
                Arrays.asList("turn", "turns"));
        layerToCandidates.put("turnLayer", possibleTurnLayers);
    }
    if (getUtteranceLayer() == null) {
        layerToPossibilities.put(
                new Parameter("utteranceLayer", Layer.class, "Utterance layer",
                        "Layer for speaker utterances", true),
                Arrays.asList("utterance", "utterances", "line", "lines"));
        layerToCandidates.put("utteranceLayer", possibleTurnChildLayers);
    }
    if (getWordLayer() == null) {
        layerToPossibilities.put(
                new Parameter("wordLayer", Layer.class, "Word layer",
                        "Layer for individual word tokens", true),
                Arrays.asList("transcript", "word", "words", "w"));
        layerToCandidates.put("wordLayer", possibleTurnChildLayers);
    }
    LinkedHashMap<String, Layer> graphTagLayers = new LinkedHashMap<String, Layer>();
    for (Layer top : schema.getRoot().getChildren().values()) {
        if (top.getAlignment() == Constants.ALIGNMENT_NONE && top.getChildren().size() == 0) {
            // unaligned childless children of graph
            graphTagLayers.put(top.getId(), top);
        }
    } // next top level layer
    graphTagLayers.remove("corpus");
    graphTagLayers.remove("transcript_type");

    // other layers...

    // add parameters that aren't in the configuration yet, and set possible/default values
    for (Parameter p : layerToPossibilities.keySet()) {
        List<String> possibleNames = layerToPossibilities.get(p);
        LinkedHashMap<String, Layer> candidateLayers = layerToCandidates.get(p.getName());
        if (configuration.containsKey(p.getName())) {
            p = configuration.get(p.getName());
        } else {
            configuration.addParameter(p);
        }
        if (p.getValue() == null) {
            p.setValue(Utility.FindLayerById(candidateLayers, possibleNames));
        }
        p.setPossibleValues(candidateLayers.values());
    }

    return configuration;
}
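What this example leans on: each LinkedHashMap is an ordered registry of layers keyed by ID, filtered by removing keys, and its values() view is handed directly to p.setPossibleValues(...) so candidates appear in discovery order. A reduced sketch of that registry idiom (Layer here is a stand-in class, not the nzilbb one):

import java.util.Collection;
import java.util.LinkedHashMap;

public class CandidateRegistry {
    static class Layer {
        final String id;
        Layer(String id) { this.id = id; }
        @Override public String toString() { return id; }
    }

    public static void main(String[] args) {
        // insertion order doubles as display order for the candidates
        LinkedHashMap<String, Layer> candidates = new LinkedHashMap<>();
        candidates.put("word", new Layer("word"));
        candidates.put("utterance", new Layer("utterance"));
        candidates.remove("utterance"); // filtering by key is cheap

        // the values() view is handed out as the set of possible choices
        Collection<Layer> possibleValues = candidates.values();
        System.out.println(possibleValues); // [word]
    }
}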
From source file:org.devgateway.ocds.web.rest.controller.CostEffectivenessVisualsController.java
@ApiOperation(value = "Aggregated version of /api/costEffectivenessTenderAmount and "
        + "/api/costEffectivenessAwardAmount. "
        + "This endpoint aggregates the responses from the specified endpoints, per year. "
        + "Responds to the same filters.")
@RequestMapping(value = "/api/costEffectivenessTenderAwardAmount",
        method = { RequestMethod.POST, RequestMethod.GET }, produces = "application/json")
public List<DBObject> costEffectivenessTenderAwardAmount(
        @ModelAttribute @Valid final GroupingFilterPagingRequest filter) {

    Future<List<DBObject>> costEffectivenessAwardAmountFuture = controllerLookupService.asyncInvoke(
            new AsyncBeanParamControllerMethodCallable<List<DBObject>, GroupingFilterPagingRequest>() {
                @Override
                public List<DBObject> invokeControllerMethod(GroupingFilterPagingRequest filter) {
                    return costEffectivenessAwardAmount(filter);
                }
            }, filter);

    Future<List<DBObject>> costEffectivenessTenderAmountFuture = controllerLookupService.asyncInvoke(
            new AsyncBeanParamControllerMethodCallable<List<DBObject>, GroupingFilterPagingRequest>() {
                @Override
                public List<DBObject> invokeControllerMethod(GroupingFilterPagingRequest filter) {
                    return costEffectivenessTenderAmount(filter);
                }
            }, filter);

    // this is completely unnecessary since the #get methods are blocking
    // controllerLookupService.waitTillDone(costEffectivenessAwardAmountFuture, costEffectivenessTenderAmountFuture);

    LinkedHashMap<Object, DBObject> response = new LinkedHashMap<>();
    try {
        costEffectivenessAwardAmountFuture.get()
                .forEach(dbobj -> response.put(getYearMonthlyKey(filter, dbobj), dbobj));
        costEffectivenessTenderAmountFuture.get().forEach(dbobj -> {
            if (response.containsKey(getYearMonthlyKey(filter, dbobj))) {
                Map<?, ?> map = dbobj.toMap();
                map.remove(Keys.YEAR);
                if (filter.getMonthly()) {
                    map.remove(Keys.MONTH);
                }
                response.get(getYearMonthlyKey(filter, dbobj)).putAll(map);
            } else {
                response.put(getYearMonthlyKey(filter, dbobj), dbobj);
            }
        });
    } catch (InterruptedException | ExecutionException e) {
        throw new RuntimeException(e);
    }

    Collection<DBObject> respCollection = response.values();
    respCollection.forEach(dbobj -> {
        BigDecimal totalTenderAmount = BigDecimal.valueOf(dbobj.get(Keys.TOTAL_TENDER_AMOUNT) == null ? 0d
                : ((Number) dbobj.get(Keys.TOTAL_TENDER_AMOUNT)).doubleValue());
        BigDecimal totalAwardAmount = BigDecimal.valueOf(dbobj.get(Keys.TOTAL_AWARD_AMOUNT) == null ? 0d
                : ((Number) dbobj.get(Keys.TOTAL_AWARD_AMOUNT)).doubleValue());
        dbobj.put(Keys.DIFF_TENDER_AWARD_AMOUNT, totalTenderAmount.subtract(totalAwardAmount));
        dbobj.put(Keys.PERCENTAGE_AWARD_AMOUNT,
                totalTenderAmount.compareTo(BigDecimal.ZERO) != 0
                        ? (totalAwardAmount.setScale(15).divide(totalTenderAmount, BigDecimal.ROUND_HALF_UP)
                                .multiply(ONE_HUNDRED))
                        : BigDecimal.ZERO);
        dbobj.put(Keys.PERCENTAGE_DIFF_AMOUNT,
                totalTenderAmount.compareTo(BigDecimal.ZERO) != 0
                        ? (((BigDecimal) dbobj.get(Keys.DIFF_TENDER_AWARD_AMOUNT)).setScale(15)
                                .divide(totalTenderAmount, BigDecimal.ROUND_HALF_UP).multiply(ONE_HUNDRED))
                        : BigDecimal.ZERO);
    });

    return new ArrayList<>(respCollection);
}
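The LinkedHashMap does the real aggregation here: rows from the two endpoints are keyed by year (or year+month), rows sharing a key are merged into one, and values() yields the merged rows in first-seen order for the final ArrayList. A stripped-down sketch of that merge, with plain HashMap rows standing in for DBObject:

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class MergeByYear {
    public static void main(String[] args) {
        Map<String, Object> tenderRow = new HashMap<>();
        tenderRow.put("year", 2015);
        tenderRow.put("totalTenderAmount", 100);

        Map<String, Object> awardRow = new HashMap<>();
        awardRow.put("year", 2015);
        awardRow.put("totalAwardAmount", 80);

        // key rows by year; rows for an already-seen year are folded into the existing row
        LinkedHashMap<Object, Map<String, Object>> response = new LinkedHashMap<>();
        for (Map<String, Object> row : List.of(tenderRow)) {
            response.put(row.get("year"), row);
        }
        for (Map<String, Object> row : List.of(awardRow)) {
            response.merge(row.get("year"), row, (existing, incoming) -> {
                existing.putAll(incoming);
                return existing;
            });
        }

        // values() yields one merged row per year, in first-seen order
        Map<String, Object> merged = response.values().iterator().next();
        System.out.println(merged.get("totalTenderAmount") + " / " + merged.get("totalAwardAmount")); // 100 / 80
    }
}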
From source file:me.fireant.photoselect.persenter.PhotoSelectPersenterImpl.java
@Override
public void loadFolder() {
    this.mLoaderManager.initLoader(1000, null, new LoaderManager.LoaderCallbacks<Cursor>() {
        @Override
        public Loader<Cursor> onCreateLoader(int id, Bundle args) {
            Uri uri = MediaStore.Images.Media.EXTERNAL_CONTENT_URI;
            return new CursorLoader(mContext, uri, projection, "", null,
                    MediaStore.Images.Media.DATE_MODIFIED + " DESC");
        }

        @Override
        public void onLoadFinished(Loader<Cursor> loader, Cursor cursor) {
            if (loader == null) {
                return;
            }
            // group photos into folders keyed by bucket display name;
            // the "all photos" folder is inserted first so it stays first
            LinkedHashMap<String, Folder> folders = new LinkedHashMap<>();
            Folder allPhotoFolder = new Folder("");
            folders.put(allPhotoFolder.getFolderName(), allPhotoFolder);
            while (cursor.moveToNext()) {
                String name = cursor
                        .getString(cursor.getColumnIndex(MediaStore.Images.Media.BUCKET_DISPLAY_NAME));
                String path = cursor.getString(cursor.getColumnIndex(MediaStore.Images.Media.DATA));
                Photo photoInfo = new Photo(path);
                // create the folder on first sight of its bucket name
                if (!folders.containsKey(name)) {
                    Folder newFolder = new Folder(name);
                    folders.put(name, newFolder);
                }
                // add the photo to its own folder and to the "all photos" folder
                folders.get(name).addPhoto(photoInfo);
                allPhotoFolder.addPhoto(photoInfo);
            }
            ArrayList<Folder> folderData = new ArrayList<>();
            for (Folder folder : folders.values()) {
                folderData.add(folder);
            }
            if (folderData != null) {
                mMvpView.showFolder(folderData);
            }
        }

        @Override
        public void onLoaderReset(Loader<Cursor> loader) {
        }
    });
}
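The idiom here is group-by-bucket with stable ordering: the "all photos" folder is put first, each bucket folder is created on first sight, and values() returns the folders in that order for display. A compact sketch of the same grouping, using computeIfAbsent and a one-line copy in place of the manual loop above:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;

public class GroupByBucket {
    public static void main(String[] args) {
        List<String[]> rows = List.of(
                new String[] { "Camera", "/a.jpg" },
                new String[] { "Screenshots", "/b.png" },
                new String[] { "Camera", "/c.jpg" });

        // bucket name -> photo paths, in the order buckets first appear
        LinkedHashMap<String, List<String>> folders = new LinkedHashMap<>();
        for (String[] row : rows) {
            folders.computeIfAbsent(row[0], k -> new ArrayList<>()).add(row[1]);
        }

        // one-line replacement for the example's manual copy loop
        List<List<String>> folderData = new ArrayList<>(folders.values());
        System.out.println(folderData); // [[/a.jpg, /c.jpg], [/b.png]]
    }
}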
From source file:gate.util.reporting.DocTimeReporter.java
/**
 * Sorts a LinkedHashMap by its values (natural descending order), keeping
 * duplicate values as they are.
 *
 * @param passedMap
 *          An object of type LinkedHashMap to be sorted by its values.
 * @return An object containing the sorted LinkedHashMap.
 */
private LinkedHashMap<?, ?> sortHashMapByValues(LinkedHashMap<String, String> passedMap) {
    List<String> mapKeys = new ArrayList<String>(passedMap.keySet());
    List<String> mapValues = new ArrayList<String>(passedMap.values());
    Collections.sort(mapValues, new ValueComparator());
    Collections.sort(mapKeys);
    // Reversing the collection to sort the values in descending order
    Collections.reverse(mapValues);
    LinkedHashMap<String, String> sortedMap = new LinkedHashMap<String, String>();
    Iterator<String> valueIt = mapValues.iterator();
    while (valueIt.hasNext()) {
        String val = valueIt.next();
        Iterator<String> keyIt = mapKeys.iterator();
        while (keyIt.hasNext()) {
            String key = keyIt.next();
            String comp1 = passedMap.get(key).toString();
            String comp2 = val.toString();
            if (comp1.equals(comp2)) {
                passedMap.remove(key);
                mapKeys.remove(key);
                sortedMap.put(key, val);
                break;
            }
        }
    }
    return sortedMap;
}
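The nested key-scan above is quadratic; on current JDKs the same sort-by-value-descending, duplicates-preserved result is usually built with a stream collected into a LinkedHashMap, whose insertion order captures the sorted order. A sketch of that alternative (not the GATE code; values here compare as plain strings rather than through ValueComparator):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class SortByValues {
    static LinkedHashMap<String, String> sortByValuesDescending(Map<String, String> in) {
        return in.entrySet().stream()
                .sorted(Map.Entry.<String, String>comparingByValue().reversed())
                .collect(Collectors.toMap(
                        Map.Entry::getKey,
                        Map.Entry::getValue,
                        (a, b) -> a,          // keys are unique, so the merge function never fires
                        LinkedHashMap::new)); // insertion order records the sorted order
    }

    public static void main(String[] args) {
        LinkedHashMap<String, String> m = new LinkedHashMap<>();
        m.put("docA", "c");
        m.put("docB", "b");
        m.put("docC", "c"); // duplicate value, kept as-is
        System.out.println(sortByValuesDescending(m)); // {docA=c, docC=c, docB=b}
    }
}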
From source file:org.apache.hadoop.hive.ql.parse.NewGroupByUtils1.java
@SuppressWarnings("unchecked") private GroupByOperator genNewGroupByPlanGroupByOperator(QB qb, String dest, Operator inputOperatorInfo, Mode mode, ArrayList<GenericUDAFEvaluator> genericUDAFEvaluators, ArrayList<ArrayList<Integer>> tag2AggrPos, ArrayList<ArrayList<ASTNode>> tag2AggrParamAst, HashMap<Integer, ArrayList<Integer>> nonDistPos2TagOffs) throws SemanticException { RowResolver groupByInputRowResolver = opParseCtx.get(inputOperatorInfo).getRR(); QBParseInfo parseInfo = qb.getParseInfo(); RowResolver groupByOutputRowResolver = new RowResolver(); groupByOutputRowResolver.setIsExprResolver(true); RowSchema operatorRowSchema = new RowSchema(); operatorRowSchema.setSignature(new Vector<ColumnInfo>()); Map<String, exprNodeDesc> colExprMap = new HashMap<String, exprNodeDesc>(); ArrayList<exprNodeDesc> groupByKeys = new ArrayList<exprNodeDesc>(); ArrayList<String> outputColumnNames = new ArrayList<String>(); List<ASTNode> grpByExprs = SemanticAnalyzer.getGroupByForClause(parseInfo, dest); int colid = 0; if (qb.getParseInfo().getDestContainsGroupbyCubeOrRollupClause(dest)) { String colName = getColumnInternalName(colid++); outputColumnNames.add(colName);/*from w ww . j a v a2 s . co m*/ ColumnInfo info = groupByInputRowResolver.get("", NewGroupByUtils1._CUBE_ROLLUP_GROUPINGSETS_TAG_); exprNodeDesc grpByExprNode = new exprNodeColumnDesc(info.getType(), info.getInternalName(), info.getAlias(), info.getIsPartitionCol()); groupByKeys.add(grpByExprNode); ColumnInfo colInfo = new ColumnInfo(colName, grpByExprNode.getTypeInfo(), "", false); groupByOutputRowResolver.put("", NewGroupByUtils1._CUBE_ROLLUP_GROUPINGSETS_TAG_, colInfo); operatorRowSchema.getSignature().add(colInfo); colExprMap.put(colName, grpByExprNode); } for (int i = 0; i < grpByExprs.size(); i++) { ASTNode grpbyExpr = grpByExprs.get(i); exprNodeDesc grpByExprNode = SemanticAnalyzer.genExprNodeDesc(grpbyExpr, groupByInputRowResolver, qb, -1, conf); groupByKeys.add(grpByExprNode); String colName = getColumnInternalName(colid++); outputColumnNames.add(colName); ColumnInfo colInfo = new ColumnInfo(colName, grpByExprNode.getTypeInfo(), "", false); groupByOutputRowResolver.putExpression(grpbyExpr, colInfo); operatorRowSchema.getSignature().add(colInfo); colExprMap.put(colName, grpByExprNode); } boolean containsfunctions = tag2AggrPos != null && tag2AggrPos.size() > 0; boolean containsnondistinctfunctions = containsfunctions && tag2AggrPos.get(0).size() > 0; LinkedHashMap<String, ASTNode> aggregationTrees = parseInfo.getAggregationExprsForClause(dest); ArrayList<ASTNode> aggregationTreesArray = new ArrayList<ASTNode>(aggregationTrees.size()); aggregationTreesArray.addAll(aggregationTrees.values()); HashMap<Integer, Integer> pos2tag = new HashMap<Integer, Integer>(); for (int tag = 0; tag < tag2AggrPos.size(); tag++) { for (Integer pos : tag2AggrPos.get(tag)) { pos2tag.put(pos, tag); } } ArrayList<ArrayList<exprNodeDesc>> tag2AggrParamORValueExpr = new ArrayList<ArrayList<exprNodeDesc>>(); ArrayList<aggregationDesc> aggregations = null; aggregations = new ArrayList<aggregationDesc>(aggregationTrees.size()); for (int i = 0; i < aggregationTrees.size(); i++) { aggregations.add(null); } exprNodeDesc aggrPartExpr = null; if (mode == Mode.HASH) { if (containsfunctions) { String colNameAggrPart = getColumnInternalName(colid++); outputColumnNames.add(colNameAggrPart); List<TypeInfo> unionTypes = new ArrayList<TypeInfo>(); for (int tag = 0; tag < tag2AggrParamAst.size(); tag++) { tag2AggrParamORValueExpr.add(new ArrayList<exprNodeDesc>()); 
ArrayList<exprNodeDesc> aggParameters = new ArrayList<exprNodeDesc>(); for (int j = 0; j < tag2AggrParamAst.get(tag).size(); j++) { ASTNode paraExpr = (ASTNode) tag2AggrParamAst.get(tag).get(j); exprNodeDesc exprNode = SemanticAnalyzer.genExprNodeDesc(paraExpr, groupByInputRowResolver, qb, -1, conf); tag2AggrParamORValueExpr.get(tag).add(exprNode); aggParameters.add(exprNode); } ArrayList<String> names = new ArrayList<String>(); ArrayList<TypeInfo> typeInfos = new ArrayList<TypeInfo>(); if (tag == 0) { if (!containsnondistinctfunctions) { names.add("nondistnull"); typeInfos.add(TypeInfoFactory.voidTypeInfo); } else { int posoff = 0; for (Integer pos : tag2AggrPos.get(tag)) { ASTNode value = aggregationTreesArray.get(pos); String aggName = value.getChild(0).getText(); boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI; GenericUDAFEvaluator.Mode amode = SemanticAnalyzer.groupByDescModeToUDAFMode(mode, isDistinct); GenericUDAFEvaluator genericUDAFEvaluator = genericUDAFEvaluators.get(pos); assert (genericUDAFEvaluator != null); ArrayList<exprNodeDesc> aggParameters1 = aggParameters; ArrayList<Integer> offs = nonDistPos2TagOffs.get(pos); aggParameters1 = new ArrayList<exprNodeDesc>(); for (Integer off : offs) { aggParameters1.add(aggParameters.get(off)); } GenericUDAFInfo udaf = SemanticAnalyzer.getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters1); aggregations.set(pos, new aggregationDesc(aggName.toLowerCase(), udaf.genericUDAFEvaluator, udaf.convertedParameters, isDistinct, amode)); String innername = getColumnInternalName(posoff); String field = colNameAggrPart + ":" + tag + "." + innername; ColumnInfo outColInfo = new ColumnInfo(field, udaf.returnType, "", false); groupByOutputRowResolver.put("", _AGGRPARTTAG_ + tag + "_" + posoff, outColInfo); posoff++; names.add(innername); typeInfos.add(udaf.returnType); } } } else { for (int i = 0; i < tag2AggrParamORValueExpr.get(tag).size(); i++) { String innername = getColumnInternalName(i); TypeInfo innertype = tag2AggrParamORValueExpr.get(tag).get(i).getTypeInfo(); String field = colNameAggrPart + ":" + tag + "." + innername; ColumnInfo outColInfo = new ColumnInfo(field, innertype, "", false); groupByOutputRowResolver.put("", _AGGRPARTTAG_ + tag + "_" + i, outColInfo); names.add(innername); typeInfos.add(innertype); } } unionTypes.add(TypeInfoFactory.getStructTypeInfo(names, typeInfos)); } ColumnInfo outColInfo = new ColumnInfo(colNameAggrPart, TypeInfoFactory.getUnionTypeInfo(unionTypes), "", false); groupByOutputRowResolver.put("", _GBY_AGGRPART_OUTPUT_COLNAME_, outColInfo); operatorRowSchema.getSignature().add(outColInfo); } } else if (mode == Mode.PARTIAL1 || mode == Mode.PARTIALS) { if (containsfunctions) { ColumnInfo aggrPartInfo = groupByInputRowResolver.get("", _GBY_AGGRPART_OUTPUT_COLNAME_); aggrPartExpr = new exprNodeColumnDesc(aggrPartInfo.getType(), aggrPartInfo.getInternalName(), "", false); String colNameAggrPart = getColumnInternalName(colid++); outputColumnNames.add(colNameAggrPart); List<TypeInfo> unionTypes = new ArrayList<TypeInfo>(); for (int tag = 0; tag < tag2AggrParamAst.size(); tag++) { tag2AggrParamORValueExpr.add(new ArrayList<exprNodeDesc>()); ArrayList<exprNodeDesc> aggParameters = new ArrayList<exprNodeDesc>(); int paramlen = (tag == 0 && mode == Mode.PARTIALS) ? 
tag2AggrPos.get(tag).size() : tag2AggrParamAst.get(tag).size(); for (int j = 0; j < paramlen; j++) { ColumnInfo inputColInfo = groupByInputRowResolver.get("", _AGGRPARTTAG_ + tag + "_" + j); exprNodeDesc exprNode = new exprNodeColumnDesc(inputColInfo.getType(), inputColInfo.getInternalName(), "", false); tag2AggrParamORValueExpr.get(tag).add(exprNode); aggParameters.add(exprNode); } ArrayList<String> names = new ArrayList<String>(); ArrayList<TypeInfo> typeInfos = new ArrayList<TypeInfo>(); int posoff = 0; for (Integer pos : tag2AggrPos.get(tag)) { ASTNode value = aggregationTreesArray.get(pos); String aggName = value.getChild(0).getText(); boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI; GenericUDAFEvaluator.Mode amode = SemanticAnalyzer.groupByDescModeToUDAFMode(mode, isDistinct); GenericUDAFEvaluator genericUDAFEvaluator = genericUDAFEvaluators.get(pos); assert (genericUDAFEvaluator != null); ArrayList<exprNodeDesc> aggParameters1 = aggParameters; if (tag == 0 && mode == Mode.PARTIAL1) { ArrayList<Integer> offs = nonDistPos2TagOffs.get(pos); aggParameters1 = new ArrayList<exprNodeDesc>(); for (Integer off : offs) { aggParameters1.add(aggParameters.get(off)); } } else if (tag == 0) { aggParameters1 = new ArrayList<exprNodeDesc>(); aggParameters1.add(aggParameters.get(posoff)); } GenericUDAFInfo udaf = SemanticAnalyzer.getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters1); aggregations.set(pos, new aggregationDesc(aggName.toLowerCase(), udaf.genericUDAFEvaluator, udaf.convertedParameters, isDistinct, amode)); String innername = getColumnInternalName(posoff); String field = colNameAggrPart + ":" + tag + "." + innername; ColumnInfo outColInfo = new ColumnInfo(field, udaf.returnType, "", false); groupByOutputRowResolver.put("", _AGGRPARTTAG_ + tag + "_" + posoff, outColInfo); posoff++; names.add(innername); typeInfos.add(udaf.returnType); } if (names.isEmpty()) { names.add("nondistnull"); typeInfos.add(TypeInfoFactory.voidTypeInfo); } unionTypes.add(TypeInfoFactory.getStructTypeInfo(names, typeInfos)); } ColumnInfo outColInfo = new ColumnInfo(colNameAggrPart, TypeInfoFactory.getUnionTypeInfo(unionTypes), "", false); groupByOutputRowResolver.put("", _GBY_AGGRPART_OUTPUT_COLNAME_, outColInfo); operatorRowSchema.getSignature().add(outColInfo); } } else if (mode == Mode.MERGEPARTIAL || mode == Mode.FINAL || mode == Mode.COMPLETE) { if (containsfunctions) { ColumnInfo aggrPartInfo = groupByInputRowResolver.get("", _GBY_AGGRPART_OUTPUT_COLNAME_); aggrPartExpr = new exprNodeColumnDesc(aggrPartInfo.getType(), aggrPartInfo.getInternalName(), "", false); HashMap<Integer, String> pos2colname = new HashMap<Integer, String>(); for (int pos = 0; pos < aggregationTreesArray.size(); pos++) { String colName = getColumnInternalName(colid++); outputColumnNames.add(colName); pos2colname.put(pos, colName); } HashMap<Integer, ColumnInfo> pos2valueInfo = new HashMap<Integer, ColumnInfo>(); for (int tag = 0; tag < tag2AggrPos.size(); tag++) { tag2AggrParamORValueExpr.add(new ArrayList<exprNodeDesc>()); ArrayList<exprNodeDesc> aggParameters = new ArrayList<exprNodeDesc>(); int aggrlen = (mode == Mode.FINAL) ? tag2AggrPos.get(tag).size() : ((mode == Mode.COMPLETE) ? tag2AggrParamAst.get(tag).size() : ((tag == 0) ? 
tag2AggrPos.get(tag).size() : tag2AggrParamAst.get(tag).size())); for (int j = 0; j < aggrlen; j++) { ColumnInfo inputColInfo = groupByInputRowResolver.get("", _AGGRPARTTAG_ + tag + "_" + j); exprNodeDesc exprNode = new exprNodeColumnDesc(inputColInfo.getType(), inputColInfo.getInternalName(), "", false); tag2AggrParamORValueExpr.get(tag).add(exprNode); aggParameters.add(exprNode); } int posoff = 0; for (Integer pos : tag2AggrPos.get(tag)) { ASTNode value = aggregationTreesArray.get(pos); String aggName = value.getChild(0).getText(); boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI; GenericUDAFEvaluator.Mode amode = SemanticAnalyzer.groupByDescModeToUDAFMode(mode, isDistinct); GenericUDAFEvaluator genericUDAFEvaluator = genericUDAFEvaluators.get(pos); assert (genericUDAFEvaluator != null); ArrayList<exprNodeDesc> aggParameters1 = aggParameters; if (tag == 0 && mode == Mode.COMPLETE) { ArrayList<Integer> offs = nonDistPos2TagOffs.get(pos); aggParameters1 = new ArrayList<exprNodeDesc>(); for (Integer off : offs) { aggParameters1.add(aggParameters.get(off)); } } else if (tag == 0 || mode == Mode.FINAL) { aggParameters1 = new ArrayList<exprNodeDesc>(); aggParameters1.add(aggParameters.get(posoff)); } GenericUDAFInfo udaf = SemanticAnalyzer.getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters1); aggregations.set(pos, new aggregationDesc(aggName.toLowerCase(), udaf.genericUDAFEvaluator, udaf.convertedParameters, isDistinct, amode)); ColumnInfo valueColInfo = new ColumnInfo(pos2colname.get(pos), udaf.returnType, "", false); pos2valueInfo.put(pos, valueColInfo); posoff++; } } for (int pos = 0; pos < aggregationTreesArray.size(); pos++) { groupByOutputRowResolver.putExpression(aggregationTreesArray.get(pos), pos2valueInfo.get(pos)); operatorRowSchema.getSignature().add(pos2valueInfo.get(pos)); } } } else if (mode == Mode.PARTIAL2) { } GroupByOperator op = (GroupByOperator) putOpInsertMap( OperatorFactory.getAndMakeChild(new groupByDesc(mode, outputColumnNames, groupByKeys, aggregations, tag2AggrPos, tag2AggrParamORValueExpr, aggrPartExpr), operatorRowSchema, inputOperatorInfo), groupByOutputRowResolver); op.setColumnExprMap(colExprMap); return op; }
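Amid all this plan-building, the LinkedHashMap's role is small but load-bearing: getAggregationExprsForClause returns the aggregation expressions in a LinkedHashMap, so copying aggregationTrees.values() into an ArrayList gives every pass over the plan the same expression at the same index. The snapshot idiom in isolation (contents illustrative):

import java.util.ArrayList;
import java.util.LinkedHashMap;

public class SnapshotValues {
    public static void main(String[] args) {
        LinkedHashMap<String, String> exprs = new LinkedHashMap<>();
        exprs.put("expr0", "count(1)");
        exprs.put("expr1", "sum(x)");

        // copy the ordered view once; the list index becomes a stable aggregation position
        ArrayList<String> byPosition = new ArrayList<>(exprs.values());
        System.out.println(byPosition.get(1)); // sum(x)
    }
}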
From source file:org.tinymediamanager.core.movie.MovieRenamerPreview.java
public static MovieRenamerPreviewContainer renameMovie(Movie movie) {
    MovieRenamerPreviewContainer container = new MovieRenamerPreviewContainer(movie);
    LinkedHashMap<String, MediaFile> oldFiles = new LinkedHashMap<>();
    Set<MediaFile> newFiles = new LinkedHashSet<>();

    String newVideoBasename = "";
    if (MovieModuleManager.MOVIE_SETTINGS.getMovieRenamerFilename().trim().isEmpty()) {
        // we are NOT renaming any files, so we keep the same name on renaming ;)
        newVideoBasename = movie.getVideoBasenameWithoutStacking();
    } else {
        // since we rename, generate the new basename
        MediaFile ftr = MovieRenamer
                .generateFilename(movie, movie.getMediaFiles(MediaFileType.VIDEO).get(0), newVideoBasename)
                .get(0);
        newVideoBasename = FilenameUtils.getBaseName(ftr.getFilenameWithoutStacking());
    }

    // VIDEO needs to be renamed first, since all others depend on that name!!!
    for (MediaFile mf : movie.getMediaFiles(MediaFileType.VIDEO)) {
        oldFiles.put(mf.getFileAsPath().toString(), new MediaFile(mf));
        MediaFile ftr = MovieRenamer.generateFilename(movie, mf, newVideoBasename).get(0); // there can be only one
        newFiles.add(ftr);
    }

    // all the other MFs...
    for (MediaFile mf : movie.getMediaFilesExceptType(MediaFileType.VIDEO)) {
        oldFiles.put(mf.getFileAsPath().toString(), new MediaFile(mf));
        newFiles.addAll(MovieRenamer.generateFilename(movie, mf, newVideoBasename)); // N:M
    }

    // movie folder needs a rename?
    Path oldMovieFolder = movie.getPathNIO();
    String pattern = MovieModuleManager.MOVIE_SETTINGS.getMovieRenamerPathname();
    if (pattern.isEmpty()) {
        // same
        container.newPath = Paths.get(movie.getDataSource()).relativize(movie.getPathNIO());
    } else {
        container.newPath = Paths.get(MovieRenamer.createDestinationForFoldername(pattern, movie));
    }
    Path newMovieFolder = Paths.get(movie.getDataSource()).resolve(container.newPath);

    if (!oldMovieFolder.equals(newMovieFolder)) {
        container.needsRename = true;
        // update already the "old" files with new path, so we can simply do a contains check ;)
        for (MediaFile omf : oldFiles.values()) {
            omf.replacePathForRenamedFolder(oldMovieFolder, newMovieFolder);
        }
    }

    // change status of MFs, if they have been added or not
    for (MediaFile mf : newFiles) {
        if (!oldFiles.containsKey(mf.getFileAsPath().toString())) {
            container.needsRename = true;
            break;
        }
    }
    for (MediaFile mf : oldFiles.values()) {
        if (!newFiles.contains(mf)) {
            container.needsRename = true;
            break;
        }
    }

    container.newMediaFiles.addAll(newFiles);
    return container;
}
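One detail worth noting: the loop over oldFiles.values() mutates the MediaFile objects in place, which is safe because values() is a live view and only structural modification of the map during iteration is forbidden. A minimal sketch of mutating through the view (Entry is a stand-in class):

import java.util.LinkedHashMap;

public class MutateThroughView {
    static class Entry {
        String path;
        Entry(String path) { this.path = path; }
        @Override public String toString() { return path; }
    }

    public static void main(String[] args) {
        LinkedHashMap<String, Entry> oldFiles = new LinkedHashMap<>();
        oldFiles.put("/old/movie.mkv", new Entry("/old/movie.mkv"));

        // mutating the contained objects through the view is fine...
        for (Entry e : oldFiles.values()) {
            e.path = e.path.replace("/old/", "/new/");
        }
        System.out.println(oldFiles); // {/old/movie.mkv=/new/movie.mkv}

        // ...but calling oldFiles.put(...) inside that loop would throw ConcurrentModificationException
    }
}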
From source file:eionet.cr.dao.virtuoso.VirtuosoEndpointHarvestQueryDAO.java
@Override
public void move(String endpointUrl, Set<Integer> ids, int direction) throws DAOException {
    if (StringUtils.isBlank(endpointUrl) || ids == null || ids.isEmpty()) {
        return;
    }
    if (direction == 0) {
        throw new IllegalArgumentException("Direction must not be 0!");
    }

    // Prepare map where we can get queries by position, also find the max and min positions.
    LinkedHashMap<Integer, EndpointHarvestQueryDTO> queriesByPos = getQueriesByPosition(endpointUrl);
    if (queriesByPos.isEmpty()) {
        return;
    }
    Set<Integer> positions = queriesByPos.keySet();
    int maxPos = Collections.max(positions);
    int minPos = Collections.min(positions);

    Connection conn = null;
    try {
        conn = getSQLConnection();
        conn.setAutoCommit(false);

        // If even one query is already at position 1 then moving up is not considered possible.
        // And conversely, if even one query is already at the last position, then moving down
        // is not considered possible either.
        boolean isMovingPossible = true;
        List<Integer> selectedPositions = new ArrayList<Integer>();
        List<EndpointHarvestQueryDTO> queries = new ArrayList<EndpointHarvestQueryDTO>(queriesByPos.values());
        for (EndpointHarvestQueryDTO query : queries) {
            if (ids.contains(query.getId())) {
                int pos = query.getPosition();
                if ((direction < 0 && pos == minPos) || (direction > 0 && pos == maxPos)) {
                    isMovingPossible = false;
                } else {
                    selectedPositions.add(pos);
                }
            }
        }

        if (isMovingPossible) {
            if (direction < 0) {
                for (Integer selectedPosition : selectedPositions) {
                    EndpointHarvestQueryDTO queryToMove = queriesByPos.get(selectedPosition);
                    int i = queries.indexOf(queryToMove);
                    queries.set(i, queries.get(i - 1));
                    queries.set(i - 1, queryToMove);
                }
            } else {
                for (int j = selectedPositions.size() - 1; j >= 0; j--) {
                    EndpointHarvestQueryDTO queryToMove = queriesByPos.get(selectedPositions.get(j));
                    int i = queries.indexOf(queryToMove);
                    queries.set(i, queries.get(i + 1));
                    queries.set(i + 1, queryToMove);
                }
            }
        }

        SQLUtil.executeUpdate(INCREASE_POSITIONS_SQL, Arrays.asList(maxPos, endpointUrl), conn);
        for (int i = 0; i < queries.size(); i++) {
            SQLUtil.executeUpdate(UPDATE_POSITION_SQL, Arrays.asList(i + 1, queries.get(i).getId()), conn);
        }
        conn.commit();
    } catch (Exception e) {
        SQLUtil.rollback(conn);
        throw new DAOException(e.getMessage(), e);
    } finally {
        SQLUtil.close(conn);
    }
}
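Because the values() view is ordered but cannot be reordered (it supports removal, not set or swap), the example copies it into an ArrayList before swapping neighbours and persisting the new positions. The core of that move, reduced:

import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;

public class ReorderValues {
    public static void main(String[] args) {
        LinkedHashMap<Integer, String> queriesByPos = new LinkedHashMap<>();
        queriesByPos.put(1, "query-A");
        queriesByPos.put(2, "query-B");
        queriesByPos.put(3, "query-C");

        // the view is ordered but immutable in order; copy it into a list first
        List<String> queries = new ArrayList<>(queriesByPos.values());
        Collections.swap(queries, 1, 2); // "move" query-C up one position
        System.out.println(queries); // [query-A, query-C, query-B]
    }
}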
From source file:org.nuxeo.ecm.webapp.directory.ChainSelectActionsBean.java
public void add(ActionEvent event) {
    ChainSelect chainSelect = getChainSelect(event);
    FacesContext context = FacesContext.getCurrentInstance();
    boolean allowBranchSelection = chainSelect.getBooleanProperty("allowBranchSelection", false);
    boolean allowRootSelection = chainSelect.getBooleanProperty("allowRootSelection", false);
    int size = chainSelect.getSize();
    String clientId = chainSelect.getClientId(context);

    LinkedHashMap<String, Selection> map = new LinkedHashMap<String, Selection>();
    for (Selection selection : chainSelect.getComponentValue()) {
        map.put(selection.getValue(chainSelect.getKeySeparator()), selection);
    }
    for (Selection selection : chainSelect.getSelections()) {
        int selectionSize = selection.getSize();
        if (!allowRootSelection && selectionSize == 0) {
            String messageStr = ComponentUtils.translate(context, "label.chainSelect.empty_selection");
            FacesMessage message = new FacesMessage(messageStr);
            context.addMessage(clientId, message);
            chainSelect.setValid(false);
            return;
        }
        if (!allowBranchSelection && selectionSize > 0 && selectionSize != size) {
            String messageStr = ComponentUtils.translate(context, "label.chainSelect.incomplete_selection");
            FacesMessage message = new FacesMessage(messageStr);
            context.addMessage(clientId, message);
            chainSelect.setValid(false);
            return;
        }
        map.put(selection.getValue(chainSelect.getKeySeparator()), selection);
    }

    Selection[] componentValue = map.values().toArray(new Selection[0]);
    String[] submittedValue;
    if (componentValue.length == 0) {
        submittedValue = null;
    } else {
        submittedValue = new String[componentValue.length];
        for (int i = 0; i < componentValue.length; i++) {
            submittedValue[i] = componentValue[i].getValue(chainSelect.getKeySeparator());
        }
    }
    chainSelect.setComponentValue(componentValue);
    chainSelect.setSubmittedValue(submittedValue);
    context.renderResponse();
    log.debug("add: submittedValue=" + ChainSelect.format(submittedValue));
}
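The map here is an order-preserving de-duplicator: existing selections are inserted first, re-adding a key keeps its original position, and values().toArray(new Selection[0]) flattens the result for the component. The same core with plain strings:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;

public class OrderedDedup {
    public static void main(String[] args) {
        List<String> existing = List.of("europe/france", "asia/japan");
        List<String> added = List.of("asia/japan", "europe/spain"); // one duplicate

        LinkedHashMap<String, String> map = new LinkedHashMap<>();
        existing.forEach(s -> map.put(s, s));
        added.forEach(s -> map.put(s, s)); // re-putting a key keeps its original position

        String[] submittedValue = map.values().toArray(new String[0]);
        System.out.println(Arrays.toString(submittedValue));
        // [europe/france, asia/japan, europe/spain]
    }
}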
From source file:pt.lsts.neptus.util.logdownload.LogsDownloaderWorkerActions.java
private void addTheNewFoldersAnFillTheReturnedExistentAndNewLists(LinkedHashMap<FTPFile, String> retList,
        LinkedList<LogFolderInfo> existenteLogFoldersFromServer,
        LinkedList<LogFolderInfo> newLogFoldersFromServer)
        throws InterruptedException, InvocationTargetException {
    for (String newLogName : retList.values()) {
        if (stopLogListProcessing)
            return;

        final LogFolderInfo newLogDir = new LogFolderInfo(newLogName);
        if (gui.logFolderList.containsFolder(newLogDir)) {
            existenteLogFoldersFromServer.add(gui.logFolderList.getFolder((newLogDir.getName())));
        } else {
            newLogFoldersFromServer.add(newLogDir);
            SwingUtilities.invokeAndWait(new Runnable() {
                @Override
                public void run() {
                    gui.logFolderList.addFolder(newLogDir);
                }
            });
        }
    }
    // msgPanel.writeMessageTextln("Logs Folders: " + logFolderList.myModel.size());
}
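Nothing exotic here: retList maps each FTPFile to a log-folder name, and iterating values() visits the names in listing order without touching the keys. For completeness, the shape of that values-only iteration (paths are made up):

import java.util.LinkedHashMap;

public class IterateValuesOnly {
    public static void main(String[] args) {
        // key carries identity (a fake FTP path here), value carries the display name
        LinkedHashMap<String, String> retList = new LinkedHashMap<>();
        retList.put("/logs/20240101/", "20240101");
        retList.put("/logs/20240102/", "20240102");

        // when only the values matter, iterate the view directly instead of keySet() + get()
        for (String newLogName : retList.values()) {
            System.out.println(newLogName);
        }
    }
}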
From source file:it.iit.genomics.cru.structures.bridges.uniprot.UniprotkbUtils.java
private Collection<MoleculeEntry> getUniprotEntriesXML(String location, boolean waitAndRetryOnFailure)
        throws BridgesRemoteAccessException {
    String url = location + "&format=xml";

    ArrayList<MoleculeEntry> uniprotEntries = new ArrayList<>();
    try {
        HttpClient client = new DefaultHttpClient();
        client.getParams().setParameter(ClientPNames.ALLOW_CIRCULAR_REDIRECTS, Boolean.TRUE);
        HttpGet request = new HttpGet(url);
        // add request header
        request.addHeader("User-Agent", USER_AGENT);
        HttpResponse response = client.execute(request);

        if (response.getEntity().getContentLength() == 0) {
            // No result
            return uniprotEntries;
        }

        DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
        DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
        Document doc = dBuilder.parse(new InputSource(response.getEntity().getContent()));

        // optional, but recommended; see
        // http://stackoverflow.com/questions/13786607/normalization-in-dom-parsing-with-java-how-does-it-work
        doc.getDocumentElement().normalize();

        NodeList entryList = doc.getElementsByTagName("entry");
        for (int i = 0; i < entryList.getLength(); i++) {
            Element entryElement = (Element) entryList.item(i);
            String dataset = entryElement.getAttribute("dataset");
            String ac = entryElement.getElementsByTagName("accession").item(0).getFirstChild().getNodeValue();

            MoleculeEntry uniprotEntry = new MoleculeEntry(ac);
            uniprotEntry.setDataset(dataset);

            // Taxid
            Element organism = (Element) entryElement.getElementsByTagName("organism").item(0);
            String organismCommonName = null;
            String organismScientificName = null;
            String organismOtherName = null;
            NodeList organismNames = organism.getElementsByTagName("name");
            for (int j = 0; j < organismNames.getLength(); j++) {
                Element reference = (Element) organismNames.item(j);
                switch (reference.getAttribute("type")) {
                case "scientific":
                    organismScientificName = reference.getTextContent();
                    break;
                case "common":
                    organismCommonName = reference.getTextContent();
                    break;
                default:
                    organismOtherName = reference.getTextContent();
                    break;
                }
            }
            if (null != organismCommonName) {
                uniprotEntry.setOrganism(organismCommonName);
            } else if (null != organismScientificName) {
                uniprotEntry.setOrganism(organismScientificName);
            } else if (null != organismOtherName) {
                uniprotEntry.setOrganism(organismOtherName);
            }

            NodeList organismReferences = organism.getElementsByTagName("dbReference");
            for (int j = 0; j < organismReferences.getLength(); j++) {
                Element reference = (Element) organismReferences.item(j);
                if (reference.hasAttribute("type") && "NCBI Taxonomy".equals(reference.getAttribute("type"))) {
                    String proteinTaxid = reference.getAttribute("id");
                    uniprotEntry.setTaxid(proteinTaxid);
                }
            }

            // Gene names
            NodeList geneNames = entryElement.getElementsByTagName("gene");
            for (int j = 0; j < geneNames.getLength(); j++) {
                Element gene = (Element) geneNames.item(j);
                NodeList nameList = gene.getElementsByTagName("name");
                for (int k = 0; k < nameList.getLength(); k++) {
                    Element name = (Element) nameList.item(k);
                    uniprotEntry.addGeneName(name.getFirstChild().getNodeValue());
                }
            }

            // Modified residues
            HashMap<String, ModifiedResidue> modifiedResidues = new HashMap<>();
            NodeList features = entryElement.getElementsByTagName("feature");
            for (int j = 0; j < features.getLength(); j++) {
                Element feature = (Element) features.item(j);
                if (false == entryElement.equals(feature.getParentNode())) {
                    continue;
                }
                if (feature.hasAttribute("type") && "modified residue".equals(feature.getAttribute("type"))) {
                    String description = feature.getAttribute("description").split(";")[0];
                    if (false == modifiedResidues.containsKey(description)) {
                        modifiedResidues.put(description, new ModifiedResidue(description));
                    }
                    NodeList locations = feature.getElementsByTagName("location");
                    for (int k = 0; k < locations.getLength(); k++) {
                        Element loc = (Element) locations.item(k);
                        NodeList positions = loc.getElementsByTagName("position");
                        for (int l = 0; l < positions.getLength(); l++) {
                            Element position = (Element) positions.item(l);
                            modifiedResidues.get(description).addPosition(
                                    new UniprotPosition(Integer.parseInt(position.getAttribute("position"))));
                        }
                    }
                }
            }
            uniprotEntry.getModifications().addAll(modifiedResidues.values());

            // Xrefs:
            NodeList dbReferences = entryElement.getElementsByTagName("dbReference");
            for (int j = 0; j < dbReferences.getLength(); j++) {
                Element dbReference = (Element) dbReferences.item(j);
                if (false == entryElement.equals(dbReference.getParentNode())) {
                    continue;
                }
                NodeList molecules = dbReference.getElementsByTagName("molecule");

                // Ensembl
                if (dbReference.hasAttribute("type") && "Ensembl".equals(dbReference.getAttribute("type"))) {
                    // transcript ID
                    String id = dbReference.getAttribute("id");
                    for (int iMolecule = 0; iMolecule < molecules.getLength(); iMolecule++) {
                        Element molecule = (Element) molecules.item(iMolecule);
                        uniprotEntry.addXrefToVarSplice(id, molecule.getAttribute("id"));
                    }
                    uniprotEntry.addEnsemblGene(id);
                    NodeList properties = dbReference.getElementsByTagName("property");
                    for (int k = 0; k < properties.getLength(); k++) {
                        Element property = (Element) properties.item(k);
                        if (property.hasAttribute("type") && "gene ID".equals(property.getAttribute("type"))) {
                            uniprotEntry.addEnsemblGene(property.getAttribute("value"));
                        }
                    }
                }

                // RefSeq
                if (dbReference.hasAttribute("type") && "RefSeq".equals(dbReference.getAttribute("type"))) {
                    NodeList properties = dbReference.getElementsByTagName("property");
                    for (int k = 0; k < properties.getLength(); k++) {
                        Element property = (Element) properties.item(k);
                        if (property.hasAttribute("type")
                                && "nucleotide sequence ID".equals(property.getAttribute("type"))) {
                            String id = property.getAttribute("value");
                            if (molecules.getLength() > 0) {
                                for (int iMolecule = 0; iMolecule < molecules.getLength(); iMolecule++) {
                                    Element molecule = (Element) molecules.item(iMolecule);
                                    // for RefSeq, add the xref also without the version suffix
                                    uniprotEntry.addXrefToVarSplice(id, molecule.getAttribute("id"));
                                    uniprotEntry.addXrefToVarSplice(id.split("\\.")[0],
                                            molecule.getAttribute("id"));
                                }
                            } else {
                                // for RefSeq, add the xref also without the version suffix
                                uniprotEntry.addXrefToVarSplice(id, ac);
                                uniprotEntry.addXrefToVarSplice(id.split("\\.")[0], ac);
                            }
                            uniprotEntry.addRefseq(id);
                        }
                    }
                }

                /* PDB chains will be imported from the webservice */
                // PDB
                if (dbReference.hasAttribute("type") && "PDB".equals(dbReference.getAttribute("type"))) {
                    NodeList properties = dbReference.getElementsByTagName("property");
                    String method = null;
                    String chains = null;
                    for (int k = 0; k < properties.getLength(); k++) {
                        Element property = (Element) properties.item(k);
                        if (property.hasAttribute("type") && "method".equals(property.getAttribute("type"))) {
                            method = property.getAttribute("value");
                        } else if (property.hasAttribute("type")
                                && "chains".equals(property.getAttribute("type"))) {
                            chains = property.getAttribute("value");
                        }
                    }
                    if (method != null && "Model".equals(method)) {
                        continue;
                    }
                    if (chains == null) {
                        continue;
                    }
                    String pdb = dbReference.getAttribute("id");
                    uniprotEntry.addPDB(pdb, method);
                    for (String chainElement : chains.split(",")) {
                        try {
                            String chainNames = chainElement.split("=")[0];
                            int start = Integer.parseInt(chainElement.split("=")[1].trim().split("-")[0]);
                            int end = Integer
                                    .parseInt(chainElement.split("=")[1].trim().split("-")[1].replace(".", ""));
                            for (String chainName : chainNames.split("/")) {
                                uniprotEntry.addChain(pdb, new ChainMapping(pdb, chainName.trim(), start, end),
                                        method);
                            }
                        } catch (ArrayIndexOutOfBoundsException aiobe) {
                            // cannot parse this chain element, skip it
                        }
                    }
                }
            }

            // Sequence
            NodeList sequenceElements = entryElement.getElementsByTagName("sequence");
            for (int j = 0; j < sequenceElements.getLength(); j++) {
                Element sequenceElement = (Element) sequenceElements.item(j);
                if (false == sequenceElement.getParentNode().equals(entryElement)) {
                    continue;
                }
                String sequence = sequenceElement.getFirstChild().getNodeValue().replaceAll("\n", "");
                uniprotEntry.setSequence(sequence);
            }

            // Diseases
            NodeList diseases = entryElement.getElementsByTagName("disease");
            for (int j = 0; j < diseases.getLength(); j++) {
                Element disease = (Element) diseases.item(j);
                NodeList nameList = disease.getElementsByTagName("name");
                for (int k = 0; k < nameList.getLength(); k++) {
                    Element name = (Element) nameList.item(k);
                    uniprotEntry.addDisease(name.getFirstChild().getNodeValue());
                }
            }

            // Get FASTA for all varsplice isoforms
            String fastaQuery = "http://www.uniprot.org/uniprot/" + uniprotEntry.getUniprotAc()
                    + ".fasta?include=yes";
            try {
                client.getParams().setParameter(ClientPNames.ALLOW_CIRCULAR_REDIRECTS, Boolean.TRUE);
                HttpGet fastaRequest = new HttpGet(fastaQuery);
                // add request header
                fastaRequest.addHeader("User-Agent", USER_AGENT);
                HttpResponse fastaResponse = client.execute(fastaRequest);

                if (fastaResponse.getEntity().getContentLength() == 0) {
                    continue;
                }
                InputStream is = fastaResponse.getEntity().getContent();
                try {
                    LinkedHashMap<String, ProteinSequence> fasta = FastaReaderHelper.readFastaProteinSequence(is);
                    boolean mainSequence = true;
                    for (ProteinSequence seq : fasta.values()) {
                        uniprotEntry.addSequence(seq.getAccession().getID(), seq.getSequenceAsString());
                        if (mainSequence) {
                            // values() preserves file order, so the first record is the main isoform
                            uniprotEntry.setMainIsoform(seq.getAccession().getID());
                            mainSequence = false;
                        }
                    }
                } catch (Exception e) {
                    logger.error("Cannot retrieve fasta for : " + uniprotEntry.getUniprotAc());
                }
            } catch (IOException | IllegalStateException ex) {
                logger.error(null, ex);
            }

            uniprotEntries.add(uniprotEntry);
        }
    } catch (SAXParseException se) {
        // Uniprot returned an empty result; nothing to do
    } catch (IOException | ParserConfigurationException | IllegalStateException | SAXException | DOMException
            | NumberFormatException e) {
        if (waitAndRetryOnFailure && allowedUniprotFailures > 0) {
            try {
                allowedUniprotFailures--;
                Thread.sleep(5000);
                return getUniprotEntriesXML(location, false);
            } catch (InterruptedException e1) {
                logger.error("Fail to retrieve data from " + location);
                throw new BridgesRemoteAccessException("Fail to retrieve data from Uniprot " + location);
            }
        } else {
            logger.error("Problem with Uniprot: " + url);
            throw new BridgesRemoteAccessException("Fail to retrieve data from Uniprot " + location);
        }
    }

    for (MoleculeEntry entry : uniprotEntries) {
        addToCache(entry);
    }
    return uniprotEntries;
}
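The "main isoform" trick near the end works because BioJava's FastaReaderHelper returns a LinkedHashMap, so the first element of fasta.values() is the first record in the FASTA stream. The same first-value idiom in isolation, with a hand-built map standing in for the parsed file:

import java.util.Iterator;
import java.util.LinkedHashMap;

public class FirstValueWins {
    public static void main(String[] args) {
        LinkedHashMap<String, String> fasta = new LinkedHashMap<>();
        fasta.put("P12345-1", "MSEQ..."); // canonical isoform comes first in the file
        fasta.put("P12345-2", "MSTQ...");

        // values() keeps file order, so the first value is the main isoform
        Iterator<String> it = fasta.values().iterator();
        if (it.hasNext()) {
            System.out.println("main isoform sequence: " + it.next());
        }
    }
}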