List of usage examples for java.util.EnumMap.get
public V get(Object key)
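get returns the value to which the key is mapped, or null if the map contains no mapping for it; on an EnumMap the lookup is a constant-time array access. A minimal sketch of the basic call, with a hypothetical enum:

import java.util.EnumMap;

public class EnumMapGetDemo {
    enum Status { OPEN, CLOSED }

    public static void main(String[] args) {
        EnumMap<Status, Integer> counts = new EnumMap<>(Status.class);
        counts.put(Status.OPEN, 3);

        // get(Object) returns the mapped value, or null when the key is
        // absent (or not a constant of the map's enum type at all).
        Integer open = counts.get(Status.OPEN);     // 3
        Integer closed = counts.get(Status.CLOSED); // null
        System.out.println(open + ", " + closed);   // 3, null
    }
}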
From source file: org.safecreative.api.wrapper.SafeCreativeAPIWrapperTest.java
/**
 * Test of getLicenseFeatures method, of class SafeCreativeAPIWrapper.
 */
@Test
public void testGetLicenseFeatures() throws Exception {
    System.out.println("getLicenseFeatures");
    EnumMap<License.Feature, LicenseFeatureObject> result = instance.getLicenseFeatures();
    assertNotNull(result);
    assertFalse(result.isEmpty());
    License.Feature feature = License.Feature.COMMERCIAL;
    assertNotNull(result.get(feature).getCode());
    assertNotNull(result.get(feature).getShortName());
    boolean someIsTrue = false;
    for (License.FeatureValue value : License.FeatureValue.values()) {
        if (result.get(feature).hasValue(value)) {
            someIsTrue = true;
            break;
        }
    }
    // has read some values
    assertTrue(someIsTrue);
    System.out.println("Result: " + result);
}
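Note that the chained result.get(feature).getCode() above would fail with a NullPointerException, not an assertion failure, if the COMMERCIAL feature were absent, because get returns null for missing keys. A null-safe variant, sketched with hypothetical stand-in types rather than the SafeCreative API's own:

import java.util.EnumMap;

// Hypothetical stand-ins for License.Feature and LicenseFeatureObject.
enum Feature { COMMERCIAL, DERIVATIVES }

class FeatureInfo {
    private final String code;
    FeatureInfo(String code) { this.code = code; }
    String getCode() { return code; }
}

public class NullSafeFeatureLookup {
    public static void main(String[] args) {
        EnumMap<Feature, FeatureInfo> result = new EnumMap<>(Feature.class);
        result.put(Feature.COMMERCIAL, new FeatureInfo("C"));

        // Guard the lookup: get returns null for an absent feature.
        FeatureInfo info = result.get(Feature.DERIVATIVES);
        System.out.println(info == null ? "absent" : info.getCode()); // absent

        // Or fall back to a placeholder with getOrDefault (Java 8+).
        System.out.println(result.getOrDefault(Feature.DERIVATIVES,
                new FeatureInfo("n/a")).getCode());                   // n/a
    }
}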
From source file: it.unimi.di.big.mg4j.index.cluster.IndexCluster.java
/**
 * Returns a new index cluster.
 *
 * <p>This method uses the <samp>LOCALINDEX</samp> property to locate the local indices,
 * loads them (passing on <code>randomAccess</code>) and
 * builds a new index cluster using the appropriate implementing subclass.
 *
 * <p>Note that <code>documentSizes</code> is just passed to the local indices. This can be useful
 * in {@linkplain DocumentalCluster documental clusters}, as it allows local scoring, but it is useless in
 * {@linkplain LexicalCluster lexical clusters}, as scoring is necessarily centralised. In the
 * latter case, the property {@link it.unimi.di.big.mg4j.index.Index.UriKeys#SIZES} can be used to specify
 * a global sizes file (which usually comes from an original global index).
 *
 * @param basename the basename.
 * @param randomAccess whether the index should be accessible randomly.
 * @param documentSizes if true, document sizes will be loaded (note that sometimes document sizes
 * might be loaded anyway because the compression method for positions requires it).
 * @param queryProperties a map containing associations between {@link it.unimi.di.big.mg4j.index.Index.UriKeys}
 * and values, or <code>null</code>.
 */
@SuppressWarnings("unchecked")
public static Index getInstance(final CharSequence basename, final boolean randomAccess,
        final boolean documentSizes, final EnumMap<UriKeys, String> queryProperties)
        throws ConfigurationException, IOException, ClassNotFoundException, SecurityException,
        URISyntaxException, InstantiationException, IllegalAccessException, InvocationTargetException,
        NoSuchMethodException {
    final Properties properties = new Properties(basename + DiskBasedIndex.PROPERTIES_EXTENSION);

    ClusteringStrategy strategy = null;
    Class<? extends ClusteringStrategy> strategyClass = null;
    if (properties.containsKey(PropertyKeys.STRATEGY))
        strategy = (ClusteringStrategy) BinIO.loadObject(properties.getString(PropertyKeys.STRATEGY));
    else if (properties.containsKey(PropertyKeys.STRATEGYCLASS))
        try {
            strategyClass = (Class<? extends ClusteringStrategy>) MG4JClassParser.getParser()
                    .parse(properties.getString(PropertyKeys.STRATEGYCLASS));
        } catch (ParseException e) {
            throw new RuntimeException(e);
        }
    else
        throw new IllegalArgumentException(
                "Cluster properties must contain either a strategy or a strategy class property");

    final Class<? extends IndexCluster> indexClass = (Class<? extends IndexCluster>) Class
            .forName(properties.getString(Index.PropertyKeys.INDEXCLASS, "(missing index class)"));

    String[] localBasename = properties.getStringArray(PropertyKeys.LOCALINDEX);
    Index[] localIndex = new Index[localBasename.length];
    for (int i = 0; i < localIndex.length; i++)
        localIndex[i] = Index.getInstance(localBasename[i], randomAccess, documentSizes);

    final int numberOfDocuments = properties.getInt(Index.PropertyKeys.DOCUMENTS);
    final IntBigList sizes = queryProperties != null && queryProperties.containsKey(Index.UriKeys.SIZES)
            ? DiskBasedIndex.readSizes(queryProperties.get(Index.UriKeys.SIZES), numberOfDocuments)
            : null;

    if (sizes != null && documentSizes)
        LOGGER.warn(
                "You are loading both local sizes and a global size file specified by the \"size\" properties, which is usually nonsensical");

    boolean hasCounts = true;
    boolean hasPositions = true;
    Payload payload = null;

    for (int i = 0; i < localIndex.length; i++) {
        hasCounts = hasCounts && localIndex[i].hasCounts;
        hasPositions = hasPositions && localIndex[i].hasPositions;
        if (i == 0)
            payload = localIndex[i].payload;
        if ((payload == null) != (localIndex[i].payload == null)
                || payload != null && !payload.compatibleWith(localIndex[i].payload))
            throw new IllegalStateException("The payload specification of index " + localIndex[0]
                    + " is not compatible with that of index " + localIndex[i]);
    }

    // We stem the names of Bloom filters from the index basename.
    BloomFilter<Void>[] termFilter = null;
    if (properties.getBoolean(DocumentalCluster.PropertyKeys.BLOOM)) {
        LOGGER.debug("Loading Bloom filters...");
        termFilter = new BloomFilter[localIndex.length];
        for (int i = 0; i < localIndex.length; i++)
            termFilter[i] = (BloomFilter<Void>) BinIO.loadObject(basename + "-" + i + BLOOM_EXTENSION);
        LOGGER.debug("Completed.");
    }

    // Let us rebuild the strategy in case it's a chained strategy
    if (strategyClass != null) {
        strategy = strategyClass.getConstructor(Index[].class, BloomFilter[].class)
                .newInstance(localIndex, termFilter);
    } else {
        if (strategy instanceof ChainedLexicalClusteringStrategy)
            strategy = new ChainedLexicalClusteringStrategy(localIndex, termFilter);
        else if (strategy.numberOfLocalIndices() != localBasename.length)
            // Report the strategy's own count; the original message printed
            // localIndex.length, which always equals localBasename.length.
            throw new IllegalArgumentException("The number of local indices of the strategy ("
                    + strategy.numberOfLocalIndices()
                    + ") and the number of local indices specified by the property file ("
                    + localBasename.length + ") differ");
    }

    if (LexicalCluster.class.isAssignableFrom(indexClass))
        return new LexicalCluster(localIndex, (LexicalClusteringStrategy) strategy, termFilter,
                numberOfDocuments, properties.getInt(Index.PropertyKeys.TERMS),
                properties.getLong(Index.PropertyKeys.POSTINGS),
                properties.getLong(Index.PropertyKeys.OCCURRENCES),
                properties.getInt(Index.PropertyKeys.MAXCOUNT), payload, hasCounts, hasPositions,
                Index.getTermProcessor(properties), properties.getString(Index.PropertyKeys.FIELD),
                sizes, properties);
    else if (DocumentalCluster.class.isAssignableFrom(indexClass)) {
        if (DocumentalConcatenatedCluster.class.isAssignableFrom(indexClass))
            return new DocumentalConcatenatedCluster(localIndex, (DocumentalClusteringStrategy) strategy,
                    properties.getBoolean(IndexCluster.PropertyKeys.FLAT), termFilter, numberOfDocuments,
                    properties.getInt(Index.PropertyKeys.TERMS),
                    properties.getLong(Index.PropertyKeys.POSTINGS),
                    properties.getLong(Index.PropertyKeys.OCCURRENCES),
                    properties.getInt(Index.PropertyKeys.MAXCOUNT), payload, hasCounts, hasPositions,
                    Index.getTermProcessor(properties), properties.getString(Index.PropertyKeys.FIELD),
                    sizes, properties);
        return new DocumentalMergedCluster(localIndex, (DocumentalClusteringStrategy) strategy,
                properties.getBoolean(IndexCluster.PropertyKeys.FLAT), termFilter, numberOfDocuments,
                properties.getInt(Index.PropertyKeys.TERMS),
                properties.getLong(Index.PropertyKeys.POSTINGS),
                properties.getLong(Index.PropertyKeys.OCCURRENCES),
                properties.getInt(Index.PropertyKeys.MAXCOUNT), payload, hasCounts, hasPositions,
                Index.getTermProcessor(properties), properties.getString(Index.PropertyKeys.FIELD),
                sizes, properties);
    } else
        throw new IllegalArgumentException("Unknown IndexCluster implementation: " + indexClass.getName());
}
From source file: org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.java
private void incrOpCount(FSEditLogOpCodes opCode, EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts,
        Step step, Counter counter) {
    Holder<Integer> holder = opCounts.get(opCode);
    if (holder == null) {
        holder = new Holder<Integer>(1);
        opCounts.put(opCode, holder);
    } else {
        holder.held++;
    }
    counter.increment();
}
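The get/null-check/put sequence above predates Java 8; on later JDKs the same get-or-create step collapses into computeIfAbsent. A minimal sketch, assuming Holder is a plain mutable wrapper like Hadoop's:

import java.util.EnumMap;

public class OpCountDemo {
    enum OpCode { ADD, DELETE, RENAME } // hypothetical op codes

    // Minimal stand-in for Hadoop's Holder, for illustration only.
    static class Holder<T> {
        T held;
        Holder(T initial) { held = initial; }
    }

    static void incrOpCount(OpCode op, EnumMap<OpCode, Holder<Integer>> counts) {
        // computeIfAbsent folds the get/null-check/put dance into one call:
        // it creates and stores Holder(0) on first use, then increments.
        counts.computeIfAbsent(op, k -> new Holder<>(0)).held++;
    }

    public static void main(String[] args) {
        EnumMap<OpCode, Holder<Integer>> counts = new EnumMap<>(OpCode.class);
        incrOpCount(OpCode.ADD, counts);
        incrOpCount(OpCode.ADD, counts);
        System.out.println(counts.get(OpCode.ADD).held); // 2
    }
}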
From source file: eu.ggnet.dwoss.report.ReportAgentBean.java
@Override
public ViewReportResult prepareReport(ReportParameter p, boolean loadUnreported) {
    attachDanglingComplaints(p.getContractor(), p.getEnd());
    List<ReportLine> findUnreportedUnits = reportLineEao.findUnreportedUnits(p.getContractor(),
            (loadUnreported) ? null : p.getStart(), p.getEnd());
    EnumMap<ViewReportResult.Type, NavigableSet<ReportLine>> lines = new EnumMap<>(ViewReportResult.Type.class);
    PrepareReportPartition unitPartition = partition(findUnreportedUnits, p.getContractor());
    lines.put(ACTIVE_INFO, unitPartition.getActiveInfo());
    lines.put(REPORT_INFO, filterReportInfo(unitPartition.getReportAble()));
    lines.put(REPAYMENTS, filterRepayed(unitPartition.getReportAble()));
    switch (p.getViewMode()) {
    case DEFAULT:
        lines.put(INVOICED, filterInvoiced(unitPartition.getReportAble()));
        break;
    case YEARSPLITT_AND_WARRANTIES:
        YearSplit filterInvoicedSplit = filterInvoicedSplit(unitPartition.getReportAble(), p.getStart());
        lines.put(PAST_ONE_YEAR, filterInvoicedSplit.getAfter());
        lines.put(UNDER_ONE_YEAR, filterInvoicedSplit.getBefore());
        PrepareReportPartition warrantyPartition = partition(
                filterWarrenty(reportLineEao.findUnreportedWarrentys(), unitPartition.getReportAble()),
                p.getContractor());
        lines.put(WARRENTY, filterInvoiced(warrantyPartition.getReportAble()));
        lines.get(ACTIVE_INFO).addAll(warrantyPartition.getActiveInfo());
        lines.get(REPAYMENTS).addAll(filterRepayed(warrantyPartition.getReportAble()));
        lines.get(REPORT_INFO).addAll(filterReportInfo(warrantyPartition.getReportAble()));
        break;
    }
    ViewReportResult viewReportResult = new ViewReportResult(lines, p);
    viewReportResult.getAllLines().stream().forEach((allLine) -> reportEm.detach(allLine));
    if (!marginCalculator.isUnsatisfied())
        marginCalculator.get().recalc(viewReportResult);
    return viewReportResult;
}
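In the YEARSPLITT_AND_WARRANTIES branch, the lines.get(ACTIVE_INFO).addAll(...) calls are safe only because the same keys were put a few lines earlier; EnumMap.get would return null for an unpopulated key. A minimal sketch, with a hypothetical Type subset, of making that get-then-add independent of insertion order via computeIfAbsent:

import java.util.EnumMap;
import java.util.NavigableSet;
import java.util.TreeSet;

public class ReportLinesDemo {
    enum Type { ACTIVE_INFO, REPORT_INFO } // hypothetical subset

    public static void main(String[] args) {
        EnumMap<Type, NavigableSet<String>> lines = new EnumMap<>(Type.class);

        // computeIfAbsent creates the set on first use, so later addAll/add
        // calls do not depend on an earlier put() for the same key.
        lines.computeIfAbsent(Type.ACTIVE_INFO, t -> new TreeSet<>()).add("line-1");
        lines.computeIfAbsent(Type.ACTIVE_INFO, t -> new TreeSet<>()).add("line-2");
        System.out.println(lines.get(Type.ACTIVE_INFO)); // [line-1, line-2]
    }
}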
From source file: org.codehaus.mojo.license.AbstractFileHeaderMojo.java
/**
 * Checks the results of the mojo execution using {@link #isFailOnMissingHeader()} and
 * {@link #isFailOnNotUptodateHeader()}.
 *
 * @param result processed files, grouped by their status
 * @throws MojoFailureException if the check fails (some file has no header, or has a header to update)
 */
protected void checkResults(EnumMap<FileState, Set<File>> result) throws MojoFailureException {
    Set<FileState> states = result.keySet();
    StringBuilder builder = new StringBuilder();
    if (isDryRun() && isFailOnMissingHeader() && states.contains(FileState.add)) {
        List<File> files = FileUtil.orderFiles(result.get(FileState.add));
        builder.append("There are ").append(files.size()).append(" file(s) with no header:");
        for (File file : files) {
            builder.append("\n").append(file);
        }
    }
    if (isDryRun() && isFailOnNotUptodateHeader() && states.contains(FileState.update)) {
        List<File> files = FileUtil.orderFiles(result.get(FileState.update));
        builder.append("\nThere are ").append(files.size()).append(" file(s) with header to update:");
        for (File file : files) {
            builder.append("\n").append(file);
        }
    }
    String message = builder.toString();
    if (StringUtils.isNotBlank(message)) {
        throw new MojoFailureException(message);
    }
}
From source file: tasly.greathealth.oms.web.inventory.rest.resources.ItemInfoResource.java
@SuppressWarnings("null") @PUT/*from w w w.j a v a 2 s .c om*/ @Path("/flag0skus") public Response updateFlagSku(final SkuList skuList) throws Exception { int flag; EnumMap<HandleReturn, Object> handleReturn = new EnumMap<HandleReturn, Object>(HandleReturn.class); boolean updateStatus = false; final List<String> skus = skuList.getSkus(); if (skus.size() > 0 || skus != null) { for (final String sku : skus) { final ItemInfo itemInfo = itemInfoFacade.getBySku(sku); flag = itemInfo.getStockManageFlag(); if (flag == 0) { final List<TaslyItemLocationData> checkTaslyItemLocationDatas = taslyItemLocationService .getByItemID(sku); if (checkTaslyItemLocationDatas == null || checkTaslyItemLocationDatas.size() == 0) { omsLOG.error("sku:" + sku + ",no ItemLocation data!"); continue; } else { try { handleReturn = itemQuantityService.handleUpdateMethod(checkTaslyItemLocationDatas, sku, flag, 0); } catch (final Exception e) { omsLOG.error("handle sku:" + sku + " failed and error is " + e); handleReturn.put(HandleReturn.handleStatus, false); } if ((boolean) handleReturn.get(HandleReturn.handleStatus)) { try { updateStatus = itemQuantityService.updateMethod(sku, flag, 0); if (updateStatus) { omsLOG.debug("sku:" + sku + ",flag=0 allocated ok!"); } } catch (final Exception e) { omsLOG.error("update sku:" + sku + " failed and error is " + e); } } } } } omsLOG.info("Update quantity where flag=0 finished."); } return Response.status(Response.Status.OK).build(); }
From source file: tasly.greathealth.oms.web.inventory.rest.resources.ItemInfoResource.java
@SuppressWarnings("null") @GET//from w w w .ja va 2s . c om @Path("/flag0update") public Response updateQuantity() { omsLOG.info("Begin to update quantity where flag=0."); final Collection<ItemInfo> itemInfos = itemInfoFacade.getAll(); int flag; String sku; EnumMap<HandleReturn, Object> handleReturn = new EnumMap<HandleReturn, Object>(HandleReturn.class); boolean updateStatus = false; if (itemInfos.size() == 0 || itemInfos == null) { omsLOG.error("Get all itemInfos failed!"); } else { for (final ItemInfo itemInfo : itemInfos) { flag = itemInfo.getStockManageFlag(); sku = itemInfo.getSku(); if (flag == 0) { final List<TaslyItemLocationData> checkTaslyItemLocationDatas = taslyItemLocationService .getByItemID(sku); if (checkTaslyItemLocationDatas == null || checkTaslyItemLocationDatas.size() == 0) { omsLOG.error("sku:" + sku + ",no ItemLocation data!"); continue; } else { try { handleReturn = itemQuantityService.handleUpdateMethod(checkTaslyItemLocationDatas, sku, flag, 0); } catch (final Exception e) { omsLOG.error("handle sku:" + sku + " failed and error is " + e); handleReturn.put(HandleReturn.handleStatus, false); } if ((boolean) handleReturn.get(HandleReturn.handleStatus)) { try { updateStatus = itemQuantityService.updateMethod(sku, flag, 0); if (updateStatus) { omsLOG.debug("sku:" + sku + ",flag=0 allocated ok!"); } } catch (final Exception e) { omsLOG.error("update sku:" + sku + " failed and error is " + e); } } } } } } omsLOG.info("Update quantity where flag=0 finished."); return Response.status(Response.Status.OK).build(); }
From source file: gov.nih.nci.caarray.web.action.project.ProjectFilesAction.java
/**
 * Computes a file status count for each type of file status.
 *
 * @return a map whose keys are file statuses and whose values are the number of files with that status
 */
public EnumMap<FileStatus, Integer> computeFileStatusCounts() {
    final EnumMap<FileStatus, Integer> countMap = new EnumMap<FileStatus, Integer>(FileStatus.class);
    for (final FileStatus f : FileStatus.values()) {
        countMap.put(f, 0);
    }
    for (final CaArrayFile f : getFiles()) {
        countMap.put(FileStatus.valueOf(f.getStatus()), countMap.get(FileStatus.valueOf(f.getStatus())) + 1);
    }
    return countMap;
}
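Since Java 8, the get-then-put increment in the second loop can be expressed as a single EnumMap.merge call. A minimal sketch with a hypothetical FileStatus enum (not caArray's real one):

import java.util.EnumMap;

public class StatusCountDemo {
    enum FileStatus { UPLOADED, VALIDATED, REJECTED } // hypothetical values

    public static void main(String[] args) {
        EnumMap<FileStatus, Integer> countMap = new EnumMap<>(FileStatus.class);
        for (FileStatus s : FileStatus.values()) {
            countMap.put(s, 0); // pre-seed so every status reports a count
        }
        FileStatus[] files = { FileStatus.UPLOADED, FileStatus.UPLOADED, FileStatus.REJECTED };
        for (FileStatus s : files) {
            // merge() replaces the get()+put() pair in the original loop.
            countMap.merge(s, 1, Integer::sum);
        }
        System.out.println(countMap); // {UPLOADED=2, VALIDATED=0, REJECTED=1}
    }
}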
From source file: org.apache.hadoop.hbase.io.hfile.TestCacheOnWrite.java
private void readStoreFile(boolean useTags) throws IOException {
    AbstractHFileReader reader;
    if (useTags) {
        reader = (HFileReaderV3) HFile.createReader(fs, storeFilePath, cacheConf, conf);
    } else {
        reader = (HFileReaderV2) HFile.createReader(fs, storeFilePath, cacheConf, conf);
    }
    LOG.info("HFile information: " + reader);
    final boolean cacheBlocks = false;
    final boolean pread = false;
    HFileScanner scanner = reader.getScanner(cacheBlocks, pread);
    assertTrue(testDescription, scanner.seekTo());
    long offset = 0;
    HFileBlock prevBlock = null;
    EnumMap<BlockType, Integer> blockCountByType = new EnumMap<BlockType, Integer>(BlockType.class);
    DataBlockEncoding encodingInCache = encoderType.getEncoder().getDataBlockEncoding();
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        long onDiskSize = -1;
        if (prevBlock != null) {
            onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block = reader.readBlock(offset, onDiskSize, false, true, false, true, null,
                encodingInCache);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
        boolean isCached = blockCache.getBlock(blockCacheKey, true, false, true) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
            throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n" + "isCached: " + isCached
                    + "\n" + "Test description: " + testDescription + "\n" + "block: " + block + "\n"
                    + "encodingInCache: " + encodingInCache + "\n" + "blockCacheKey: " + blockCacheKey);
        }
        prevBlock = block;
        offset += block.getOnDiskSizeWithHeader();
        BlockType bt = block.getBlockType();
        Integer count = blockCountByType.get(bt);
        blockCountByType.put(bt, (count == null ? 0 : count) + 1);
    }
    LOG.info("Block count by type: " + blockCountByType);
    String countByType = blockCountByType.toString();
    BlockType cachedDataBlockType = encoderType.encode ? BlockType.ENCODED_DATA : BlockType.DATA;
    if (useTags) {
        assertEquals("{" + cachedDataBlockType
                + "=1550, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=20}", countByType);
    } else {
        assertEquals("{" + cachedDataBlockType
                + "=1379, LEAF_INDEX=154, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=18}", countByType);
    }
    reader.close();
}
From source file: hu.mta.sztaki.lpds.cloud.simulator.iaas.PhysicalMachine.java
/**
 * Defines a new physical machine, ensures that there are no VMs running so far.
 *
 * @param cores
 *            defines the number of CPU cores this machine has under control
 * @param perCorePocessing
 *            defines the processing capabilities of a single CPU core in this machine (in instructions/tick)
 * @param memory
 *            defines the total physical memory this machine has under control (in bytes)
 * @param disk
 *            defines the local physical disk & networking this machine has under control
 * @param turnonOperations
 *            defines the tasks to execute before the PM can be turned on - this can be considered as the
 *            simulation of the boot process. For the complete definition of this array have a look at the
 *            powerstatedelayer class.
 * @param switchoffOperations
 *            defines the tasks to execute before the PM can be switched off - this can be considered as the
 *            simulation of the shutdown process. For the complete definition of this array have a look at
 *            the powerstatedelayer class.
 * @param powerTransitions
 *            determines the applied power state transitions while the physical machine state changes. This
 *            is the principal way to alter a PM's energy consumption behavior.
 */
public PhysicalMachine(double cores, double perCorePocessing, long memory, Repository disk,
        double[] turnonOperations, double[] switchoffOperations,
        EnumMap<PowerStateKind, EnumMap<State, PowerState>> powerTransitions) {
    super(cores * perCorePocessing);
    // Init resources:
    totalCapacities = new ConstantConstraints(cores, perCorePocessing, memory);
    internalAvailableCaps = new AlterableResourceConstraints(totalCapacities);
    availableCapacities = new UnalterableConstraintsPropagator(internalAvailableCaps);
    internalReallyFreeCaps = new AlterableResourceConstraints(totalCapacities);
    freeCapacities = new UnalterableConstraintsPropagator(internalReallyFreeCaps);
    localDisk = disk;
    hostPowerBehavior = powerTransitions.get(PowerStateKind.host);
    storagePowerBehavior = powerTransitions.get(PowerStateKind.storage);
    networkPowerBehavior = powerTransitions.get(PowerStateKind.network);
    onTransition = new double[turnonOperations.length];
    onDelayEstimate = prepareTransitionalTasks(true, turnonOperations);
    offTransition = new double[switchoffOperations.length];
    offDelayEstimate = prepareTransitionalTasks(false, switchoffOperations);
    if (hostPowerBehavior == null || storagePowerBehavior == null || networkPowerBehavior == null) {
        throw new IllegalStateException(
                "Cannot initialize physical machine without a complete power behavior set");
    }
    setState(State.OFF);
    directConsumer = new MaxMinConsumer(getPerTickProcessingPower());
}
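The constructor above pulls one per-kind behavior map out of a nested EnumMap via get(PowerStateKind...), then rejects incomplete sets. A sketch of how such a nested map might be assembled before construction; the enums and the PowerState stand-in here are simplified placeholders, not the simulator's real types:

import java.util.EnumMap;

public class PowerBehaviorDemo {
    // Hypothetical stand-ins for the simulator's types, for illustration only.
    enum PowerStateKind { host, storage, network }
    enum State { OFF, RUNNING }
    static class PowerState {
        final double idleWatts;
        PowerState(double idleWatts) { this.idleWatts = idleWatts; }
    }

    public static void main(String[] args) {
        EnumMap<PowerStateKind, EnumMap<State, PowerState>> transitions =
                new EnumMap<>(PowerStateKind.class);
        for (PowerStateKind kind : PowerStateKind.values()) {
            EnumMap<State, PowerState> perState = new EnumMap<>(State.class);
            perState.put(State.OFF, new PowerState(0.0));
            perState.put(State.RUNNING, new PowerState(120.0));
            transitions.put(kind, perState);
        }
        // Every kind is populated, so a constructor-style null check on
        // transitions.get(kind) would pass for host, storage, and network.
        System.out.println(transitions.get(PowerStateKind.host).get(State.RUNNING).idleWatts); // 120.0
    }
}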