Usage examples for java.util.HashSet.size()
public int size()

Returns the number of elements in this set (its cardinality).
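Before the project examples, a minimal self-contained sketch of what size() reports (class name and values are illustrative): duplicates collapse on add, so size() counts distinct elements, and for HashSet it runs in constant time.

import java.util.HashSet;
import java.util.Set;

public class HashSetSizeDemo {
    public static void main(String[] args) {
        Set<String> hosts = new HashSet<>();
        hosts.add("node-1");
        hosts.add("node-2");
        hosts.add("node-1"); // duplicate, ignored by the set

        // size() returns the cardinality: 2, not 3
        System.out.println(hosts.size());

        // prefer isEmpty() over size() == 0 for emptiness checks
        System.out.println(hosts.isEmpty()); // false
    }
}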
From source file: org.apache.storm.loadgen.LoadMetricsServer.java

private void outputMetrics(Nimbus.Iface client, Collection<String> names) throws Exception {
    ClusterSummary summary = client.getClusterInfo();
    Set<String> ids = new HashSet<>();
    for (TopologySummary ts : summary.get_topologies()) {
        if (names.contains(ts.get_name())) {
            ids.add(ts.get_id());
        }
    }
    if (ids.size() != names.size()) {
        throw new Exception("Could not find all topologies: " + names);
    }
    HashSet<String> workers = new HashSet<>();
    HashSet<String> hosts = new HashSet<>();
    int executors = 0;
    int uptime = 0;
    long acked = 0;
    long failed = 0;
    double totalLatMs = 0;
    long totalLatCount = 0;
    for (String id : ids) {
        TopologyInfo info = client.getTopologyInfo(id);
        @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
        TopologyPageInfo tpi = client.getTopologyPageInfo(id, ":all-time", false);
        uptime = Math.max(uptime, info.get_uptime_secs());
        for (ExecutorSummary exec : info.get_executors()) {
            hosts.add(exec.get_host());
            workers.add(exec.get_host() + exec.get_port());
            executors++;
            if (exec.get_stats() != null && exec.get_stats().get_specific() != null
                    && exec.get_stats().get_specific().is_set_spout()) {
                SpoutStats stats = exec.get_stats().get_specific().get_spout();
                Map<String, Long> failedMap = stats.get_failed().get(":all-time");
                Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
                if (ackedMap != null) {
                    for (String key : ackedMap.keySet()) {
                        if (failedMap != null) {
                            Long tmp = failedMap.get(key);
                            if (tmp != null) {
                                failed += tmp;
                            }
                        }
                        long ackVal = ackedMap.get(key);
                        acked += ackVal;
                    }
                }
            }
        }
        Double latency = tpi.get_topology_stats().get_window_to_complete_latencies_ms().get(":all-time");
        Long latAcked = tpi.get_topology_stats().get_window_to_acked().get(":all-time");
        if (latency != null && latAcked != null) {
            totalLatCount += latAcked;
            totalLatMs += (latAcked * latency);
        }
    }
    @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
    long failedThisTime = failed - prevFailed;
    @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
    long ackedThisTime = acked - prevAcked;
    @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
    long thisTime = uptime - prevUptime;
    prevUptime = uptime;
    prevAcked = acked;
    prevFailed = failed;
    Histogram copy = new Histogram(3600000000000L, 3);
    synchronized (histo) {
        copy.add(histo);
        histo.reset();
    }
    long user = userCpu.getAndSet(0);
    long sys = systemCpu.getAndSet(0);
    long gc = gcMs.getAndSet(0);
    long skippedMaxSpout = skippedMaxSpoutMs.getAndSet(0);
    long memBytes = readMemory();
    allCombined.add(new Measurements(uptime, ackedThisTime, thisTime, failedThisTime, copy, user, sys, gc,
            memBytes, ids, workers.size(), executors, hosts.size(),
            congested.getAndSet(new ConcurrentHashMap<>()), skippedMaxSpout, totalLatMs / totalLatCount));
    Measurements inWindow = Measurements.combine(allCombined, null, windowLength);
    for (MetricResultsReporter reporter : reporters) {
        reporter.reportWindow(inWindow, allCombined);
    }
}
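The pattern worth extracting here is the size comparison: because ids is a set, ids.size() != names.size() catches names that matched no topology (it also assumes distinct names resolve to distinct ids). A minimal sketch of the same completeness check, with a hypothetical resolve() helper standing in for the Nimbus lookup:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class CompletenessCheck {
    // Hypothetical resolver: returns an id for a known name, or null if unknown.
    static String resolve(String name) {
        return name.startsWith("topo-") ? "id-" + name : null;
    }

    public static void main(String[] args) {
        List<String> requested = List.of("topo-a", "topo-b", "unknown");
        Set<String> ids = new HashSet<>();
        for (String name : requested) {
            String id = resolve(name);
            if (id != null) {
                ids.add(id);
            }
        }
        // If any name failed to resolve, the set is smaller than the request list.
        if (ids.size() != requested.size()) {
            System.out.println("Could not resolve all names: " + requested);
        }
    }
}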
From source file: com.dell.asm.asmcore.asmmanager.util.ServiceTemplateValidator.java

/**
 * Validate server network configuration.
 *
 * @param component
 */
public void validateNetworks(ServiceTemplateComponent component, final Map<String, String> repoToTaskMap) {
    // for use with partition mask
    int M_PXE = 0x0000001;
    int M_HMGMT = 0x0000010;
    int M_HMGRN = 0x0000100;
    int M_HCLST = 0x0001000;
    int M_ISCSI = 0x0010000;
    int M_FILE = 0x0100000;
    int M_OTHER = 0x1000000;
    ServiceTemplateSetting networking = component
            .getTemplateSetting(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_NETWORK_CONFIG_ID);
    if (networking == null) {
        // some server components may not include networking configuration
        return;
    }
    // Skip network validation of hardware-only configuration
    if (ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_COMPID_HW.equals(component.getComponentID())) {
        return;
    }
    com.dell.asm.asmcore.asmmanager.client.networkconfiguration.NetworkConfiguration networkConfig =
            getServiceTemplateUtil().deserializeNetwork(networking.getValue());
    if (networkConfig == null) {
        LOGGER.warn("No networking configuration on server component: " + component.getAsmGUID());
        return;
    }
    boolean isSanBoot = component.hasSanBoot();
    boolean isNoneBoot = component.hasNoneBoot();
    boolean hasESX = component.hasESX(repoToTaskMap);
    boolean hasHyperV = component.hasHyperV();
    boolean hasBareMetalOS = component.hasBareMetal(repoToTaskMap);
    boolean isISCSIBoot = component.hasSanISCSIBoot();
    List<String> vMotionNetworks = new ArrayList<String>();
    List<String> pxeNetworks = new ArrayList<String>();
    List<String> hypManangementNetworks = new ArrayList<String>();
    List<String> vsanNetworks = new ArrayList<String>();
    List<String> fipsNetworks = new ArrayList<String>();
    boolean hasPXE = false;
    boolean hasStaticPXE = false;
    boolean hasPXEOnPartNot1 = false;
    boolean hasHypervisorMgmt = false;
    boolean hasHMOnPart1 = false;
    boolean hasHypervisorMigration = false;
    boolean hasHypervisorCluster = false;
    boolean hasISCSI = false;
    boolean hasHypervisorMgmtStatic = false;
    boolean hasHypervisorMigrationStatic = false;
    boolean hasHypervisorClusterStatic = false;
    boolean hasISCSIStatic = false;
    boolean hasInvalidPartitionNetwork = false;
    boolean hasOtherStatic = false;
    boolean componentInvalidForEsx = false;
    String errorCase = null;
    List<Interface> interfaces = networkConfig.getUsedInterfaces();
    HashSet<String> partitionNetworkTypes = new HashSet<String>();
    List<List<String>> workloadNetworksCheck = new ArrayList<List<String>>();
    List<String> bareMetalOsNetworkCheck = new ArrayList<String>();
    boolean iscsiOnPort1 = false;
    boolean foundSingleISCSIOnPort1 = false;
    boolean isBootDeviceFc = false;
    boolean fcNetworkPresent = false;
    final ServiceTemplateValid componentValid = component.getComponentValid();
    for (ServiceTemplateCategory category : safeList(component.getResources())) {
        for (ServiceTemplateSetting setting : safeList(category.getParameters())) {
            if ((setting.getId()
                    .equalsIgnoreCase(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_TARGET_BOOTDEVICE_ID)
                    && setting.getValue()
                            .equals(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_TARGET_BOOTDEVICE_FC))) {
                isBootDeviceFc = true;
            }
            if (((setting.getId()
                    .equalsIgnoreCase(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_NETWORK_CONFIG_ID)
                    && setting.getValue().toLowerCase().contains("\"usedforfc\":true")))
                    || ((setting.getId().equalsIgnoreCase(
                            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_NETWORK_CONFIG_ID)
                    && setting.getValue().toLowerCase().contains("\"fabrictype\":\"fc\"")))) {
                fcNetworkPresent = true;
            }
        }
    }
    if (isBootDeviceFc && !fcNetworkPresent) {
        LOGGER.error("fcNetworksError");
        componentValid.addMessage(AsmManagerMessages.fcNwtworksValidation());
        componentValid.setValid(Boolean.FALSE);
    }
    for (Interface interfaceObject : interfaces) {
        if (interfaceObject.isPartitioned() && hasHyperV) {
            // stop validation here and return
            componentValid.addMessage(AsmManagerMessages.serverWithHyperVPartitioned());
            componentValid.setValid(Boolean.FALSE);
            return;
        }
        List<Partition> partitions = interfaceObject.getPartitions();
        for (Partition partition : partitions) {
            partitionNetworkTypes.clear();
            bareMetalOsNetworkCheck.clear();
            List<String> networkIds = partition.getNetworks();
            Integer curMask = 0;
            if (networkIds != null) {
                for (String networkId : networkIds) {
                    Network networkObject = getNetworkService().getNetwork(networkId);
                    // In general networkObject should never be null, but ran into cases with
                    // invalid default templates where it was.
                    String networkType = networkObject == null ? "" : networkObject.getType().value();
                    boolean isStatic = networkObject != null && networkObject.isStatic();
                    if (networkType.equals(NetworkType.STORAGE_ISCSI_SAN.value())) {
                        hasISCSI = true;
                        curMask |= M_ISCSI;
                        hasISCSIStatic = isStatic;
                        if (isSanBoot && !isStatic) {
                            // iSCSI must be static for SAN boot from iSCSI
                            componentValid.addMessage(AsmManagerMessages.iscsiMustHaveStatic());
                        }
                    } else if (networkType.equals(NetworkType.PXE.value())) {
                        hasPXE = true;
                        curMask |= M_PXE;
                        if (networkObject.isStatic()) {
                            hasStaticPXE = true;
                        }
                        if (!partition.getName().equals("1"))
                            hasPXEOnPartNot1 = true;
                    } else if (networkType.equals(NetworkType.HYPERVISOR_MANAGEMENT.value())) {
                        hasHypervisorMgmt = true;
                        curMask |= M_HMGMT;
                        if (partition.getName().equals("1"))
                            hasHMOnPart1 = true;
                        hasHypervisorMgmtStatic = isStatic;
                    } else if (networkType.equals(NetworkType.HYPERVISOR_CLUSTER_PRIVATE.value())) {
                        hasHypervisorCluster = true;
                        hasHypervisorClusterStatic = isStatic;
                        curMask |= M_HCLST;
                    } else if (networkType.equals(NetworkType.HYPERVISOR_MIGRATION.value())) {
                        hasHypervisorMigration = true;
                        hasHypervisorMigrationStatic = isStatic;
                        curMask |= M_HMGRN;
                    } else if (networkType.equals(NetworkType.FILESHARE.value())) {
                        curMask |= M_FILE;
                    } else {
                        curMask |= M_OTHER;
                        if (isStatic) {
                            hasOtherStatic = true;
                        }
                    }
                    if (hasESX) {
                        if (networkType.equals(NetworkType.HYPERVISOR_MIGRATION.value())) {
                            vMotionNetworks.add(networkId);
                        }
                        if (networkType.equals(NetworkType.PXE.value())) {
                            pxeNetworks.add(networkId);
                        }
                        if (networkType.equals(NetworkType.VSAN.value())) {
                            vsanNetworks.add(networkId);
                        }
                        if (networkType.equals(NetworkType.FIP_SNOOPING.value())) {
                            fipsNetworks.add(networkId);
                        }
                        if (networkType.equals(NetworkType.HYPERVISOR_MANAGEMENT.value())) {
                            hypManangementNetworks.add(networkId);
                        }
                        if (networkType.equals(NetworkType.PRIVATE_LAN.value())
                                || networkType.equals(NetworkType.PUBLIC_LAN.value())) {
                            List<String> netTemp = new ArrayList<String>();
                            for (String netId : networkIds) {
                                if (NetworkType.PRIVATE_LAN
                                        .equals(getNetworkService().getNetwork(netId).getType())
                                        || NetworkType.PUBLIC_LAN
                                                .equals(getNetworkService().getNetwork(netId).getType())) {
                                    netTemp.add(netId);
                                }
                            }
                            workloadNetworksCheck.add(netTemp);
                        }
                        partitionNetworkTypes.add(networkType);
                    }
                    if (hasBareMetalOS) {
                        if (!hasInvalidPartitionNetwork) {
                            // partition index, number, etc. is always 0; use the name to identify
                            // the partition index
                            if (component.hasLinuxOS() && !partition.getName().equals("1")) {
                                hasInvalidPartitionNetwork = true;
                                componentValid.addMessage(
                                        AsmManagerMessages.invalidPartitionForNetwork(component.getName()));
                            }
                        }
                        bareMetalOsNetworkCheck.add(networkType);
                    }
                }
            }
            if (hasBareMetalOS && bareMetalOsNetworkCheck.size() != 0) {
                boolean validNets = true;
                for (String netType : bareMetalOsNetworkCheck) {
                    if (!netType.equals(NetworkType.PXE.value())
                            && !netType.equals(NetworkType.PRIVATE_LAN.value())
                            && !netType.equals(NetworkType.PUBLIC_LAN.value())
                            && !netType.equals(NetworkType.STORAGE_FCOE_SAN.value())
                            && !netType.equals(NetworkType.FIP_SNOOPING.value())
                            && !netType.equals(NetworkType.FILESHARE.value())
                            && !netType.equals(NetworkType.STORAGE_ISCSI_SAN.value())) {
                        validNets = false;
                        LOGGER.error("incorrectNetworksOnBareMetalOs");
                        componentValid
                                .addMessage(AsmManagerMessages.incorrectNetworkConfForBareMetalAndLinux2());
                        break;
                    }
                }
                if (!validNets)
                    break;
            }
            if (hasESX) {
                if (partitionNetworkTypes.contains(NetworkType.FIP_SNOOPING.value())
                        && !(partitionNetworkTypes.contains(NetworkType.FIP_SNOOPING.value())
                                && partitionNetworkTypes.contains(NetworkType.STORAGE_FCOE_SAN.value()))) {
                    componentInvalidForEsx = true;
                    errorCase = ERROR_DUPLICATE_NETWORK_TYPE;
                    LOGGER.error(
                            "networkTypeDuplicateOnSamePartition - FIP Snooping FCOE Networks Partitions Error");
                }
            }
            // checks per port
            // Bare metal OS
            if (hasBareMetalOS) {
                if (interfaceObject.getName().equals("Port 1")) {
                    // In the case of boot from iSCSI, an iSCSI network must be selected on NIC port 1,
                    // and no other network may be selected on the same port.
                    if (isISCSIBoot && hasISCSI) {
                        iscsiOnPort1 = true;
                    }
                    if (isISCSIBoot && hasISCSI && curMask == M_ISCSI) {
                        foundSingleISCSIOnPort1 = true;
                    }
                }
            }
            // ignore all but the first partition
            if (!interfaceObject.isPartitioned())
                break;
            // Different NIC types have different numbers of partitions, but the data may include
            // more that should be ignored
            if (partition.getName().equals(Integer.toString(interfaceObject.getMaxPartitions()))) {
                break;
            }
        }
    }
    if (hasBareMetalOS) {
        // In the case of boot from iSCSI, an iSCSI network must be selected on NIC port 1,
        // and no other network may be selected on the same port.
        if (isISCSIBoot && !iscsiOnPort1) {
            componentValid.addMessage(AsmManagerMessages.iscsiMustBeOnPort1());
        }
        if (isISCSIBoot && hasISCSI && !foundSingleISCSIOnPort1) {
            componentValid.addMessage(AsmManagerMessages.iscsiMustBeTheOnlyNetwork());
        }
    }
    // For any case that requires ASM to deploy an OS, make sure a PXE network is selected.
    // This is required for a full server component.
    if (ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_COMPID_ALL.equals(component.getComponentID())) {
        if (!isSanBoot && !isNoneBoot && !hasPXE) {
            componentValid.addMessage(AsmManagerMessages.serverMustHavePXE());
        } else {
            // Installing Hyper-V or Windows using a static OS installation network is not
            // currently supported
            if (hasStaticPXE) {
                ServiceTemplateSetting osVersion = component.getParameter(
                        ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_OS_RESOURCE,
                        ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_OS_VERSION_ID);
            }
            if (hasBareMetalOS) {
                if (hasPXE && hasPXEOnPartNot1)
                    componentValid.addMessage(AsmManagerMessages.wrongPartition(NetworkType.PXE.value(), 1));
            }
            // In the case of Hyper-V, make sure the required networks are selected:
            // Hypervisor Mgmt, Hypervisor Migration, Hypervisor Cluster Private, PXE, iSCSI;
            // no partitions
            if (hasHyperV) {
                if (!hasPXE)
                    componentValid.addMessage(
                            AsmManagerMessages.serverMissedNetwork("Hyper-V", NetworkType.PXE.value()));
                else if (hasPXE && hasPXEOnPartNot1)
                    componentValid.addMessage(AsmManagerMessages.wrongPartition(NetworkType.PXE.value(), 1));
                else if (!hasHypervisorMgmt)
                    componentValid.addMessage(AsmManagerMessages.serverMissedNetwork("Hyper-V",
                            NetworkType.HYPERVISOR_MANAGEMENT.value()));
                else if (hasHypervisorMgmt && !hasHMOnPart1)
                    componentValid.addMessage(
                            AsmManagerMessages.wrongPartition(NetworkType.HYPERVISOR_MANAGEMENT.value(), 1));
                else if (!hasHypervisorMigration)
                    componentValid.addMessage(AsmManagerMessages.serverMissedNetwork("Hyper-V",
                            NetworkType.HYPERVISOR_MIGRATION.value()));
                else if (!hasHypervisorCluster)
                    componentValid.addMessage(AsmManagerMessages.serverMissedNetwork("Hyper-V",
                            NetworkType.HYPERVISOR_CLUSTER_PRIVATE.value()));
                if (hasISCSI && !hasISCSIStatic) {
                    componentValid.addMessage(
                            AsmManagerMessages.hypervRequiresStatic(NetworkType.STORAGE_ISCSI_SAN.value()));
                } else if (!hasHypervisorMgmtStatic) {
                    componentValid.addMessage(
                            AsmManagerMessages.hypervRequiresStatic(NetworkType.HYPERVISOR_MANAGEMENT.value()));
                } else if (!hasHypervisorMigrationStatic) {
                    componentValid.addMessage(
                            AsmManagerMessages.hypervRequiresStatic(NetworkType.HYPERVISOR_MIGRATION.value()));
                } else if (!hasHypervisorClusterStatic) {
                    componentValid.addMessage(AsmManagerMessages
                            .hypervRequiresStatic(NetworkType.HYPERVISOR_CLUSTER_PRIVATE.value()));
                }
            }
            // In the case of ESXi, make sure the required networks are selected:
            // Hypervisor Mgmt, Hypervisor Migration, Hypervisor Cluster Private;
            // PXE must be on partition 1
            if (hasESX) {
                HashSet<String> duplicateNetworkCheck;
                if (!hasPXE)
                    componentValid
                            .addMessage(AsmManagerMessages.serverMissedNetwork("ESX", NetworkType.PXE.value()));
                else if (hasPXE && hasPXEOnPartNot1)
                    componentValid.addMessage(AsmManagerMessages.wrongPartition(NetworkType.PXE.value(), 1));
                else if (!hasHypervisorMgmt)
                    componentValid.addMessage(AsmManagerMessages.serverMissedNetwork("ESX",
                            NetworkType.HYPERVISOR_MANAGEMENT.value()));
                if (workloadNetworksCheck.size() > 1) {
                    for (List<String> partitionNetworks : workloadNetworksCheck) {
                        if (!(partitionNetworks.containsAll(workloadNetworksCheck.get(1))
                                && workloadNetworksCheck.get(1).containsAll(partitionNetworks))) {
                            componentInvalidForEsx = true;
                            errorCase = ERROR_WORKLOAD_NETS_NOT_SAME;
                            LOGGER.error(ERROR_WORKLOAD_NETS_NOT_SAME);
                        }
                    }
                }
                duplicateNetworkCheck = new HashSet<>(pxeNetworks);
                if (duplicateNetworkCheck.size() > 1) {
                    componentInvalidForEsx = true;
                    errorCase = ERROR_DUPLICATE_NETWORKS;
                    LOGGER.error("duplicateNetworkCheck - PXE");
                }
                duplicateNetworkCheck = new HashSet<>(vMotionNetworks);
                if (duplicateNetworkCheck.size() > 1) {
                    componentInvalidForEsx = true;
                    errorCase = ERROR_DUPLICATE_NETWORKS;
                    LOGGER.error("duplicateNetworkCheck - vMotionNetworks");
                }
                duplicateNetworkCheck = new HashSet<>(hypManangementNetworks);
                if (duplicateNetworkCheck.size() > 1) {
                    componentInvalidForEsx = true;
                    errorCase = ERROR_DUPLICATE_NETWORKS;
                    LOGGER.error("duplicateNetworkCheck - hypManangementNetworks");
                }
                duplicateNetworkCheck = new HashSet<>(fipsNetworks);
                if (duplicateNetworkCheck.size() > 1) {
                    componentInvalidForEsx = true;
                    errorCase = ERROR_DUPLICATE_NETWORKS;
                    LOGGER.error("duplicateNetworkCheck - fipsNetworks");
                }
                if (componentInvalidForEsx) {
                    if (ERROR_DUPLICATE_NETWORKS.equals(errorCase))
                        componentValid.addMessage(AsmManagerMessages.networksDuplicate());
                    if (ERROR_DUPLICATE_NETWORK_TYPE.equals(errorCase))
                        componentValid.addMessage(AsmManagerMessages.networkTypeDuplicate());
                    if (ERROR_WORKLOAD_NETS_NOT_SAME.equals(errorCase))
                        componentValid.addMessage(AsmManagerMessages.workloadNetworksNotSame());
                }
            }
        }
    }
    if (CollectionUtils.isNotEmpty(componentValid.getMessages())) {
        componentValid.setValid(Boolean.FALSE);
    }
}
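The ESX checks above lean on a HashSet copy: new HashSet<>(list) collapses repeats, and size() > 1 flags more than one distinct network id for a given role. A standalone sketch of that idiom and its close cousin (detecting any repeated value at all), with hypothetical data:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DuplicateChecks {
    public static void main(String[] args) {
        List<String> pxeNetworks = List.of("net-pxe-1", "net-pxe-1", "net-pxe-2");

        // Variant used above: more than one *distinct* value is an error.
        Set<String> distinct = new HashSet<>(pxeNetworks);
        if (distinct.size() > 1) {
            System.out.println("More than one distinct PXE network: " + distinct);
        }

        // Related idiom: the set is smaller than the list iff the list had duplicates.
        if (distinct.size() < pxeNetworks.size()) {
            System.out.println("List contains duplicates");
        }
    }
}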
From source file: org.restsql.core.impl.AbstractSqlResourceMetaData.java

/**
 * Builds table and column meta data.
 *
 * @throws SqlResourceException
 */
@SuppressWarnings("fallthrough")
private void buildTablesAndColumns(final SqlRowSet resultSet) throws SqlResourceException {
    final SqlRowSetMetaData resultSetMetaData = resultSet.getMetaData();
    final int columnCount = resultSetMetaData.getColumnCount();
    allReadColumns = new ArrayList<ColumnMetaData>(columnCount);
    parentReadColumns = new ArrayList<ColumnMetaData>(columnCount);
    childReadColumns = new ArrayList<ColumnMetaData>(columnCount);
    tableMap = new HashMap<String, TableMetaData>(DEFAULT_NUMBER_TABLES);
    tables = new ArrayList<TableMetaData>(DEFAULT_NUMBER_TABLES);
    childPlusExtTables = new ArrayList<TableMetaData>(DEFAULT_NUMBER_TABLES);
    parentPlusExtTables = new ArrayList<TableMetaData>(DEFAULT_NUMBER_TABLES);
    final HashSet<String> databases = new HashSet<String>(DEFAULT_NUMBER_DATABASES);
    for (int colNumber = 1; colNumber <= columnCount; colNumber++) {
        final String databaseName, qualifiedTableName, tableName;
        // boolean readOnly = isColumnReadOnly(resultSetMetaData, colNumber);
        // if (readOnly) {
        databaseName = SqlResourceDefinitionUtils.getDefaultDatabase(definition);
        tableName = SqlResourceDefinitionUtils.getTable(definition, TableRole.Parent).getName();
        qualifiedTableName = getQualifiedTableName(databaseName, tableName);
        databases.add(databaseName); // record each distinct database for the multipleDatabases check below
        final ColumnMetaDataImpl column = new ColumnMetaDataImpl(colNumber, databaseName, qualifiedTableName,
                tableName, getColumnName(definition, resultSetMetaData, colNumber),
                resultSetMetaData.getColumnLabel(colNumber), resultSetMetaData.getColumnTypeName(colNumber),
                resultSetMetaData.getColumnType(colNumber), true, this);
        TableMetaDataImpl table = (TableMetaDataImpl) tableMap.get(column.getQualifiedTableName());
        if (table == null) {
            // Create table metadata object and add to special references
            final Table tableDef = SqlResourceDefinitionUtils.getTable(definition, column);
            if (tableDef == null) {
                throw new SqlResourceException("Definition requires table element for "
                        + column.getTableName() + ", referenced by column " + column.getColumnLabel());
            }
            table = new TableMetaDataImpl(tableName, qualifiedTableName, databaseName,
                    TableRole.valueOf(tableDef.getRole()));
            tableMap.put(column.getQualifiedTableName(), table);
            tables.add(table);
            switch (table.getTableRole()) {
            case Parent:
                parentTable = table;
                if (tableDef.getAlias() != null) {
                    table.setTableAlias(tableDef.getAlias());
                }
                // fall through
            case ParentExtension:
                parentPlusExtTables.add(table);
                break;
            case Child:
                childTable = table;
                if (tableDef.getAlias() != null) {
                    table.setTableAlias(tableDef.getAlias());
                }
                // fall through
            case ChildExtension:
                childPlusExtTables.add(table);
                break;
            case Join:
                // unlikely to be in the select columns, but just in case
                joinTable = table;
                joinList = new ArrayList<TableMetaData>(1);
                joinList.add(joinTable);
                break;
            default: // Unknown
            }
        }
        // Add column to the table
        table.addColumn(column);
        column.setTableRole(table.getTableRole());
        // Add column to special column lists
        allReadColumns.add(column);
        switch (table.getTableRole()) {
        case Parent:
        case ParentExtension:
            parentReadColumns.add(column);
            break;
        case Child:
        case ChildExtension:
            childReadColumns.add(column);
            break;
        default: // Unknown
        }
    }
    // Determine number of databases
    multipleDatabases = databases.size() > 1;
}
From source file: org.apache.accumulo.server.tabletserver.Tablet.java

private LookupResult lookup(SortedKeyValueIterator<Key, Value> mmfi, List<Range> ranges,
        HashSet<Column> columnSet, ArrayList<KVEntry> results, long maxResultsSize) throws IOException {
    LookupResult lookupResult = new LookupResult();
    boolean exceededMemoryUsage = false;
    boolean tabletClosed = false;
    Set<ByteSequence> cfset = null;
    if (columnSet.size() > 0)
        cfset = LocalityGroupUtil.families(columnSet);
    for (Range range : ranges) {
        if (exceededMemoryUsage || tabletClosed) {
            lookupResult.unfinishedRanges.add(range);
            continue;
        }
        int entriesAdded = 0;
        try {
            if (cfset != null)
                mmfi.seek(range, cfset, true);
            else
                mmfi.seek(range, LocalityGroupUtil.EMPTY_CF_SET, false);
            while (mmfi.hasTop()) {
                Key key = mmfi.getTopKey();
                KVEntry kve = new KVEntry(key, mmfi.getTopValue());
                results.add(kve);
                entriesAdded++;
                lookupResult.bytesAdded += kve.estimateMemoryUsed();
                lookupResult.dataSize += kve.numBytes();
                exceededMemoryUsage = lookupResult.bytesAdded > maxResultsSize;
                if (exceededMemoryUsage) {
                    addUnfinishedRange(lookupResult, range, key, false);
                    break;
                }
                mmfi.next();
            }
        } catch (TooManyFilesException tmfe) {
            // treat this as a closed tablet, and let the client retry
            log.warn("Tablet " + getExtent() + " has too many files, batch lookup can not run");
            handleTabletClosedDuringScan(results, lookupResult, exceededMemoryUsage, range, entriesAdded);
            tabletClosed = true;
        } catch (IOException ioe) {
            if (shutdownInProgress()) {
                // assume HDFS shutdown hook caused this exception
                log.debug("IOException while shutdown in progress ", ioe);
                handleTabletClosedDuringScan(results, lookupResult, exceededMemoryUsage, range, entriesAdded);
                tabletClosed = true;
            } else {
                throw ioe;
            }
        } catch (IterationInterruptedException iie) {
            if (isClosed()) {
                handleTabletClosedDuringScan(results, lookupResult, exceededMemoryUsage, range, entriesAdded);
                tabletClosed = true;
            } else {
                throw iie;
            }
        } catch (TabletClosedException tce) {
            handleTabletClosedDuringScan(results, lookupResult, exceededMemoryUsage, range, entriesAdded);
            tabletClosed = true;
        }
    }
    return lookupResult;
}
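Here columnSet.size() > 0 gates an optional conversion: the column-family set is only built when columns were actually requested. A minimal sketch of the same guard (!set.isEmpty() is the more idiomatic spelling), with hypothetical types in place of Accumulo's:

import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import java.util.stream.Collectors;

public class OptionalConversion {
    public static void main(String[] args) {
        Set<String> requestedColumns = new HashSet<>();
        requestedColumns.add("Name");
        requestedColumns.add("Age");

        // Only pay for the derived structure when there is something to convert.
        Set<String> normalized = null;
        if (requestedColumns.size() > 0) { // equivalently: !requestedColumns.isEmpty()
            normalized = requestedColumns.stream()
                    .map(c -> c.toLowerCase(Locale.ROOT))
                    .collect(Collectors.toSet());
        }
        System.out.println(normalized != null ? normalized : "no column filter");
    }
}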
From source file: org.apache.geode.internal.cache.rollingupgrade.RollingUpgrade2DUnitTest.java

private HashSet<String> verifyOplogHeader(File dir, HashSet<String> oldFiles) throws IOException {
    if (oldFiles != null) {
        for (String file : oldFiles) {
            System.out.println("Known old format file: " + file);
        }
    }
    File[] files = dir.listFiles();
    HashSet<String> verified = new HashSet<String>();
    HashSet<String> oldFilesFound = new HashSet<String>();
    for (File file : files) {
        String name = file.getName();
        byte[] expect = new byte[Oplog.OPLOG_MAGIC_SEQ_REC_SIZE];
        byte OPLOG_MAGIC_SEQ_ID = 92; // value of Oplog.OPLOG_MAGIC_SEQ_ID
        if (name.endsWith(".crf")) {
            expect[0] = OPLOG_MAGIC_SEQ_ID;
            System.arraycopy(OPLOG_TYPE.CRF.getBytes(), 0, expect, 1, OPLOG_TYPE.getLen());
            verified.add(".crf");
        } else if (name.endsWith(".drf")) {
            expect[0] = OPLOG_MAGIC_SEQ_ID;
            System.arraycopy(OPLOG_TYPE.DRF.getBytes(), 0, expect, 1, OPLOG_TYPE.getLen());
            verified.add(".drf");
        // } else if (name.endsWith(".krf")) {
        //     expect[0] = OPLOG_MAGIC_SEQ_ID;
        //     System.arraycopy(OPLOG_TYPE.KRF.getBytes(), 0, expect, 1, OPLOG_TYPE.getLen());
        //     verified.add(".krf");
        } else if (name.endsWith(".if")) {
            expect[0] = DiskInitFile.OPLOG_MAGIC_SEQ_ID;
            System.arraycopy(OPLOG_TYPE.IF.getBytes(), 0, expect, 1, OPLOG_TYPE.getLen());
            verified.add(".if");
        } else {
            System.out.println("Ignored: " + file);
            continue;
        }
        expect[expect.length - 1] = 21; // EndOfRecord
        byte[] buf = new byte[Oplog.OPLOG_MAGIC_SEQ_REC_SIZE];
        FileInputStream fis = new FileInputStream(file);
        int count = fis.read(buf, 0, 8);
        fis.close();
        assertEquals(8, count);
        if (oldFiles == null) {
            System.out.println("Verifying old format file: " + file);
            assertFalse(Arrays.equals(expect, buf));
            oldFilesFound.add(name);
        } else {
            if (oldFiles.contains(name)) {
                System.out.println("Verifying old format file: " + file);
                assertFalse(Arrays.equals(expect, buf));
            } else {
                System.out.println("Verifying new format file: " + file);
                assertTrue(Arrays.equals(expect, buf));
            }
        }
    }
    assertTrue(3 <= verified.size());
    return oldFilesFound;
}
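The closing assertion uses the set to count distinct file *kinds* rather than files: adding ".crf" twice still counts once, so assertTrue(3 <= verified.size()) proves at least three extensions were actually seen. A JUnit 4-style sketch of the same idea, with hypothetical file names:

import static org.junit.Assert.assertTrue;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.junit.Test;

public class DistinctKindsTest {
    @Test
    public void coversAllExtensions() {
        List<String> names = List.of("a.crf", "b.crf", "a.drf", "meta.if");
        Set<String> kinds = new HashSet<>();
        for (String name : names) {
            kinds.add(name.substring(name.lastIndexOf('.'))); // ".crf", ".drf", ".if"
        }
        // Duplicates collapse, so this asserts on distinct kinds, not file count.
        assertTrue(3 <= kinds.size());
    }
}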
From source file: org.metaservice.core.OntologyToLatexConverter.java

public String generate(String prefix, String namespace)
        throws RepositoryException, IOException, RDFParseException, SailException {
    RepositoryResult<Statement> result;
    if (namespace == null) {
        result = connection.getStatements(null, VANN.PREFERRED_NAMESPACE_URI, null, true);
        if (result.hasNext()) {
            namespace = result.next().getObject().stringValue();
        } else {
            result = connection.getStatements(null, RDF.TYPE, OWL.ONTOLOGY, true);
            namespace = result.next().getSubject().stringValue();
        }
    }
    if (prefix == null) {
        prefix = connection.getStatements(null, VANN.PREFERRED_NAMESPACE_PREFIX, null, true).next()
                .getObject().stringValue();
    }
    StringBuilder out = new StringBuilder();
    HashSet<URI> classes = new HashSet<>();
    HashSet<URI> properties = new HashSet<>();
    HashSet<URI> objectProperties = new HashSet<>();
    HashSet<URI> dataProperties = new HashSet<>();
    HashSet<URI> annotationProperties = new HashSet<>();
    HashSet<URI> things = new HashSet<>();
    result = connection.getStatements(null, RDF.TYPE, RDFS.CLASS, true);
    result.enableDuplicateFilter();
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            classes.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, OWL.OBJECTPROPERTY, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            objectProperties.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, OWL.DATATYPEPROPERTY, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            dataProperties.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, OWL.ANNOTATIONPROPERTY, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            annotationProperties.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, RDF.PROPERTY, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            properties.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, OWL.THING, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            things.add(subject);
        }
    }
    out.append("package org.metaservice.api.rdf.vocabulary;\n" + "\n" + "import org.openrdf.model.*;\n"
            + "import org.openrdf.model.impl.*;\n\n\n\n");
    out.append("/**\n").append(" * This is an automatically generated class\n")
            .append(" * Generator: " + OntologyToLatexConverter.class.getCanonicalName() + "\n")
            .append(" * @see <a href=\"" + namespace + "\">" + prefix + "</a>\n").append(" */\n")
            .append("public class ").append(prefix.toUpperCase()).append("{\n\n");
    out.append(" public static final String NAMESPACE = \"").append(namespace).append("\";\n\n");
    out.append(" public static final String PREFIX = \"").append(prefix).append("\";\n\n");
    out.append(" public static final Namespace NS = new NamespaceImpl(PREFIX, NAMESPACE);\n\n");
    properties.removeAll(objectProperties);
    properties.removeAll(dataProperties);
    properties.removeAll(annotationProperties);
    things.removeAll(properties);
    things.removeAll(classes);
    things.removeAll(objectProperties);
    things.removeAll(dataProperties);
    things.removeAll(annotationProperties);
    if (classes.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// CLASSES\n");
        out.append("////////////////////////\n\n\n");
        addPrettyClass(classes, out, connection, "_CLASS");
    }
    if (objectProperties.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// OBJECT PROPERTIES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(objectProperties, out, connection, "_PROPERTY");
    }
    if (dataProperties.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// DATA PROPERTIES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(dataProperties, out, connection, "_PROPERTY");
    }
    if (annotationProperties.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// ANNOTATION PROPERTIES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(annotationProperties, out, connection, "_PROPERTY");
    }
    if (properties.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// PROPERTIES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(properties, out, connection, "_PROPERTY");
    }
    if (things.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// THINGS\n");
        out.append("////////////////////////\n\n\n");
        addPretty(things, out, connection, "_THING");
    }
    return out.toString();
}
From source file: org.intermine.bio.dataconversion.ModEncodeMetaDataProcessor.java

private String createDataIdsTempTable(Connection connection, Integer chadoExperimentId, List<Integer> dataIds)
        throws SQLException {
    // the batch writer system doesn't like to have duplicate named tables
    String tableName = DATA_IDS_TABLE_NAME + "_" + chadoExperimentId + "_" + System.currentTimeMillis();
    long bT = System.currentTimeMillis();
    String query = " CREATE TEMPORARY TABLE " + tableName + " (data_id int)";
    Statement stmt = connection.createStatement();
    LOG.info("executing: " + query);
    stmt.execute(query);
    try {
        BatchWriterPostgresCopyImpl batchWriter = new BatchWriterPostgresCopyImpl();
        Batch batch = new Batch(batchWriter);
        HashSet<Integer> uniqueDataIds = new HashSet<Integer>(dataIds);
        String[] colNames = new String[] { "data_id" };
        for (Integer dataId : uniqueDataIds) {
            batch.addRow(connection, tableName, dataId, colNames, new Object[] { dataId });
        }
        batch.flush(connection);
        batch.close(connection);
        LOG.info("CREATED DATA IDS TABLE: " + tableName + " with " + uniqueDataIds.size() + " data ids in "
                + (System.currentTimeMillis() - bT) + "ms");
        String idIndexQuery = "CREATE INDEX " + tableName + "_data_id_index ON " + tableName + "(data_id)";
        LOG.info("DATA IDS executing: " + idIndexQuery);
        long bT1 = System.currentTimeMillis();
        stmt.execute(idIndexQuery);
        LOG.info("DATA IDS TIME creating INDEX: " + (System.currentTimeMillis() - bT1) + "ms");
        String analyze = "ANALYZE " + tableName;
        LOG.info("executing: " + analyze);
        long bT2 = System.currentTimeMillis();
        stmt.execute(analyze);
        LOG.info("DATA IDS TIME analyzing: " + (System.currentTimeMillis() - bT2) + "ms");
    } catch (SQLException e) {
        // drop the partially created table so the name can be reused on retry
        query = "DROP TABLE " + tableName;
        stmt.execute(query);
        throw e;
    }
    return tableName;
}
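Copying the incoming List<Integer> into a HashSet does two jobs at once: it removes duplicate ids before the batch insert, and uniqueDataIds.size() then gives an accurate row count for the log line. A compact sketch of that dedupe-then-count step, with the database write stubbed out:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DedupeBeforeInsert {
    public static void main(String[] args) {
        List<Integer> dataIds = List.of(7, 7, 12, 12, 12, 31);

        // One constructor call removes duplicates; size() now counts unique ids.
        Set<Integer> uniqueDataIds = new HashSet<>(dataIds);

        System.out.println("inserting " + uniqueDataIds.size() + " of " + dataIds.size() + " ids");
        for (Integer id : uniqueDataIds) {
            // batch.addRow(...) would go here in the real code
            System.out.println("row: " + id);
        }
    }
}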
From source file: org.metaservice.core.OntologyToJavaConverter.java

public String generate(String prefix, String namespace)
        throws RepositoryException, IOException, RDFParseException, SailException {
    RepositoryResult<Statement> result;
    if (namespace == null) {
        result = connection.getStatements(null, VANN.PREFERRED_NAMESPACE_URI, null, true);
        if (result.hasNext()) {
            namespace = result.next().getObject().stringValue();
        } else {
            result = connection.getStatements(null, RDF.TYPE, OWL.ONTOLOGY, true);
            namespace = result.next().getSubject().stringValue();
        }
    }
    if (prefix == null) {
        prefix = connection.getStatements(null, VANN.PREFERRED_NAMESPACE_PREFIX, null, true).next()
                .getObject().stringValue();
    }
    StringBuilder out = new StringBuilder();
    HashSet<URI> classes = new HashSet<>();
    HashSet<URI> properties = new HashSet<>();
    HashSet<URI> objectProperties = new HashSet<>();
    HashSet<URI> dataProperties = new HashSet<>();
    HashSet<URI> annotationProperties = new HashSet<>();
    HashSet<URI> things = new HashSet<>();
    result = connection.getStatements(null, RDF.TYPE, RDFS.CLASS, true);
    result.enableDuplicateFilter();
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof org.openrdf.model.URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            classes.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, OWL.OBJECTPROPERTY, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof org.openrdf.model.URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            objectProperties.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, OWL.DATATYPEPROPERTY, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof org.openrdf.model.URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            dataProperties.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, OWL.ANNOTATIONPROPERTY, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof org.openrdf.model.URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            annotationProperties.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, RDF.PROPERTY, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof org.openrdf.model.URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            properties.add(subject);
        }
    }
    result = connection.getStatements(null, RDF.TYPE, OWL.THING, true);
    while (result.hasNext()) {
        Resource resource = result.next().getSubject();
        if (resource instanceof org.openrdf.model.URI && resource.toString().startsWith(namespace)) {
            URI subject = (URI) resource;
            things.add(subject);
        }
    }
    out.append("package org.metaservice.api.rdf.vocabulary;\n" + "\n" + "import org.openrdf.model.*;\n"
            + "import org.openrdf.model.impl.*;\n\n\n\n");
    out.append("/**\n").append(" * This is an automatically generated class\n")
            .append(" * Generator: " + OntologyToJavaConverter.class.getCanonicalName() + "\n")
            .append(" * @see <a href=\"" + namespace + "\">" + prefix + "</a>\n").append(" */\n")
            .append("public class ").append(prefix.toUpperCase()).append("{\n\n");
    out.append(" public static final String NAMESPACE = \"").append(namespace).append("\";\n\n");
    out.append(" public static final String PREFIX = \"").append(prefix).append("\";\n\n");
    out.append(" public static final Namespace NS = new NamespaceImpl(PREFIX, NAMESPACE);\n\n");
    properties.removeAll(objectProperties);
    properties.removeAll(dataProperties);
    properties.removeAll(annotationProperties);
    things.removeAll(properties);
    things.removeAll(classes);
    things.removeAll(objectProperties);
    things.removeAll(dataProperties);
    things.removeAll(annotationProperties);
    if (classes.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// CLASSES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(classes, out, connection, "_CLASS");
    }
    if (objectProperties.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// OBJECT PROPERTIES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(objectProperties, out, connection, "_PROPERTY");
    }
    if (dataProperties.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// DATA PROPERTIES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(dataProperties, out, connection, "_PROPERTY");
    }
    if (annotationProperties.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// ANNOTATION PROPERTIES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(annotationProperties, out, connection, "_PROPERTY");
    }
    if (properties.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// PROPERTIES\n");
        out.append("////////////////////////\n\n\n");
        addPretty(properties, out, connection, "_PROPERTY");
    }
    if (things.size() > 0) {
        out.append("////////////////////////\n");
        out.append("// THINGS\n");
        out.append("////////////////////////\n\n\n");
        addPretty(things, out, connection, "_THING");
    }
    out.append(" static{\n");
    out.append(" ValueFactory valueFactory = ValueFactoryImpl.getInstance();\n\n");
    for (URI c : Iterables.concat(classes, objectProperties, dataProperties, annotationProperties,
            properties, things)) {
        out.append(" ").append(nameMap.get(c)).append(" = valueFactory.createURI(NAMESPACE,\"")
                .append(c.getLocalName()).append("\");\n");
    }
    out.append(" }\n"); // static
    out.append("}\n"); // class
    return out.toString();
}
From source file: com.vmware.identity.idm.server.config.directory.DirectoryConfigStore.java

@Override
public void setAuthnTypes(String tenantName, boolean password, boolean windows, boolean certificate,
        boolean rsaSecureID) throws Exception {
    // Set AuthnTypes
    HashSet<Integer> authnTypes = new HashSet<Integer>();
    if (password)
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_PASSWORD);
    if (windows)
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_WINDOWS);
    if (certificate)
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_TLS_CERTIFICATE);
    if (rsaSecureID)
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_RSA_SECUREID);
    if (authnTypes.size() == 0) {
        // This distinguishes the case where none of the AuthnTypes is set from the case of
        // migrating from an old schema where AuthnTypes did not exist.
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_NONE);
    }
    int[] authnTypesArray = ArrayUtils.toPrimitive(authnTypes.toArray(new Integer[authnTypes.size()]));
    this.setTenantProperty(tenantName, TenantLdapObject.PROPERTY_AUTHN_TYPES,
            ServerUtils.getLdapValue(authnTypesArray));
}
From source file: com.vmware.identity.idm.server.config.directory.DirectoryConfigStore.java

@Override
public void setAuthnTypesForProvider(String tenantName, String providerName, boolean password,
        boolean windows, boolean certificate, boolean rsaSecureID) throws Exception {
    HashSet<Integer> authnTypes = new HashSet<Integer>();
    if (password) {
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_PASSWORD);
    }
    if (windows) {
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_WINDOWS);
    }
    if (certificate) {
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_TLS_CERTIFICATE);
    }
    if (rsaSecureID) {
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_RSA_SECUREID);
    }
    if (authnTypes.size() == 0) {
        authnTypes.add(DirectoryConfigStore.FLAG_AUTHN_TYPE_ALLOW_NONE);
    }
    int[] authTypesArray = ArrayUtils.toPrimitive(authnTypes.toArray(new Integer[authnTypes.size()]));
    this.setProviderProperty(tenantName, providerName, IdentityProviderLdapObject.PROPERTY_AUTHN_TYPES,
            ServerUtils.getLdapValue(authTypesArray));
}
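Both overloads use authnTypes.size() == 0 as the "nothing selected" signal and substitute an explicit ALLOW_NONE flag, so an empty set is never persisted. A minimal sketch of that default-on-empty pattern; the flag constants below are hypothetical stand-ins for the DirectoryConfigStore values, and isEmpty() is the equivalent, slightly clearer spelling:

import java.util.HashSet;
import java.util.Set;

public class DefaultOnEmpty {
    // Hypothetical flag values standing in for the DirectoryConfigStore constants.
    static final int ALLOW_PASSWORD = 1;
    static final int ALLOW_WINDOWS = 2;
    static final int ALLOW_NONE = 0;

    static Set<Integer> collectFlags(boolean password, boolean windows) {
        Set<Integer> flags = new HashSet<>();
        if (password) flags.add(ALLOW_PASSWORD);
        if (windows) flags.add(ALLOW_WINDOWS);
        // An empty set would be ambiguous downstream, so encode "none" explicitly.
        if (flags.size() == 0) { // equivalently: flags.isEmpty()
            flags.add(ALLOW_NONE);
        }
        return flags;
    }

    public static void main(String[] args) {
        System.out.println(collectFlags(false, false)); // [0]
        System.out.println(collectFlags(true, false));  // [1]
    }
}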