List of usage examples for java.util.Set.clear()
void clear();
From source file:com.opengamma.integration.tool.marketdata.HtsSyncTool.java
private Set<String> filterClassifiers(Set<String> srcMasterClassifiers, Set<String> destMasterClassifiers) { Set<String> commonComponentNames = Sets.newLinkedHashSet(); commonComponentNames.addAll(srcMasterClassifiers); commonComponentNames.retainAll(destMasterClassifiers); if (getCommandLine().hasOption("classifiers")) { List<String> classifiersList = Arrays.asList(getCommandLine().getOptionValues("classifiers")); Set<String> classifiers = Sets.newHashSet(); classifiers.addAll(classifiersList); classifiers.removeAll(classifiers); if (classifiers.size() > 0) { System.err.println("Couldn't find classifiers: " + classifiers.toString() + ", skipping those"); }// ww w .j ava 2 s .c o m classifiers.clear(); classifiers.addAll(classifiersList); commonComponentNames.retainAll(classifiers); } return commonComponentNames; }
From source file:org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner.java
/** * Load all hfile references in all replication queues from ZK. This method guarantees to return a * snapshot which contains all hfile references in the zookeeper at the start of this call. * However, some newly created hfile references during the call may not be included. *///from w w w . j a v a 2 s . c o m private Set<String> loadHFileRefsFromPeers() throws KeeperException { Set<String> hfileRefs = Sets.newHashSet(); List<String> listOfPeers; for (int retry = 0;; retry++) { int v0 = rqc.getHFileRefsNodeChangeVersion(); hfileRefs.clear(); listOfPeers = rqc.getAllPeersFromHFileRefsQueue(); if (listOfPeers == null) { LOG.debug("Didn't find any peers with hfile references, won't prevent any deletions."); return ImmutableSet.of(); } for (String id : listOfPeers) { List<String> peerHFileRefs = rqc.getReplicableHFiles(id); if (peerHFileRefs != null) { hfileRefs.addAll(peerHFileRefs); } } int v1 = rqc.getHFileRefsNodeChangeVersion(); if (v0 == v1) { return hfileRefs; } LOG.debug(String.format( "Replication hfile references node cversion changed from " + "%d to %d, retry = %d", v0, v1, retry)); } }
From source file:com.spectralogic.ds3client.metadata.MetadataReceivedListenerImpl_Test.java
/**
 * Verifies that the failure handler fires exactly once when metadata restoration fails:
 * the test captures a file's metadata, deletes the file's directory, then asks the
 * listener to restore metadata onto the now-missing file. POSIX-only (skipped on Windows).
 */
@Test
public void testGettingMetadataFailureHandler() throws IOException, InterruptedException {
    Assume.assumeFalse(Platform.isWindows());
    try {
        final String tempPathPrefix = null;
        final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
        final String fileName = "Gracie.txt";
        final Path filePath = Files.createFile(Paths.get(tempDirectory.toString(), fileName));
        try {
            // Give the file a known permission set (owner rw only) so the captured
            // metadata is deterministic.
            if (!Platform.isWindows()) {
                final PosixFileAttributes attributes = Files.readAttributes(filePath, PosixFileAttributes.class);
                final Set<PosixFilePermission> permissions = attributes.permissions();
                permissions.clear();
                permissions.add(PosixFilePermission.OWNER_READ);
                permissions.add(PosixFilePermission.OWNER_WRITE);
                Files.setPosixFilePermissions(filePath, permissions);
            }
            // Capture the file's metadata while it still exists.
            final ImmutableMap.Builder<String, Path> fileMapper = ImmutableMap.builder();
            fileMapper.put(filePath.toString(), filePath);
            final Map<String, String> metadataFromFile = new MetadataAccessImpl(fileMapper.build())
                    .getMetadataValue(filePath.toString());
            // Delete the directory so that restoring metadata below is guaranteed to fail.
            FileUtils.deleteDirectory(tempDirectory.toFile());
            final Metadata metadata = new MetadataImpl(new MockedHeadersReturningKeys(metadataFromFile));
            final AtomicInteger numTimesFailureHandlerCalled = new AtomicInteger(0);
            // Attempt to restore metadata onto the deleted file; the failure listener
            // must be invoked with a RestoringMetadata activity.
            new MetadataReceivedListenerImpl(tempDirectory.toString(), new FailureEventListener() {
                @Override
                public void onFailure(final FailureEvent failureEvent) {
                    numTimesFailureHandlerCalled.incrementAndGet();
                    assertEquals(FailureEvent.FailureActivity.RestoringMetadata, failureEvent.doingWhat());
                }
            }, "localhost").metadataReceived(fileName, metadata);
            assertEquals(1, numTimesFailureHandlerCalled.get());
        } finally {
            FileUtils.deleteDirectory(tempDirectory.toFile());
        }
    } catch (final Throwable t) {
        // Failures must be routed through the handler, never thrown to the caller.
        fail("Throwing exceptions from metadata est verbotten");
    }
}
From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService.java
/**
 * Records the id of a deleted block, grouped by its block pool. Once the total number of
 * recorded deletions reaches MAX_DELETED_BLOCKS, every pool's accumulated ids are handed
 * to the dataset for removal and the per-pool sets are emptied.
 */
private synchronized void updateDeletedBlockId(ExtendedBlock block) {
    final String poolId = block.getBlockPoolId();
    Set<Long> idsForPool = deletedBlockIds.get(poolId);
    if (idsForPool == null) {
        idsForPool = new HashSet<Long>();
        deletedBlockIds.put(poolId, idsForPool);
    }
    idsForPool.add(block.getBlockId());
    numDeletedBlocks++;
    // Flush all accumulated deletions in one pass when the batch threshold is hit.
    if (numDeletedBlocks == MAX_DELETED_BLOCKS) {
        for (Entry<String, Set<Long>> entry : deletedBlockIds.entrySet()) {
            fsdatasetImpl.removeDeletedBlocks(entry.getKey(), entry.getValue());
            entry.getValue().clear();
        }
        numDeletedBlocks = 0;
    }
}
From source file:com.spectralogic.ds3client.metadata.MetadataReceivedListenerImpl_Test.java
/**
 * Round-trip test for file metadata: capture a file's metadata, change the file's
 * permissions/attributes, restore the captured metadata via the listener, and assert the
 * original values came back (archive flag on Windows; mode/permission strings on POSIX).
 */
@Test
public void testGettingMetadata() throws IOException, InterruptedException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final String fileName = "Gracie.txt";
    final Path filePath = Files.createFile(Paths.get(tempDirectory.toString(), fileName));
    try {
        // Give the file a known permission set (owner rw only) so the captured
        // metadata is deterministic.
        if (!Platform.isWindows()) {
            final PosixFileAttributes attributes = Files.readAttributes(filePath, PosixFileAttributes.class);
            final Set<PosixFilePermission> permissions = attributes.permissions();
            permissions.clear();
            permissions.add(PosixFilePermission.OWNER_READ);
            permissions.add(PosixFilePermission.OWNER_WRITE);
            Files.setPosixFilePermissions(filePath, permissions);
        }
        // Capture the file's current metadata.
        final ImmutableMap.Builder<String, Path> fileMapper = ImmutableMap.builder();
        fileMapper.put(filePath.toString(), filePath);
        final ImmutableMap<String, Path> immutableFileMapper = fileMapper.build();
        final Map<String, String> metadataFromFile = new MetadataAccessImpl(immutableFileMapper)
                .getMetadataValue(filePath.toString());
        // Mutate the file so restoration has something to undo: clear the archive
        // attribute on Windows, add the execute bit on POSIX.
        if (Platform.isWindows()) {
            Runtime.getRuntime().exec("attrib -A " + filePath.toString()).waitFor();
        } else {
            final PosixFileAttributes attributes = Files.readAttributes(filePath, PosixFileAttributes.class);
            final Set<PosixFilePermission> permissions = attributes.permissions();
            permissions.clear();
            permissions.add(PosixFilePermission.OWNER_READ);
            permissions.add(PosixFilePermission.OWNER_WRITE);
            permissions.add(PosixFilePermission.OWNER_EXECUTE);
            Files.setPosixFilePermissions(filePath, permissions);
        }
        // Restore the originally captured metadata onto the mutated file.
        final Metadata metadata = new MetadataImpl(new MockedHeadersReturningKeys(metadataFromFile));
        new MetadataReceivedListenerImpl(tempDirectory.toString()).metadataReceived(fileName, metadata);
        // Re-read the file's metadata and verify the original values were restored.
        fileMapper.put(filePath.toString(), filePath);
        final Map<String, String> metadataFromUpdatedFile = new MetadataAccessImpl(immutableFileMapper)
                .getMetadataValue(filePath.toString());
        if (Platform.isWindows()) {
            assertEquals("A", metadataFromUpdatedFile
                    .get(MetadataKeyConstants.METADATA_PREFIX + MetadataKeyConstants.KEY_FLAGS));
        } else {
            assertEquals("100600", metadataFromUpdatedFile
                    .get(MetadataKeyConstants.METADATA_PREFIX + MetadataKeyConstants.KEY_MODE));
            assertEquals("600(rw-------)", metadataFromUpdatedFile
                    .get(MetadataKeyConstants.METADATA_PREFIX + MetadataKeyConstants.KEY_PERMISSION));
        }
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}
From source file:org.apache.ambari.server.orm.dao.ConfigGroupHostMappingDAO.java
/**
 * Removes every config-group mapping for the given host, both from the database (via a
 * bulk JPQL delete) and from the in-memory cache.
 *
 * @param hostname name of the host whose mappings are removed
 */
@Transactional
public void removeAllByHost(String hostname) {
    TypedQuery<String> query = entityManagerProvider.get().createQuery(
            "DELETE FROM ConfigGroupHostMappingEntity confighosts WHERE " + "confighosts.hostname = ?1",
            String.class);
    daoUtils.executeUpdate(query, hostname);
    // BUG FIX: the original dereferenced the cache lookup unconditionally, throwing an NPE
    // for hosts that have no cached mapping set. Guard the lookup before clearing.
    Set<ConfigGroupHostMapping> setByHost = configGroupHostMappingByHost.get(hostname);
    if (setByHost != null) {
        setByHost.clear();
    }
}
From source file:com.krawler.spring.companyDetails.companyDetailsDAOImpl.java
@Override public KwlReturnObject updateCompany(HashMap hm) throws ServiceException { String companyid = ""; DateFormat dateformat = null; List ll = null;//from www . ja va 2s .c o m int dl = 0; try { Company company = null; if (hm.containsKey("companyid") && hm.get("companyid") != null) { companyid = hm.get("companyid").toString(); if (hm.containsKey("addCompany") && (Boolean) hm.get("addCompany")) { company = new Company(); company.setCompanyID(companyid); if (hm.containsKey("companyId") && hm.get("companyId") != null) { Long companyId = (Long) hm.get("companyId"); company.setCompanyId(companyId); } } else { company = (Company) get(Company.class, companyid); } } if (hm.containsKey("dateformat") && hm.get("dateformat") != null) { dateformat = (DateFormat) hm.get("dateformat"); } if (hm.containsKey("creater") && hm.get("creater") != null) { company.setCreator((User) get(User.class, (String) hm.get("creater"))); } if (hm.containsKey("companyname") && hm.get("companyname") != null) { company.setCompanyName((String) hm.get("companyname")); } if (hm.containsKey("address") && hm.get("address") != null) { company.setAddress((String) hm.get("address")); } if (hm.containsKey("city") && hm.get("city") != null) { company.setCity((String) hm.get("city")); } if (hm.containsKey("state") && hm.get("state") != null) { company.setState((String) hm.get("state")); } if (hm.containsKey("zip") && hm.get("zip") != null) { company.setZipCode((String) hm.get("zip")); } if (hm.containsKey("phone") && hm.get("phone") != null) { company.setPhoneNumber((String) hm.get("phone")); } if (hm.containsKey("fax") && hm.get("fax") != null) { company.setFaxNumber((String) hm.get("fax")); } if (hm.containsKey("website") && hm.get("website") != null) { company.setWebsite((String) hm.get("website")); } if (hm.containsKey("mail") && hm.get("mail") != null) { company.setEmailID((String) hm.get("mail")); } if (hm.containsKey("domainname") && hm.get("domainname") != null) { company.setSubDomain((String) 
hm.get("domainname")); } if (hm.containsKey("country") && hm.get("country") != null) { company.setCountry((Country) get(Country.class, hm.get("country").toString())); } if (hm.containsKey("currency") && hm.get("currency") != null) { company.setCurrency((KWLCurrency) get(KWLCurrency.class, (String) hm.get("currency"))); } if (hm.containsKey("timezone") && hm.get("timezone") != null) { KWLTimeZone timeZone = (KWLTimeZone) get(KWLTimeZone.class, (String) hm.get("timezone")); company.setTimeZone(timeZone); } if (hm.containsKey("deleteflag") && hm.get("deleteflag") != null) { company.setDeleted((Integer) hm.get("deleteflag")); } if (hm.containsKey("createdon") && hm.get("createdon") != null) { company.setCreatedOn(new Date()); } if (hm.containsKey("activated") && hm.get("activated") != null) { company.setActivated(Boolean.TRUE.getBoolean(hm.get("activated").toString())); } company.setModifiedOn(new Date()); if (hm.containsKey("holidays") && hm.get("holidays") != null) { JSONArray jArr = new JSONArray((String) hm.get("holidays")); Set<CompanyHoliday> holidays = company.getHolidays(); holidays.clear(); DateFormat formatter = dateformat; for (int i = 0; i < jArr.length(); i++) { CompanyHoliday day = new CompanyHoliday(); JSONObject obj = jArr.getJSONObject(i); day.setDescription(obj.getString("description")); day.setHolidayDate(formatter.parse(obj.getString("day"))); day.setCompany(company); holidays.add(day); } } if (hm.containsKey("logo") && hm.get("logo") != null && !StringUtil.isNullOrEmpty(hm.get("logo").toString())) { String imageName = ((FileItem) (hm.get("logo"))).getName(); if (imageName != null && imageName.length() > 0) { String fileName = companyid + FileUploadHandler.getCompanyImageExt(); company.setCompanyLogo(Constants.ImgBasePath + fileName); new FileUploadHandler().uploadImage((FileItem) hm.get("logo"), fileName, storageHandlerImpl.GetProfileImgStorePath(), 130, 25, true, false); } } save(company); ll = new ArrayList(); ll.add(company); dl = ll.size(); } 
catch (Exception e) { throw ServiceException.FAILURE("companyDetailsDAOImpl.updateCompany", e); } return new KwlReturnObject(true, KWLErrorMsgs.S01, "", ll, dl); }
From source file:org.broadleafcommerce.openadmin.server.security.ldap.BroadleafAdminLdapUserDetailsMapper.java
protected AdminUser saveAdminUserAndSecurityData(AdminUser adminUser, Set<AdminRole> adminRoles) { //We have to do this because BLC replies on the role relationships being stored in the DB Set<AdminRole> roleSet = adminUser.getAllRoles(); //First, remove all roles associated with the user if they already existed if (roleSet != null) { //First, remove all role relationships in case they have changed roleSet.clear(); } else {//w w w . ja v a2 s .com roleSet = new HashSet<AdminRole>(); adminUser.setAllRoles(roleSet); } //Now, add all of the role relationships back. if (adminRoles != null) { for (AdminRole role : adminRoles) { roleSet.add(role); } } //Save the user data and all of the roles... return securityService.saveAdminUser(adminUser); }
From source file:com.github.sps.metrics.opentsdb.OpenTsdb.java
/** * send a set of metrics to opentsdb//from w w w . j a v a 2 s . co m */ public void send(Set<OpenTsdbMetric> metrics) { // we set the patch size because of existing issue in opentsdb where large batch of metrics failed // see at https://groups.google.com/forum/#!topic/opentsdb/U-0ak_v8qu0 // we recommend batch size of 5 - 10 will be safer // alternatively you can enable chunked request if (batchSizeLimit > 0 && metrics.size() > batchSizeLimit) { final Set<OpenTsdbMetric> smallMetrics = new HashSet<OpenTsdbMetric>(); for (final OpenTsdbMetric metric : metrics) { smallMetrics.add(metric); if (smallMetrics.size() >= batchSizeLimit) { sendHelper(smallMetrics); smallMetrics.clear(); } } sendHelper(smallMetrics); } else { sendHelper(metrics); } }
From source file:edu.wisc.my.portlets.bookmarks.domain.Folder.java
/**
 * Computes a hash over this folder's children and minimized state, detecting cycles in the
 * folder tree along the way.
 *
 * @see java.lang.Object#hashCode()
 * @throws IllegalStateException if this folder is (directly or transitively) its own ancestor
 */
public int hashCode() {
    // Set of identity hashes of the folders currently being hashed on this call chain —
    // presumably a thread-local held by hashCodeVisitedFolder (TODO confirm). Recursion
    // happens implicitly when HashCodeBuilder hashes this.children, which re-enters
    // hashCode() on child folders.
    final Set<Integer> visited = hashCodeVisitedFolder.getSet();
    final int identityHash = System.identityHashCode(this);
    try {
        if (!visited.add(identityHash)) {
            // add() returned false -> this folder is already being hashed further up the
            // stack, i.e. the tree contains a loop. Reset the tracking set so later calls
            // start clean, then abort.
            visited.clear();
            throw new IllegalStateException("A loop exists in the Folder tree.");
        }
        return new HashCodeBuilder(-409984457, 961354191).appendSuper(super.hashCode()).append(this.children)
                .append(this.minimized).toHashCode();
    } finally {
        // Always unwind our own marker so sibling subtrees can legitimately revisit
        // shared descendants without tripping the loop check.
        visited.remove(identityHash);
    }
}