List of usage examples for java.util.EnumSet.noneOf
public static <E extends Enum<E>> EnumSet<E> noneOf(Class<E> elementType)
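Before the project examples, a minimal standalone sketch of the basic pattern (not taken from any of the sources below): noneOf returns an empty, mutable EnumSet for the given enum type, which is then populated like any other java.util.Set. The JDK's java.time.DayOfWeek enum is used here purely for illustration.

import java.time.DayOfWeek;
import java.util.EnumSet;

public class EnumSetNoneOfExample {
    public static void main(String[] args) {
        // Start with an empty, type-safe set backed by a bit vector.
        EnumSet<DayOfWeek> workDays = EnumSet.noneOf(DayOfWeek.class);

        // Populate it like any other java.util.Set.
        workDays.add(DayOfWeek.MONDAY);
        workDays.add(DayOfWeek.FRIDAY);

        System.out.println(workDays);           // [MONDAY, FRIDAY]
        System.out.println(workDays.isEmpty()); // false
    }
}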
From source file:io.swagger.jaxrs.Reader.java
private static Set<Scheme> parseSchemes(String schemes) {
    final Set<Scheme> result = EnumSet.noneOf(Scheme.class);
    for (String item : StringUtils.trimToEmpty(schemes).split(",")) {
        final Scheme scheme = Scheme.forValue(StringUtils.trimToNull(item));
        if (scheme != null) {
            result.add(scheme);
        }
    }
    return result;
}
From source file:org.apache.hadoop.hdfs.server.namenode.TestCacheDirectives.java
@Test(timeout = 120000)
public void testLimit() throws Exception {
    try {
        dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
        fail("Should not be able to set a negative limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("negative", e);
    }
    final String destiny = "poolofdestiny";
    final Path path1 = new Path("/destiny");
    DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
    // Start off with a limit that is too small
    final CachePoolInfo poolInfo = new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
    dfs.addCachePool(poolInfo);
    final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path1).build();
    try {
        dfs.addCacheDirective(info1);
        fail("Should not be able to cache when there is no more limit");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Raise the limit up to fit and it should work this time
    poolInfo.setLimit(2 * BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    long id1 = dfs.addCacheDirective(info1);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo, "testLimit:1");
    // Adding another file, it shouldn't be cached
    final Path path2 = new Path("/failure");
    DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
    try {
        dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path2).build(),
                EnumSet.noneOf(CacheFlag.class));
        fail("Should not be able to add another cached file");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Bring the limit down, the first file should get uncached
    poolInfo.setLimit(BLOCK_SIZE);
    dfs.modifyCachePool(poolInfo);
    waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo, "testLimit:2");
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    assertTrue("Expected a cache pool", it.hasNext());
    CachePoolStats stats = it.next().getStats();
    assertEquals("Overlimit bytes should be difference of needed and limit", BLOCK_SIZE,
            stats.getBytesOverlimit());
    // Moving a directive to a pool without enough limit should fail
    CachePoolInfo inadequate = new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
    dfs.addCachePool(inadequate);
    try {
        dfs.modifyCacheDirective(
                new CacheDirectiveInfo.Builder(info1).setId(id1).setPool(inadequate.getPoolName()).build(),
                EnumSet.noneOf(CacheFlag.class));
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("remaining capacity", e);
    }
    // Succeeds when force=true
    dfs.modifyCacheDirective(
            new CacheDirectiveInfo.Builder(info1).setId(id1).setPool(inadequate.getPoolName()).build(),
            EnumSet.of(CacheFlag.FORCE));
    // Also can add with force=true
    dfs.addCacheDirective(
            new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName()).setPath(path1).build(),
            EnumSet.of(CacheFlag.FORCE));
}
From source file:org.rhq.enterprise.server.discovery.DiscoveryBossBean.java
@SuppressWarnings("unchecked")
private void checkStatus(Subject subject, int[] resourceIds, InventoryStatus target,
        EnumSet<InventoryStatus> validStatuses) {
    Query query = entityManager.createQuery("" //
            + " SELECT res.inventoryStatus " //
            + " FROM Resource res " //
            + " WHERE res.id IN ( :resourceIds ) " //
            + "GROUP BY res.inventoryStatus ");
    List<Integer> resourceIdList = ArrayUtils.wrapInList(resourceIds);

    // Do one query per 1000 Resource id's to prevent Oracle from failing because of an IN clause with more
    // than 1000 items.
    // After the below while loop completes, this Set will contain the statuses represented by the Resources with
    // the passed in id's.
    Set<InventoryStatus> statuses = EnumSet.noneOf(InventoryStatus.class);
    int fromIndex = 0;
    while (fromIndex < resourceIds.length) {
        int toIndex = (resourceIds.length < (fromIndex + 1000)) ? resourceIds.length : (fromIndex + 1000);

        List<Integer> resourceIdSubList = resourceIdList.subList(fromIndex, toIndex);
        query.setParameter("resourceIds", resourceIdSubList);
        List<InventoryStatus> batchStatuses = query.getResultList();
        statuses.addAll(batchStatuses);

        fromIndex = toIndex;
    }

    if (!validStatuses.containsAll(statuses)) {
        throw new IllegalArgumentException("Can only set inventory status to [" + target
                + "] for Resources with current inventory status of one of [" + validStatuses + "].");
    }

    // Do one query per 1000 Resource id's to prevent Oracle from failing because of an IN clause with more
    // than 1000 items.
    List<Resource> resources = new ArrayList<Resource>(resourceIds.length);
    fromIndex = 0;
    while (fromIndex < resourceIds.length) {
        int toIndex = (resourceIds.length < (fromIndex + 1000)) ? resourceIds.length : (fromIndex + 1000);

        int[] resourceIdSubArray = Arrays.copyOfRange(resourceIds, fromIndex, toIndex);
        PageList<Resource> batchResources = resourceManager.findResourceByIds(subject, resourceIdSubArray,
                false, PageControl.getUnlimitedInstance());
        resources.addAll(batchResources);

        fromIndex = toIndex;
    }

    // Split the Resources into two lists - one for platforms and one for servers, since that's what
    // updateInventoryStatus() expects.
    List<Resource> platforms = new ArrayList<Resource>();
    List<Resource> servers = new ArrayList<Resource>();
    for (Resource resource : resources) {
        ResourceCategory category = resource.getResourceType().getCategory();
        if (category == ResourceCategory.PLATFORM) {
            platforms.add(resource);
        } else if (category == ResourceCategory.SERVER) {
            servers.add(resource);
        } else {
            throw new IllegalArgumentException("Can not directly change the inventory status of a service");
        }
    }

    updateInventoryStatus(subject, platforms, servers, target);
}
From source file:org.codice.ddf.spatial.ogc.csw.catalog.common.source.AbstractCswSource.java
public void configureCswSource() {
    detailLevels = EnumSet.noneOf(ElementSetType.class);

    capabilities = getCapabilities();

    if (null != capabilities) {
        cswVersion = capabilities.getVersion();
        if (CswConstants.VERSION_2_0_1.equals(cswVersion)) {
            setCsw201();
        }
        if (capabilities.getFilterCapabilities() == null) {
            return;
        }

        readGetRecordsOperation(capabilities);

        loadContentTypes();
        LOGGER.debug("{}: {}", cswSourceConfiguration.getId(), capabilities.toString());
    } else {
        LOGGER.error("{}: CSW Server did not return any capabilities.", cswSourceConfiguration.getId());
    }
}
From source file:org.obm.icalendar.Ical4jHelper.java
private void appendRecurence(Event event, CalendarComponent component) {
    EventRecurrence er = new EventRecurrence();
    RRule rrule = (RRule) component.getProperty(Property.RRULE);
    EnumSet<RecurrenceDay> recurrenceDays = EnumSet.noneOf(RecurrenceDay.class);
    if (rrule != null) {
        Recur recur = rrule.getRecur();
        String frequency = recur.getFrequency();
        if (Recur.WEEKLY.equals(frequency) || Recur.DAILY.equals(frequency)) {
            for (Object ob : recur.getDayList()) {
                recurrenceDays.add(weekDayToRecurrenceDay((WeekDay) ob));
            }
            if (Recur.WEEKLY.equals(frequency) && recurrenceDays.isEmpty()) {
                GregorianCalendar cal = getEventStartCalendar(event);
                WeekDay eventStartWeekDay = WeekDay.getDay(cal.get(GregorianCalendar.DAY_OF_WEEK));
                recurrenceDays.add(WEEK_DAY_TO_RECURRENCE_DAY.get(eventStartWeekDay));
            }
        }
        er.setDays(new RecurrenceDays(recurrenceDays));
        er.setEnd(recur.getUntil());
        er.setFrequence(Math.max(recur.getInterval(), 1)); // getInterval() returns -1 if no interval is defined

        if (er.getDays().isEmpty()) {
            if (Recur.DAILY.equals(frequency)) {
                er.setKind(RecurrenceKind.daily);
            } else if (Recur.WEEKLY.equals(frequency)) {
                er.setKind(RecurrenceKind.weekly);
            } else if (Recur.MONTHLY.equals(frequency)) {
                WeekDayList wdl = recur.getDayList();
                if (wdl.size() > 0) {
                    WeekDay day = (WeekDay) wdl.get(0);
                    GregorianCalendar cal = getEventStartCalendar(event);
                    er.setKind(RecurrenceKind.monthlybyday);
                    cal.set(GregorianCalendar.DAY_OF_WEEK, WeekDay.getCalendarDay(day));
                    cal.set(GregorianCalendar.DAY_OF_WEEK_IN_MONTH, day.getOffset());
                    event.setStartDate(cal.getTime());
                } else {
                    er.setKind(RecurrenceKind.monthlybydate);
                }
            } else if (Recur.YEARLY.equals(frequency)) {
                er.setKind(RecurrenceKind.yearly);
            }
        } else {
            er.setKind(RecurrenceKind.weekly);
        }
    }
    event.setRecurrence(er);
    appendNegativeExceptions(event, component.getProperties(Property.EXDATE));
}
From source file:org.jahia.services.search.facets.SimpleJahiaJcrFacets.java
private <T extends Comparable<T>> NamedList<Object> getFacetRangeCounts(final SchemaField sf, final String f,
        final RangeEndpointCalculator<T> calc) throws IOException {

    String prefix = params.getFieldParam(f, FacetParams.FACET_PREFIX);

    final NamedList<Object> res = new SimpleOrderedMap<Object>();
    final NamedList<Object> counts = new NamedList<Object>();
    res.add("counts", counts);

    final T start = calc.getValue(required.getFieldParam(f, FacetParams.FACET_RANGE_START));
    // not final, hardend may change this
    T end = calc.getValue(required.getFieldParam(f, FacetParams.FACET_RANGE_END));
    if (end.compareTo(start) < 0) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                "range facet 'end' comes before 'start': " + end + " < " + start);
    }

    final String gap = required.getFieldParam(f, FacetParams.FACET_RANGE_GAP);
    // explicitly return the gap. compute this early so we are more
    // likely to catch parse errors before attempting math
    res.add("gap", calc.getGap(gap));

    final int minCount = params.getFieldInt(f, FacetParams.FACET_MINCOUNT, 0);

    final EnumSet<FacetRangeInclude> include = FacetRangeInclude
            .parseParam(params.getFieldParams(f, FacetParams.FACET_RANGE_INCLUDE));

    T low = start;

    while (low.compareTo(end) < 0) {
        T high = calc.addGap(low, gap);
        if (end.compareTo(high) < 0) {
            if (params.getFieldBool(f, FacetParams.FACET_RANGE_HARD_END, false)) {
                high = end;
            } else {
                end = high;
            }
        }
        if (high.compareTo(low) < 0) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "range facet infinite loop (is gap negative? did the math overflow?)");
        }

        final boolean includeLower = (include.contains(FacetRangeInclude.LOWER)
                || (include.contains(FacetRangeInclude.EDGE) && 0 == low.compareTo(start)));
        final boolean includeUpper = (include.contains(FacetRangeInclude.UPPER)
                || (include.contains(FacetRangeInclude.EDGE) && 0 == high.compareTo(end)));

        final String lowS = calc.formatValue(low);
        final String highS = calc.formatValue(high);

        Query rangeQ = getRangeQuery(sf.getType(), null, sf, prefix, lowS, highS, includeLower, includeUpper);
        final int count = rangeCount(rangeQ);
        if (count >= minCount) {
            counts.add(lowS + PROPNAME_INDEX_SEPARATOR + rangeQ.toString(), count);
        }

        low = high;
    }

    // explicitly return the start and end so all the counts
    // (including before/after/between) are meaningful - even if mincount
    // has removed the neighboring ranges
    res.add("start", start);
    res.add("end", end);

    final String[] othersP = params.getFieldParams(f, FacetParams.FACET_RANGE_OTHER);
    if (null != othersP && 0 < othersP.length) {
        Set<FacetRangeOther> others = EnumSet.noneOf(FacetRangeOther.class);

        for (final String o : othersP) {
            others.add(FacetRangeOther.get(o));
        }

        // no matter what other values are listed, we don't do
        // anything if "none" is specified.
        if (!others.contains(FacetRangeOther.NONE)) {
            boolean all = others.contains(FacetRangeOther.ALL);
            final String startS = calc.formatValue(start);
            final String endS = calc.formatValue(end);

            if (all || others.contains(FacetRangeOther.BEFORE)) {
                // include upper bound if "outer" or if first gap doesn't already include it
                Query rangeQ = getRangeQuery(sf.getType(), null, sf, prefix, null, startS, false,
                        (include.contains(FacetRangeInclude.OUTER)
                                || (!(include.contains(FacetRangeInclude.LOWER)
                                        || include.contains(FacetRangeInclude.EDGE)))));
                int count = rangeCount(rangeQ);
                if (count >= minCount) {
                    res.add(FacetRangeOther.BEFORE.toString(), count);
                    counts.add(FacetRangeOther.BEFORE.toString() + PROPNAME_INDEX_SEPARATOR + rangeQ.toString(),
                            count);
                }
            }
            if (all || others.contains(FacetRangeOther.AFTER)) {
                // include lower bound if "outer" or if last gap doesn't already include it
                Query rangeQ = getRangeQuery(sf.getType(), null, sf, prefix, endS, null,
                        (include.contains(FacetRangeInclude.OUTER)
                                || (!(include.contains(FacetRangeInclude.UPPER)
                                        || include.contains(FacetRangeInclude.EDGE)))),
                        false);
                int count = rangeCount(rangeQ);
                if (count >= minCount) {
                    res.add(FacetRangeOther.AFTER.toString(), count);
                    counts.add(FacetRangeOther.AFTER.toString() + PROPNAME_INDEX_SEPARATOR + rangeQ.toString(),
                            count);
                }
            }
            if (all || others.contains(FacetRangeOther.BETWEEN)) {
                Query rangeQ = getRangeQuery(sf.getType(), null, sf, prefix, startS, endS,
                        (include.contains(FacetRangeInclude.LOWER) || include.contains(FacetRangeInclude.EDGE)),
                        (include.contains(FacetRangeInclude.UPPER) || include.contains(FacetRangeInclude.EDGE)));
                int count = rangeCount(rangeQ);
                if (count >= minCount) {
                    res.add(FacetRangeOther.BETWEEN.toString(), count);
                    counts.add(FacetRangeOther.BETWEEN.toString() + PROPNAME_INDEX_SEPARATOR
                            + rangeQ.toString(), count);
                }
            }
        }
    }
    return res;
}
From source file:net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl.java
/**
 * Clean up expired entities/devices
 */
protected void cleanupEntities() {
    Calendar c = Calendar.getInstance();
    c.add(Calendar.MILLISECOND, -ENTITY_TIMEOUT);
    Date cutoff = c.getTime();

    ArrayList<Entity> toRemove = new ArrayList<Entity>();
    ArrayList<Entity> toKeep = new ArrayList<Entity>();

    Iterator<Device> diter = deviceMap.values().iterator();
    LinkedList<DeviceUpdate> deviceUpdates = new LinkedList<DeviceUpdate>();

    while (diter.hasNext()) {
        Device d = diter.next();

        while (true) {
            deviceUpdates.clear();
            toRemove.clear();
            toKeep.clear();
            for (Entity e : d.getEntities()) {
                if (e.getLastSeenTimestamp() != null && 0 > e.getLastSeenTimestamp().compareTo(cutoff)) {
                    // individual entity needs to be removed
                    toRemove.add(e);
                } else {
                    toKeep.add(e);
                }
            }
            if (toRemove.size() == 0) {
                break;
            }

            for (Entity e : toRemove) {
                removeEntity(e, d.getEntityClass(), d, toKeep);
            }

            if (toKeep.size() > 0) {
                Device newDevice = allocateDevice(d.getDeviceKey(), d.getDhcpClientName(), d.getOldAPs(), //XXX
                        d.getAps(), toKeep, d.getEntityClass());

                EnumSet<DeviceField> changedFields = EnumSet.noneOf(DeviceField.class);
                for (Entity e : toRemove) {
                    changedFields.addAll(findChangedFields(newDevice, e));
                }
                DeviceUpdate update = null;
                if (changedFields.size() > 0)
                    update = new DeviceUpdate(d, CHANGE, changedFields);

                //FIXME
                if (!deviceMap.insert(newDevice.getDeviceKey(), newDevice)) {
                    // concurrent modification; try again
                    // need to use device that is the map now for the next
                    // iteration
                    d = deviceMap.get(d.getDeviceKey());
                    if (null != d)
                        continue;
                }
                if (update != null)
                    deviceUpdates.add(update);
            } else {
                DeviceUpdate update = new DeviceUpdate(d, DELETE, null);
                if (!deviceMap.remove(d.getDeviceKey(), d)) {
                    // concurrent modification; try again
                    // need to use device that is the map now for the next
                    // iteration
                    d = deviceMap.get(d.getDeviceKey());
                    if (null != d)
                        continue;
                }
                deviceUpdates.add(update);
            }
            processUpdates(deviceUpdates);
            break;
        }
    }
}
From source file:net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl.java
private EnumSet<DeviceField> getEntityKeys(Long macAddress, Short vlan, Integer ipv4Address, Long switchDPID,
        Integer switchPort) {
    // FIXME: vlan==null is a valid search. Need to handle this
    // case correctly. Note that the code will still work correctly.
    // But we might do a full device search instead of using an index.
    EnumSet<DeviceField> keys = EnumSet.noneOf(DeviceField.class);
    if (macAddress != null)
        keys.add(DeviceField.MAC);
    if (vlan != null)
        keys.add(DeviceField.VLAN);
    if (ipv4Address != null)
        keys.add(DeviceField.IPV4);
    if (switchDPID != null)
        keys.add(DeviceField.SWITCH);
    if (switchPort != null)
        keys.add(DeviceField.PORT);
    return keys;
}
From source file:microsoft.exchange.webservices.data.autodiscover.AutodiscoverService.java
/**
 * Gets the endpoints from HTTP web response.
 *
 * @param request the request
 * @return Endpoints enabled.
 * @throws microsoft.exchange.webservices.data.exception.EWSHttpException the eWS http exception
 */
private EnumSet<AutodiscoverEndpoints> getEndpointsFromHttpWebResponse(HttpWebRequest request)
        throws EWSHttpException {
    EnumSet<AutodiscoverEndpoints> endpoints = EnumSet.noneOf(AutodiscoverEndpoints.class);
    endpoints.add(AutodiscoverEndpoints.Legacy);
    if (!(request.getResponseHeaders().get(AutodiscoverSoapEnabledHeaderName) == null
            || request.getResponseHeaders().get(AutodiscoverSoapEnabledHeaderName).isEmpty())) {
        endpoints.add(AutodiscoverEndpoints.Soap);
    }
    if (!(request.getResponseHeaders().get(AutodiscoverWsSecurityEnabledHeaderName) == null
            || request.getResponseHeaders().get(AutodiscoverWsSecurityEnabledHeaderName).isEmpty())) {
        endpoints.add(AutodiscoverEndpoints.WsSecurity);
    }

    /*
    if (!(request.getResponseHeaders().get(AutodiscoverWsSecuritySymmetricKeyEnabledHeaderName) != null
            || request.getResponseHeaders().get(AutodiscoverWsSecuritySymmetricKeyEnabledHeaderName).isEmpty())) {
        endpoints.add(AutodiscoverEndpoints.WSSecuritySymmetricKey);
    }
    if (!(request.getResponseHeaders().get(AutodiscoverWsSecurityX509CertEnabledHeaderName) != null
            || request.getResponseHeaders().get(AutodiscoverWsSecurityX509CertEnabledHeaderName).isEmpty())) {
        endpoints.add(AutodiscoverEndpoints.WSSecurityX509Cert);
    }
    */

    return endpoints;
}