Example usage for java.util TreeSet clear

List of usage examples for java.util TreeSet clear

Introduction

In this page you can find the example usage for java.util TreeSet clear.

Prototype

public void clear() 

Source Link

Document

Removes all of the elements from this set.

Usage

From source file:org.apache.accumulo.server.gc.SimpleGarbageCollector.java

/**
 * Builds the sorted set of deletion candidates for the garbage collector.
 *
 * <p>In offline mode the filesystem is globbed directly for files with valid
 * extensions under the tables directory (the root tablet's directory is never
 * a candidate). Otherwise the METADATA table's delete-flag keyspace is scanned,
 * resuming from {@code continueKey} when a previous pass ran out of memory.
 *
 * @return table-relative paths of files that are candidates for deletion
 * @throws Exception if the METADATA scanner cannot be created
 */
SortedSet<String> getCandidates() throws Exception {
    TreeSet<String> candidates = new TreeSet<String>();

    if (offline) {
        // offline mode cannot tell bulk files apart, so assume they may exist
        checkForBulkProcessingFiles = true;
        try {
            for (String validExtension : FileOperations.getValidExtensions()) {
                for (FileStatus stat : fs
                        .globStatus(new Path(ServerConstants.getTablesDir() + "/*/*/*." + validExtension))) {
                    String cand = stat.getPath().toUri().getPath();
                    if (!cand.contains(ServerConstants.getRootTabletDir())) {
                        // store the path relative to the tables dir, matching the metadata form
                        candidates.add(cand.substring(ServerConstants.getTablesDir().length()));
                        log.debug("Offline candidate: " + cand);
                    }
                }
            }
        } catch (IOException e) {
            log.error(
                    "Unable to check the filesystem for offline candidates. Removing all candidates for deletion to be safe.",
                    e);
            // deleting nothing is safe; deleting a live file is not
            candidates.clear();
        }
        return candidates;
    }

    Scanner scanner = instance.getConnector(credentials).createScanner(Constants.METADATA_TABLE_NAME,
            Constants.NO_AUTHS);

    if (continueKey != null) {
        // want to ensure GC makes progress... if the 1st N deletes are stable and we keep processing them, then will never inspect deletes after N
        scanner.setRange(new Range(continueKey, true, Constants.METADATA_DELETES_KEYSPACE.getEndKey(),
                Constants.METADATA_DELETES_KEYSPACE.isEndKeyInclusive()));
        continueKey = null;
    } else {
        // scan the reserved keyspace for deletes
        scanner.setRange(Constants.METADATA_DELETES_KEYSPACE);
    }

    // find candidates for deletion; chop off the prefix
    checkForBulkProcessingFiles = false;
    for (Entry<Key, Value> entry : scanner) {
        String cand = entry.getKey().getRow().toString()
                .substring(Constants.METADATA_DELETE_FLAG_PREFIX.length());
        candidates.add(cand);
        // bulk-load markers require extra processing later in the GC cycle
        checkForBulkProcessingFiles |= cand.toLowerCase(Locale.ENGLISH).contains(Constants.BULK_PREFIX);
        if (almostOutOfMemory()) {
            candidateMemExceeded = true;
            log.info(
                    "List of delete candidates has exceeded the memory threshold. Attempting to delete what has been gathered so far.");
            // remember where to resume scanning on the next pass
            continueKey = entry.getKey();
            break;
        }
    }

    return candidates;
}

From source file:org.apache.hadoop.dfs.NamenodeFsck.java

/**
 * Copies one block's bytes from a datanode to the given output stream,
 * trying other replicas as individual datanodes fail.
 *
 * @param dfs    client used to pick the best datanode for the block
 * @param lblock located block (with replica locations) to copy
 * @param fos    destination stream; not closed by this method
 * @throws Exception if the block cannot be obtained or fully copied
 */
private void copyBlock(DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    DFSClient.BlockReader blockReader = null;
    Block block = lblock.getBlock();

    // keep trying datanodes until a socket and reader are established
    while (s == null) {
        DatanodeInfo chosenNode;

        try {
            chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
            targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
        } catch (IOException ie) {
            if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
                throw new IOException("Could not obtain block " + lblock);
            }
            LOG.info("Could not obtain block from any node:  " + ie);
            try {
                Thread.sleep(10000);
            } catch (InterruptedException iex) {
                // FIX: restore the interrupt status instead of silently swallowing it,
                // so callers can still observe that this thread was interrupted.
                Thread.currentThread().interrupt();
            }
            // forget previously-dead nodes and retry them all on the next round
            deadNodes.clear();
            failures++;
            continue;
        }
        try {
            s = new Socket();
            s.connect(targetAddr, FSConstants.READ_TIMEOUT);
            s.setSoTimeout(FSConstants.READ_TIMEOUT);

            blockReader = DFSClient.BlockReader.newBlockReader(s,
                    targetAddr.toString() + ":" + block.getBlockId(), block.getBlockId(),
                    block.getGenerationStamp(), 0, -1, conf.getInt("io.file.buffer.size", 4096));

        } catch (IOException ex) {
            // Put chosen node into dead list, continue
            LOG.info("Failed to connect to " + targetAddr + ":" + ex);
            deadNodes.add(chosenNode);
            if (s != null) {
                try {
                    s.close();
                } catch (IOException iex) {
                    // ignored: best-effort close of a half-open socket
                }
            }
            s = null;
        }
    }
    if (blockReader == null) {
        throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
        while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
            fos.write(buf, 0, cnt);
            bytesRead += cnt;
        }
        // a short read means the replica is truncated or corrupt
        if (bytesRead != block.getNumBytes()) {
            throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode returned "
                    + bytesRead + " bytes");
        }
    } catch (Exception e) {
        e.printStackTrace();
        success = false;
    } finally {
        try {
            s.close();
        } catch (Exception e1) {
            // ignored: best-effort close
        }
    }
    if (!success)
        throw new Exception("Could not copy block data for " + lblock.getBlock());
}

From source file:org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.java

/**
 * Copies one block's bytes from a datanode to the given output stream,
 * trying other replicas as individual datanodes fail.
 *
 * @param dfs    client used to pick the best datanode for the block
 * @param lblock located block (with replica locations and access token)
 * @param fos    destination stream; not closed by this method
 * @throws Exception if the block cannot be obtained or fully copied
 */
private void copyBlock(DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    DFSClient.BlockReader blockReader = null;
    Block block = lblock.getBlock();

    // keep trying datanodes until a socket and reader are established
    while (s == null) {
        DatanodeInfo chosenNode;

        try {
            chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
            targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
        } catch (IOException ie) {
            if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
                throw new IOException("Could not obtain block " + lblock);
            }
            LOG.info("Could not obtain block from any node:  " + ie);
            try {
                Thread.sleep(10000);
            } catch (InterruptedException iex) {
                // FIX: restore the interrupt status instead of silently swallowing it,
                // so callers can still observe that this thread was interrupted.
                Thread.currentThread().interrupt();
            }
            // forget previously-dead nodes and retry them all on the next round
            deadNodes.clear();
            failures++;
            continue;
        }
        try {
            s = new Socket();
            s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
            s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

            blockReader = DFSClient.BlockReader.newBlockReader(s,
                    targetAddr.toString() + ":" + block.getBlockId(), block.getBlockId(),
                    lblock.getBlockToken(), block.getGenerationStamp(), 0, -1,
                    conf.getInt("io.file.buffer.size", 4096));

        } catch (IOException ex) {
            // Put chosen node into dead list, continue
            LOG.info("Failed to connect to " + targetAddr + ":" + ex);
            deadNodes.add(chosenNode);
            if (s != null) {
                try {
                    s.close();
                } catch (IOException iex) {
                    // ignored: best-effort close of a half-open socket
                }
            }
            s = null;
        }
    }
    if (blockReader == null) {
        throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
        while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
            fos.write(buf, 0, cnt);
            bytesRead += cnt;
        }
        // a short read means the replica is truncated or corrupt
        if (bytesRead != block.getNumBytes()) {
            throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode returned "
                    + bytesRead + " bytes");
        }
    } catch (Exception e) {
        e.printStackTrace();
        success = false;
    } finally {
        try {
            s.close();
        } catch (Exception e1) {
            // ignored: best-effort close
        }
    }
    if (!success)
        throw new Exception("Could not copy block data for " + lblock.getBlock());
}

From source file:org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.java

/**
 * Copies one block's bytes from a datanode to the given output stream,
 * trying other replicas as individual datanodes fail. Connections are
 * established through a {@link RemotePeerFactory} so SASL-secured data
 * transfer is supported.
 *
 * @param dfs    client used to pick the best datanode for the block
 * @param lblock located block (with replica locations and access token)
 * @param fos    destination stream; not closed by this method
 * @throws Exception if the block cannot be obtained or fully copied
 */
private void copyBlock(final DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();

    // keep trying datanodes until a block reader is established
    while (blockReader == null) {
        DatanodeInfo chosenNode;

        try {
            chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
            targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
        } catch (IOException ie) {
            if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
                throw new IOException("Could not obtain block " + lblock, ie);
            }
            LOG.info("Could not obtain block from any node:  " + ie);
            try {
                Thread.sleep(10000);
            } catch (InterruptedException iex) {
                // FIX: restore the interrupt status instead of silently swallowing it,
                // so callers can still observe that this thread was interrupted.
                Thread.currentThread().interrupt();
            }
            // forget previously-dead nodes and retry them all on the next round
            deadNodes.clear();
            failures++;
            continue;
        }
        try {
            String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(),
                    block.getBlockId());
            blockReader = new BlockReaderFactory(dfs.getConf()).setFileName(file).setBlock(block)
                    .setBlockToken(lblock.getBlockToken()).setStartOffset(0).setLength(-1)
                    .setVerifyChecksum(true).setClientName("fsck").setDatanodeInfo(chosenNode)
                    .setInetSocketAddress(targetAddr).setCachingStrategy(CachingStrategy.newDropBehind())
                    .setClientCacheContext(dfs.getClientContext()).setConfiguration(namenode.conf)
                    .setRemotePeerFactory(new RemotePeerFactory() {
                        @Override
                        public Peer newConnectedPeer(InetSocketAddress addr,
                                Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
                                throws IOException {
                            Peer peer = null;
                            Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
                            try {
                                s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
                                s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
                                peer = TcpPeerServer.peerFromSocketAndKey(dfs.getSaslDataTransferClient(), s,
                                        NamenodeFsck.this, blockToken, datanodeId);
                            } finally {
                                // if peer creation failed, the raw socket must not leak
                                if (peer == null) {
                                    IOUtils.closeQuietly(s);
                                }
                            }
                            return peer;
                        }
                    }).build();
        } catch (IOException ex) {
            // Put chosen node into dead list, continue
            LOG.info("Failed to connect to " + targetAddr + ":" + ex);
            deadNodes.add(chosenNode);
        }
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
        while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
            fos.write(buf, 0, cnt);
            bytesRead += cnt;
        }
        // a short read means the replica is truncated or corrupt
        if (bytesRead != block.getNumBytes()) {
            throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode returned "
                    + bytesRead + " bytes");
        }
    } catch (Exception e) {
        LOG.error("Error reading block", e);
        success = false;
    } finally {
        blockReader.close();
    }
    if (!success) {
        throw new Exception("Could not copy block data for " + lblock.getBlock());
    }
}

From source file:org.cloudata.core.tabletserver.DiskSSTable.java

/**
 * Finds the closest non-deleted META column value at or after the given row
 * key, merging the sorted streams of all map files for the column.
 *
 * @param rowKey     row key to search from
 * @param columnName column whose memory cache / map files are consulted
 * @param great      if true, values whose row key equals {@code rowKey} qualify;
 *                   otherwise only strictly greater row keys do
 * @return the closest matching column value, or null if none exists
 * @throws IOException if a map file cannot be opened or read
 */
public ColumnValue findClosestMeta(Row.Key rowKey, String columnName, boolean great) throws IOException {
    lock.obtainReadLock();
    try {
        // a memory cache, when present, answers directly without touching disk
        if (columnMemoryCaches.containsKey(columnName)) {
            ColumnMemoryCache cache = columnMemoryCaches.get(columnName);
            return cache.findClosest(rowKey);
        }
        List<TabletMapFile> tabletMapFiles = mapFiles.get(columnName);
        if (tabletMapFiles == null || tabletMapFiles.isEmpty()) {
            return null;
        }

        MapFileReader[] readers = new MapFileReader[tabletMapFiles.size()];

        // values gathered for the current winning row key, sorted
        TreeSet<MetaValue> metaValues = new TreeSet<MetaValue>();

        // head element of each reader's stream, sorted; drives the merge
        TreeSet<ColumnValue> workPlace = new TreeSet<ColumnValue>();

        try {
            //init: advance every reader to its first qualifying value
            CellFilter cellFilter = new CellFilter(columnName);
            int index = 0;
            for (TabletMapFile tabletMapFile : tabletMapFiles) {
                MapFileReader reader = tabletMapFile.getMapFileReader(rowKey, Row.Key.MAX_KEY, cellFilter);
                ColumnValue columnValue = null;
                while ((columnValue = reader.next()) != null) {
                    if (great) {
                        if (columnValue.getRowKey().compareTo(rowKey) < 0) {
                            continue;
                        }
                    } else {
                        if (columnValue.getRowKey().compareTo(rowKey) <= 0) {
                            continue;
                        }
                    }
                    break;
                }
                if (columnValue != null) {
                    workPlace.add(columnValue);
                    readers[index] = reader;
                } else {
                    // this file has nothing at/after rowKey; drop it from the merge
                    reader.close();
                    readers[index] = null;
                }
                index++;
            }

            //findClosestMeta: repeatedly take the smallest row key across readers
            while (true) {
                if (workPlace.isEmpty()) {
                    return null;
                }
                ColumnValue winnerColumnValue = workPlace.first();
                metaValues.add(new MetaValue(winnerColumnValue));
                workPlace.remove(winnerColumnValue);
                Row.Key winnerRowKey = winnerColumnValue.getRowKey();

                // pull every other queued value that shares the winning row key
                List<ColumnValue> tempWorkPlace = new ArrayList<ColumnValue>();
                tempWorkPlace.addAll(workPlace);
                for (ColumnValue eachColumnValue : tempWorkPlace) {
                    if (winnerRowKey.equals(eachColumnValue.getRowKey())) {
                        metaValues.add(new MetaValue(eachColumnValue));
                        workPlace.remove(eachColumnValue);
                    }
                }

                // advance each live reader past the winning row key,
                // collecting any further values for that same key
                for (int i = 0; i < readers.length; i++) {
                    if (readers[i] == null) {
                        continue;
                    }
                    ColumnValue columnValue = null;
                    while ((columnValue = readers[i].next()) != null) {
                        if (winnerRowKey.equals(columnValue.getRowKey())) {

                            metaValues.add(new MetaValue(columnValue));
                        } else {
                            workPlace.add(columnValue);
                            break;
                        }
                    }
                    if (columnValue == null) {
                        // reader exhausted; close and remove it from the merge
                        readers[i].close();
                        readers[i] = null;
                    }
                }

                if (metaValues.size() > 0) {
                    MetaValue firstValue = metaValues.first();
                    if (!firstValue.columnValue.isDeleted()) {
                        return firstValue.columnValue;
                    } else {
                        // winning row was deleted; discard and move to the next row key
                        metaValues.clear();
                    }
                }
            }
        } finally {
            // close any readers still open (readers[i] is nulled when already closed)
            for (int i = 0; i < readers.length; i++) {
                try {
                    if (readers[i] != null) {
                        readers[i].close();
                    }
                } catch (Exception e) {
                    LOG.warn("Can't close MapFileReader:" + e.getMessage());
                }
            }
        }
    } finally {
        lock.releaseReadLock();
    }
}

From source file:org.codehaus.enunciate.modules.jboss.JBossDeploymentModule.java

@Override
protected void doBuild() throws EnunciateException, IOException {
    // Assembles the JBoss web-app fragment: JAX-WS endpoint servlets (when
    // enabled) and a single RESTEasy dispatcher servlet whose URL mappings
    // and context parameters are derived from the Enunciate model.
    super.doBuild();

    BaseWebAppFragment webappFragment = new BaseWebAppFragment(getName());
    HashMap<String, String> jbossContextParameters = new HashMap<String, String>();
    webappFragment.setContextParameters(jbossContextParameters);

    ArrayList<WebAppComponent> servlets = new ArrayList<WebAppComponent>();
    if (this.enableJaxws) {
        // one servlet component per SOAP endpoint, mapped to its soapPath
        for (WsdlInfo wsdlInfo : getModelInternal().getNamespacesToWSDLs().values()) {
            for (EndpointInterface ei : wsdlInfo.getEndpointInterfaces()) {
                String path = (String) ei.getMetaData().get("soapPath");
                WebAppComponent wsComponent = new WebAppComponent();
                wsComponent.setName(ei.getServiceName());
                wsComponent.setClassname(ei.getEndpointImplementations().iterator().next().getQualifiedName());
                wsComponent.setUrlMappings(new TreeSet<String>(Arrays.asList(path)));
                servlets.add(wsComponent);
            }
        }
    }

    if (this.enableJaxrs) {
        WebAppComponent jaxrsServletComponent = new WebAppComponent();
        jaxrsServletComponent.setName("resteasy-jaxrs");
        jaxrsServletComponent.setClassname(EnunciateJBossHttpServletDispatcher.class.getName());
        TreeSet<String> jaxrsUrlMappings = new TreeSet<String>();
        StringBuilder resources = new StringBuilder();
        // collect the comma-separated resource class list and every servlet
        // pattern under which a resource method is reachable
        for (RootResource rootResource : getModel().getRootResources()) {
            if (resources.length() > 0) {
                resources.append(',');
            }
            resources.append(rootResource.getQualifiedName());

            for (ResourceMethod resourceMethod : rootResource.getResourceMethods(true)) {
                String resourceMethodPattern = resourceMethod.getServletPattern();
                for (Set<String> subcontextList : ((Map<String, Set<String>>) resourceMethod.getMetaData()
                        .get("subcontexts")).values()) {
                    for (String subcontext : subcontextList) {
                        String servletPattern;
                        if ("".equals(subcontext)) {
                            servletPattern = resourceMethodPattern;
                        } else {
                            servletPattern = subcontext + resourceMethodPattern;
                        }

                        if (jaxrsUrlMappings.add(servletPattern)) {
                            debug("Resource method %s of resource %s to be made accessible by servlet pattern %s.",
                                    resourceMethod.getSimpleName(),
                                    resourceMethod.getParent().getQualifiedName(), servletPattern);
                        }
                    }
                }
            }
        }

        // collapse the mapping set: "/*" subsumes everything; otherwise drop
        // any mapping whose wildcard sibling ("x" vs "x/*") is also present
        if (jaxrsUrlMappings.contains("/*")) {
            jaxrsUrlMappings.clear();
            jaxrsUrlMappings.add("/*");
        } else {
            Iterator<String> iterator = jaxrsUrlMappings.iterator();
            while (iterator.hasNext()) {
                String mapping = iterator.next();
                if (!mapping.endsWith("/*") && jaxrsUrlMappings.contains(mapping + "/*")) {
                    iterator.remove();
                }
            }
        }

        // comma-separated provider class list, plus optional Jackson/AMF providers
        StringBuilder providers = new StringBuilder();
        for (TypeDeclaration provider : getModel().getJAXRSProviders()) {
            if (providers.length() > 0) {
                providers.append(',');
            }

            providers.append(provider.getQualifiedName());
        }

        if (jacksonAvailable) {
            if (providers.length() > 0) {
                providers.append(',');
            }

            providers.append("org.codehaus.enunciate.jboss.ResteasyJacksonJaxbProvider");
        }

        if (getEnunciate().isModuleEnabled("amf")) {
            if (providers.length() > 0) {
                providers.append(',');
            }

            providers.append("org.codehaus.enunciate.modules.amf.JAXRSProvider");
        }

        jaxrsServletComponent.setUrlMappings(jaxrsUrlMappings);
        jbossContextParameters.put(ResteasyContextParameters.RESTEASY_RESOURCES, resources.toString());
        jbossContextParameters.put(ResteasyContextParameters.RESTEASY_PROVIDERS, providers.toString());
        String mappingPrefix = this.useSubcontext ? getRestSubcontext() : "";
        if (!"".equals(mappingPrefix)) {
            jbossContextParameters.put("resteasy.servlet.mapping.prefix", mappingPrefix);
            jaxrsServletComponent.addInitParam("resteasy.servlet.mapping.prefix", mappingPrefix);
        }
        if (isUsePathBasedConneg()) {
            // "id : content/type" pairs for RESTEasy media-type mappings
            Map<String, String> contentTypesToIds = getModelInternal().getContentTypesToIds();
            if (contentTypesToIds != null && contentTypesToIds.size() > 0) {
                StringBuilder builder = new StringBuilder();
                Iterator<Map.Entry<String, String>> contentTypeIt = contentTypesToIds.entrySet().iterator();
                while (contentTypeIt.hasNext()) {
                    Map.Entry<String, String> contentTypeEntry = contentTypeIt.next();
                    builder.append(contentTypeEntry.getValue()).append(" : ").append(contentTypeEntry.getKey());
                    if (contentTypeIt.hasNext()) {
                        builder.append(", ");
                    }
                }
                jbossContextParameters.put(ResteasyContextParameters.RESTEASY_MEDIA_TYPE_MAPPINGS,
                        builder.toString());
            }
        }
        jbossContextParameters.put(ResteasyContextParameters.RESTEASY_SCAN_RESOURCES, Boolean.FALSE.toString());
        servlets.add(jaxrsServletComponent);
    }

    webappFragment.setServlets(servlets);
    // NOTE(review): when custom options are present they REPLACE the RESTEasy
    // context parameters assembled above rather than merging — confirm intended.
    if (!this.options.isEmpty()) {
        webappFragment.setContextParameters(this.options);
    }
    getEnunciate().addWebAppFragment(webappFragment);
}

From source file:org.codehaus.enunciate.modules.jersey.JerseyDeploymentModule.java

@Override
protected void doBuild() throws EnunciateException, IOException {
    // Assembles the Jersey web-app fragment: copies the generated resource /
    // provider / type lists into WEB-INF/classes, then configures a single
    // Jersey servlet with init params and URL mappings from the model.
    super.doBuild();

    File webappDir = getBuildDir();
    webappDir.mkdirs();
    File webinf = new File(webappDir, "WEB-INF");
    File webinfClasses = new File(webinf, "classes");
    getEnunciate().copyFile(new File(getGenerateDir(), "jaxrs-providers.list"),
            new File(webinfClasses, "jaxrs-providers.list"));
    getEnunciate().copyFile(new File(getGenerateDir(), "jaxrs-root-resources.list"),
            new File(webinfClasses, "jaxrs-root-resources.list"));
    getEnunciate().copyFile(new File(getGenerateDir(), "jaxrs-jaxb-types.list"),
            new File(webinfClasses, "jaxrs-jaxb-types.list"));
    getEnunciate().copyFile(new File(getGenerateDir(), "media-type-mappings.properties"),
            new File(webinfClasses, "media-type-mappings.properties"));
    getEnunciate().copyFile(new File(getGenerateDir(), "ns2prefix.properties"),
            new File(webinfClasses, "ns2prefix.properties"));

    BaseWebAppFragment webappFragment = new BaseWebAppFragment(getName());
    webappFragment.setBaseDir(webappDir);
    WebAppComponent servletComponent = new WebAppComponent();
    servletComponent.setName("jersey");
    servletComponent.setClassname(EnunciateJerseyServletContainer.class.getName());
    TreeMap<String, String> initParams = new TreeMap<String, String>();
    initParams.putAll(getServletInitParams());
    if (!isUsePathBasedConneg()) {
        initParams.put(JerseyAdaptedHttpServletRequest.FEATURE_PATH_BASED_CONNEG, Boolean.FALSE.toString());
    }
    if (isUseSubcontext()) {
        initParams.put(JerseyAdaptedHttpServletRequest.PROPERTY_SERVLET_PATH, getRestSubcontext());
    }
    if (getResourceProviderFactory() != null) {
        initParams.put(JerseyAdaptedHttpServletRequest.PROPERTY_RESOURCE_PROVIDER_FACTORY,
                getResourceProviderFactory());
    }
    if (getApplicationClass() != null) {
        initParams.put("javax.ws.rs.Application", getApplicationClass());
    }
    if (getLoadOnStartup() != null) {
        servletComponent.setLoadOnStartup(getLoadOnStartup());
    }
    servletComponent.setInitParams(initParams);

    // every servlet pattern under which a resource method is reachable
    // (wildcard mappings are used instead when configured)
    TreeSet<String> urlMappings = new TreeSet<String>();
    for (RootResource rootResource : getModel().getRootResources()) {
        for (ResourceMethod resourceMethod : rootResource.getResourceMethods(true)) {
            String resourceMethodPattern = resourceMethod.getServletPattern();
            for (Set<String> subcontextList : ((Map<String, Set<String>>) resourceMethod.getMetaData()
                    .get("subcontexts")).values()) {
                for (String subcontext : subcontextList) {
                    String servletPattern;
                    if ("".equals(subcontext)) {
                        servletPattern = isUseWildcardServletMapping() ? "/*" : resourceMethodPattern;
                    } else {
                        servletPattern = isUseWildcardServletMapping() ? subcontext + "/*"
                                : subcontext + resourceMethodPattern;
                    }

                    if (urlMappings.add(servletPattern)) {
                        debug("Resource method %s of resource %s to be made accessible by servlet pattern %s.",
                                resourceMethod.getSimpleName(), resourceMethod.getParent().getQualifiedName(),
                                servletPattern);
                    }
                }
            }
        }
    }

    // collapse the mapping set: "/*" subsumes everything; otherwise drop
    // any mapping whose wildcard sibling ("x" vs "x/*") is also present
    if (urlMappings.contains("/*")) {
        urlMappings.clear();
        urlMappings.add("/*");
    } else {
        Iterator<String> iterator = urlMappings.iterator();
        while (iterator.hasNext()) {
            String mapping = iterator.next();
            if (!mapping.endsWith("/*") && urlMappings.contains(mapping + "/*")) {
                iterator.remove();
            }
        }
    }

    servletComponent.setUrlMappings(urlMappings);
    webappFragment.setServlets(Arrays.asList(servletComponent));
    getEnunciate().addWebAppFragment(webappFragment);
}

From source file:org.lockss.devtools.CrawlRuleTester.java

/**
 * Crawls from the configured base URL up to the configured depth, applying
 * the crawl rules at each level and reporting inclusion/exclusion results
 * and a final summary.
 */
private void checkRules() {
    outputMessage("\nChecking " + m_baseUrl, TEST_SUMMARY_MESSAGE);
    outputMessage("crawl depth: " + m_crawlDepth + "     crawl delay: " + m_crawlDelay + " ms.", PLAIN_MESSAGE);

    TreeSet toCrawl = new TreeSet();
    TreeSet visited = new TreeSet();
    // seed the crawl with the base URL
    toCrawl.add(m_baseUrl);
    depth_incl = new int[m_crawlDepth];
    depth_fetched = new int[m_crawlDepth];
    depth_parsed = new int[m_crawlDepth];
    long startMs = TimeBase.nowMs();
    for (int depth = 1; depth <= m_crawlDepth; depth++) {
        if (isInterrupted()) {
            return;
        }
        m_curDepth = depth;
        if (toCrawl.isEmpty() && depth <= m_crawlDepth) {
            outputMessage("\nNothing left to crawl, exiting after depth " + (depth - 1), PLAIN_MESSAGE);
            break;
        }
        // snapshot this level's URLs so the set can accumulate the next level
        String[] thisLevel = (String[]) toCrawl.toArray(new String[0]);
        toCrawl.clear();
        outputMessage("\nDepth " + depth, PLAIN_MESSAGE);
        for (String pageUrl : thisLevel) {
            if (isInterrupted()) {
                return;
            }
            pauseBeforeFetch();

            m_incls.clear();
            m_excls.clear();

            // crawl the page and record which links were included/excluded
            buildUrlSets(pageUrl);
            visited.add(pageUrl);
            // newly-included URLs feed the next crawl depth
            toCrawl.addAll(outputUrlResults(pageUrl, m_incls, m_excls));
        }
    }
    long elapsedMs = TimeBase.nowMs() - startMs;
    outputSummary(m_baseUrl, visited, toCrawl, elapsedMs);
}

From source file:org.wso2.carbon.mdm.api.User.java

/**
 * Update user in user store.
 *
 * @param userWrapper Wrapper object representing input json payload
 * @param username    query-parameter username (the username inside
 *                    {@code userWrapper} is what is actually used)
 * @return {Response} Status of the request wrapped inside Response object
 * @throws MDMAPIException if the user store or password decoding fails
 */
@PUT
@Consumes({ MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_JSON })
public Response updateUser(UserWrapper userWrapper, @QueryParam("username") String username)
        throws MDMAPIException {
    UserStoreManager userStoreManager = MDMAPIUtils.getUserStoreManager();
    ResponsePayload responsePayload = new ResponsePayload();
    try {
        if (userStoreManager.isExistingUser(userWrapper.getUsername())) {
            Map<String, String> defaultUserClaims = buildDefaultUserClaims(userWrapper.getFirstname(),
                    userWrapper.getLastname(), userWrapper.getEmailAddress());
            if (StringUtils.isNotEmpty(userWrapper.getPassword())) {
                // Decoding Base64 encoded password
                byte[] decodedBytes = Base64.decodeBase64(userWrapper.getPassword());
                userStoreManager.updateCredentialByAdmin(userWrapper.getUsername(),
                        new String(decodedBytes, "UTF-8"));
                log.debug("User credential of username: " + userWrapper.getUsername() + " has been changed");
            }
            List<String> listofFilteredRoles = getFilteredRoles(userStoreManager, userWrapper.getUsername());
            final String[] existingRoles = listofFilteredRoles.toArray(new String[listofFilteredRoles.size()]);

            /*
            Use the Set theory to find the roles to delete and roles to add
            The difference of roles in existingRolesSet and newRolesSet needed to be deleted
            new roles to add = newRolesSet - The intersection of roles in existingRolesSet and newRolesSet
            */
            final TreeSet<String> existingRolesSet = new TreeSet<>();
            Collections.addAll(existingRolesSet, existingRoles);
            final TreeSet<String> newRolesSet = new TreeSet<>();
            Collections.addAll(newRolesSet, userWrapper.getRoles());
            existingRolesSet.removeAll(newRolesSet);
            // Now we have the roles to delete
            String[] rolesToDelete = existingRolesSet.toArray(new String[existingRolesSet.size()]);
            List<String> roles = new ArrayList<>(Arrays.asList(rolesToDelete));
            // the internal "everyone" role must never be removed from a user
            roles.remove(ROLE_EVERYONE);
            // FIX: the filtered delete-list was previously discarded
            // (rolesToDelete was reset to an empty array), so roles removed
            // from the payload were never actually deleted from the user.
            rolesToDelete = roles.toArray(new String[roles.size()]);
            // Clearing and re-initializing the set
            existingRolesSet.clear();
            Collections.addAll(existingRolesSet, existingRoles);
            newRolesSet.removeAll(existingRolesSet);
            // Now we have the roles to add
            String[] rolesToAdd = newRolesSet.toArray(new String[newRolesSet.size()]);
            userStoreManager.updateRoleListOfUser(userWrapper.getUsername(), rolesToDelete, rolesToAdd);
            userStoreManager.setUserClaimValues(userWrapper.getUsername(), defaultUserClaims, null);
            // Outputting debug message upon successful addition of user
            if (log.isDebugEnabled()) {
                log.debug("User by username: " + userWrapper.getUsername() + " was successfully updated.");
            }
            // returning response with success state
            responsePayload.setStatusCode(HttpStatus.SC_CREATED);
            responsePayload.setMessageFromServer(
                    "User by username: " + userWrapper.getUsername() + " was successfully updated.");
            return Response.status(HttpStatus.SC_CREATED).entity(responsePayload).build();
        } else {
            if (log.isDebugEnabled()) {
                log.debug("User by username: " + userWrapper.getUsername()
                        + " doesn't exists. Therefore, request made to update user was refused.");
            }
            // returning response with bad request state
            responsePayload.setStatusCode(HttpStatus.SC_CONFLICT);
            responsePayload.setMessageFromServer("User by username: " + userWrapper.getUsername()
                    + " doesn't  exists. Therefore, request made to update user was refused.");
            return Response.status(HttpStatus.SC_CONFLICT).entity(responsePayload).build();
        }
    } catch (UserStoreException | UnsupportedEncodingException e) {
        String msg = "Exception in trying to update user by username: " + userWrapper.getUsername();
        log.error(msg, e);
        throw new MDMAPIException(msg, e);
    }
}

From source file:OSFFM_ORC.FederationActionManager.java

/**
 * Persists network-segment rows from the given tables JSON into Mongo,
 * grouping the rows of each inner array under a fresh UUID foreign key,
 * and records the set of federated network names for the site.
 *
 * @param tables  JSON with a "version" int and a "table" array of arrays of
 *                net-entry objects (each with at least a "name" field)
 * @param m       Mongo accessor used for the inserts
 * @param refSite site identifier the table data is stored under
 * @param tenant  tenant the rows belong to
 * @param updNet  out-parameter: refSite is mapped to a clone of the collected
 *                federated network names
 * @return the last-built segment-table JSON object (empty on early failure)
 */
private JSONObject bnaNetSegCreate(JSONObject tables, DBMongo m, String refSite, String tenant,
        HashMap<String, Object> updNet) {

    JSONObject bnaSegTab = new JSONObject();
    JSONArray segRow = null;
    JSONObject subJSON = null;
    Integer version = null;
    UUID uuid = null;
    String fedNet = "";
    TreeSet<String> fednets = new TreeSet<String>();
    try {
        version = tables.getInt("version");
        JSONArray bigArray = (JSONArray) tables.get("table");
        for (int i = 0; i < bigArray.length(); i++) {

            // one fresh UUID groups all entries of this inner array
            uuid = UUID.randomUUID();
            JSONArray innerArray = (JSONArray) bigArray.get(i);
            for (int j = 0; j < innerArray.length(); j++) {

                JSONObject objectJson = (JSONObject) innerArray.get(j);
                // NOTE(review): with OpenNebula the federated networks inside one
                // inner array may carry different names, so only the LAST name of
                // the array ends up in insertTablesData below — confirm intended.
                fedNet = objectJson.getString("name");
                fednets.add(fedNet);
                bnaSegTab.put("FK", uuid.toString());
                // objectJson looks like: { "tenant_id" : "...", "site_name" : "CETIC",
                //                          "vnid" : "...", "name" : "provider" }
                bnaSegTab.put("netEntry", objectJson);
                m.insertNetTables(tenant, bnaSegTab.toString(0));

            }
            // see the NOTE above: fedNet here is the last name seen in the inner array
            m.insertTablesData(uuid.toString(), tenant, version, refSite, fedNet);
        }
        // store a clone so clearing fednets below does not affect the caller's map
        updNet.put(refSite, fednets.clone());
        Iterator iter = fednets.iterator();
        while (iter.hasNext()) {
            System.out.println(iter.next());
        }
        fednets.clear();
    } catch (JSONException ex) {
        System.out.println("-___-' Error: " + ex.getMessage());
    } catch (MDBIException ex) {
        System.out.println("-___-' Error: " + ex.getMessage());
    }

    return bnaSegTab;
}