Example usage for java.util HashSet size

List of usage examples for java.util HashSet size

Introduction

On this page you can find example usage for java.util HashSet size.

Prototype

public int size() 

Document

Returns the number of elements in this set (its cardinality).
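
Before the longer real-world examples below, a minimal self-contained sketch of the call (class and variable names are illustrative):

import java.util.HashSet;

public class SizeDemo {
    public static void main(String[] args) {
        HashSet<String> colors = new HashSet<String>();
        colors.add("red");
        colors.add("green");
        colors.add("red"); // duplicate, absorbed by the set

        // size() returns the cardinality: 2, not 3
        System.out.println(colors.size());
    }
}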

Usage

From source file:org.jactr.core.module.declarative.search.local.DefaultSearchSystem.java

/**
 * this implementation fails fast
 * 
 * @see org.jactr.core.module.declarative.search.ISearchSystem#findExact(ChunkTypeRequest,
 *      java.util.Comparator)
 */
public Collection<IChunk> findExact(ChunkTypeRequest pattern, Comparator<IChunk> sortRule) {

    /*
     * second pass, ditch all those that don't match our chunktype
     */
    HashSet<IChunk> candidates = new HashSet<IChunk>();
    IChunkType chunkType = pattern.getChunkType();
    if (chunkType != null)
        candidates.addAll(chunkType.getSymbolicChunkType().getChunks());

    /*
     * first things first, find all the candidates based on the content of the
     * pattern
     */
    boolean first = chunkType == null;
    for (IConditionalSlot slot : pattern.getConditionalSlots()) {
        if (first) {
            candidates.addAll(find(slot));
            first = false;
        } else
            candidates.retainAll(find(slot));

        if (candidates.size() == 0)
            break;
    }

    if (LOGGER.isDebugEnabled())
        LOGGER.debug("First pass candidates for " + pattern + " chunks: " + candidates);

    if (sortRule != null) {
        /*
         * finally, we sort them
         */
        TreeSet<IChunk> sortedResults = new TreeSet<IChunk>(sortRule);
        sortedResults.addAll(candidates);

        return sortedResults;
    }

    return candidates;
}
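
The fail-fast intersection pattern above, where retainAll narrows the candidate set and size() == 0 triggers the early exit, can be distilled into a standalone sketch (names and values are illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class IntersectDemo {

    // intersect all sets, failing fast once the running result is empty
    static <T> HashSet<T> intersectAll(List<HashSet<T>> sets) {
        HashSet<T> result = new HashSet<T>();
        boolean first = true;
        for (HashSet<T> s : sets) {
            if (first) {
                result.addAll(s);
                first = false;
            } else {
                result.retainAll(s);
            }
            if (result.size() == 0) // no candidate survives, stop early
                break;
        }
        return result;
    }

    public static void main(String[] args) {
        HashSet<Integer> a = new HashSet<Integer>(Arrays.asList(1, 2, 3));
        HashSet<Integer> b = new HashSet<Integer>(Arrays.asList(2, 3, 4));
        System.out.println(intersectAll(Arrays.asList(a, b))); // [2, 3]
    }
}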

From source file:de.uni_koblenz.jgralab.utilities.csv2tg.Csv2Tg.java

private String[] getFilesInFolder(String[] filenames) {
    HashSet<String> fileList = new HashSet<>();
    for (String filename : filenames) {

        File file = new File(filename).getAbsoluteFile();
        if (!file.exists()) {
            throw new RuntimeException("File or folder \"" + filename + "\" does not exist!");
        }
        if (file.isDirectory()) {
            for (File foundFile : file.listFiles(this)) {
                fileList.add(foundFile.getAbsolutePath());
            }
        } else {
            fileList.add(file.getAbsolutePath());
        }
    }

    if (fileList.isEmpty()) {
        throw new RuntimeException("No csv-files to convert to a tg-file.");
    }

    String[] result = new String[fileList.size()];
    return fileList.toArray(result);
}
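
The last two lines above show the common idiom of using size() to pre-allocate the destination array for toArray. A minimal sketch of the same idiom (names are illustrative; passing new String[0] instead also works and lets toArray allocate the array itself):

import java.util.HashSet;

public class ToArrayDemo {
    public static void main(String[] args) {
        HashSet<String> fileList = new HashSet<String>();
        fileList.add("a.csv");
        fileList.add("b.csv");

        // pre-size the destination array with size() so toArray can fill it in place
        String[] result = new String[fileList.size()];
        result = fileList.toArray(result);
        System.out.println(result.length); // 2
    }
}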

From source file:org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyConfigurable.java

/**
 * Picks a remote machine within the defined window.
 * @param rackIdx rack the request is coming from, which should be avoided
 * @param firstRack rack that starts the window
 * @param rackWindow rack window size
 * @param machineIdx index of the first replica within its rack
 * @param windowSize size of the machine window
 * @param excludedNodes map of blacklisted nodes
 * @param blocksize size of a block
 * @param maxReplicasPerRack maximum number of replicas per rack
 * @param results list that chosen datanodes are appended to
 * @param reverse whether the window is scanned in reverse, requiring index adjustment
 * @return true if a machine was chosen, false otherwise
 * @throws NotEnoughReplicasException
 */
protected boolean chooseRemoteRack(int rackIdx, int firstRack, int rackWindow, int machineIdx, int windowSize,
        HashMap<Node, Node> excludedNodes, long blocksize, int maxReplicasPerRack,
        List<DatanodeDescriptor> results, boolean reverse) throws NotEnoughReplicasException {
    // randomly choose one node from remote racks

    readLock();
    try {
        HashSet<Integer> excludedRacks = new HashSet<Integer>();
        excludedRacks.add(rackIdx);
        int n = racks.size();
        int currRackSize = racksMap.get(racks.get(rackIdx)).rackNodes.size();
        while (excludedRacks.size() < rackWindow) {

            int newRack = randomIntInWindow(firstRack, rackWindow, n, excludedRacks);
            if (newRack < 0)
                break;

            excludedRacks.add(newRack);

            int newRackSize = racksMap.get(racks.get(newRack)).rackNodes.size();
            int firstMachine = machineIdx * newRackSize / currRackSize;

            int newWindowSize = windowSize;
            if (reverse) {
                firstMachine = ((int) Math.ceil((double) machineIdx * newRackSize / currRackSize))
                        % newRackSize;

                newWindowSize = Math.max(1, windowSize * newRackSize / currRackSize);
            }

            if (newWindowSize <= 0) {
                continue;
            }

            if (chooseMachine(racks.get(newRack), firstMachine, newWindowSize, excludedNodes, blocksize,
                    maxReplicasPerRack, results)) {
                return true;
            }
        }
        return false;
    } finally {
        readUnlock();
    }
}

From source file:de.dfki.km.perspecting.obie.experiments.PhraseExperiment.java

/**
 * Test method for
 * {@link de.dfki.km.perspecting.obie.dixi.service.SimpleScobieService#extractInformationFromURL(java.lang.String, java.lang.String)}
 * .
 */
@Test
public void analyseTokenPhraseFrequencies() {
    final String template = "SELECT * WHERE {?s ?p ?o}";

    try {
        final BufferedWriter bw = new BufferedWriter(
                new FileWriter($SCOOBIE_HOME + "results/token_phrase_frequency_wikipedia.csv"));

        final String randomWikipediaPage = "http://en.wikipedia.org/wiki/Special:Random";

        bw.append("tok in doc\tnp in doc\ttok in nps\tdistinct tok in nps\tdistinct tok in doc");
        for (int i = 0; i < 100; i++) {

            Document document = pipeline.createDocument(FileUtils.toFile(new URL(randomWikipediaPage)),
                    new URI(randomWikipediaPage), MediaType.HTML, template, Language.EN);

            for (int step = 0; pipeline.hasNext(step) && step <= 5; step = pipeline.execute(step, document)) {
                System.out.println(step);
            }

            HashSet<String> wordsOfPhrases = new HashSet<String>();
            HashSet<String> wordsOfDocument = new HashSet<String>();

            for (Token token : document.getTokens()) {
                wordsOfDocument.add(token.toString());
            }

            int count = 0;
            for (TokenSequence<String> np : document.getNounPhrases()) {
                String[] words = np.toString().split("[\\s]+");
                count += words.length;
                wordsOfPhrases.addAll(Arrays.asList(words));
            }

            bw.append(document.getTokens().size() + "\t" + document.getNounPhrases().size() + "\t" + count
                    + "\t" + wordsOfPhrases.size() + "\t" + wordsOfDocument.size());
            bw.newLine();

        }
        bw.close();
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    try {
        final BufferedWriter bw = new BufferedWriter(
                new FileWriter($SCOOBIE_HOME + "results/token_phrase_frequency_reuters.csv"));

        final TextCorpus corpus = new TextCorpus(new File("../corpora/reuters/reuters.zip"), MediaType.ZIP,
                MediaType.HTML, Language.EN);

        bw.append("tok in doc\tnp in doc\ttok in nps\tdistinct tok in nps\tdistinct tok in doc");

        corpus.forEach(new DocumentProcedure<URI>() {

            @Override
            public URI process(Reader reader, URI uri) throws Exception {

                Document document = pipeline.createDocument(reader, uri, corpus.getMediatype(), template,
                        corpus.getLanguage());

                for (int step = 0; pipeline.hasNext(step)
                        && step <= 5; step = pipeline.execute(step, document)) {
                    System.out.println(step);
                }

                HashSet<String> wordsOfPhrases = new HashSet<String>();
                HashSet<String> wordsOfDocument = new HashSet<String>();

                for (Token token : document.getTokens()) {
                    wordsOfDocument.add(token.toString());
                }

                int count = 0;
                for (TokenSequence<String> np : document.getNounPhrases()) {
                    String[] words = np.toString().split("[\\s]+");
                    count += words.length;
                    wordsOfPhrases.addAll(Arrays.asList(words));
                }

                bw.append(document.getTokens().size() + "\t" + document.getNounPhrases().size() + "\t" + count
                        + "\t" + wordsOfPhrases.size() + "\t" + wordsOfDocument.size());
                bw.newLine();
                return uri;
            }
        });

        bw.close();
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:org.apache.phoenix.jdbc.SecureUserConnectionsIT.java

@Test
public void testAlternatingConnectionsWithoutLogin() throws Exception {
    final HashSet<ConnectionInfo> connections = new HashSet<>();
    final String princ1 = getUserPrincipal(1);
    final File keytab1 = getUserKeytabFile(1);
    final String princ2 = getUserPrincipal(2);
    final File keytab2 = getUserKeytabFile(2);
    final String url1 = joinUserAuthentication(BASE_URL, princ1, keytab1);
    final String url2 = joinUserAuthentication(BASE_URL, princ2, keytab2);

    // The first URL registers a single ConnectionInfo
    connections.add(ConnectionInfo.create(url1).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(1, connections.size());
    // Sanity check
    verifyAllConnectionsAreKerberosBased(connections);

    // Because the UGI instances are unique, so are the connections
    connections.add(ConnectionInfo.create(url2).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(2, connections.size());
    verifyAllConnectionsAreKerberosBased(connections);

    // Without a login, re-creating url1 builds a fresh UGI, so a third distinct connection appears
    connections.add(ConnectionInfo.create(url1).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(3, connections.size());
    // Sanity check
    verifyAllConnectionsAreKerberosBased(connections);
}
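
The assertions above lean on size() counting distinct elements: because each create() builds a fresh UGI, even a repeated URL lands as a new entry. With plain strings the usual collapsing behavior is visible directly (a minimal sketch, values illustrative):

import java.util.HashSet;

public class DistinctCountDemo {
    public static void main(String[] args) {
        HashSet<String> connections = new HashSet<String>();

        connections.add("url1");
        System.out.println(connections.size()); // 1

        connections.add("url2");
        System.out.println(connections.size()); // 2: a distinct element grows the set

        connections.add("url1");
        System.out.println(connections.size()); // still 2: an equal element is absorbed
    }
}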

From source file:och.comp.billing.standalone.BillingSyncService.java

public int doSyncWork(boolean checkWorkTime, Date nowPreset, CallableVoid beforeDbUpdateListener)
        throws Exception {

    Date now = nowPreset != null ? nowPreset : new Date();

    if (props.getBoolVal(billing_sync_debug_DisableSync))
        return -1;
    if (props.getBoolVal(toolMode))
        return -1;

    // check that the current day and time fall within the configured sync window
    if (checkWorkTime && props.getBoolVal(billing_sync_debug_CheckWorkTime)) {
        int dayOfMonth = dayOfMonth(now);
        int endDay = props.getIntVal(billing_sync_endSyncDay);
        int startDay = props.getIntVal(billing_sync_startSyncDay);
        if (dayOfMonth < startDay)
            return -2;
        if (dayOfMonth > endDay)
            return -3;

        HoursAndMinutes nowHHmm = getHoursAndMinutes(now);
        if (dayOfMonth == startDay) {
            HoursAndMinutes startHHmm = tryParseHHmm(props.getStrVal(billing_sync_startSyncTime), null);
            if (startHHmm != null && nowHHmm.compareTo(startHHmm) < 0)
                return -2;
        }
        if (dayOfMonth == endDay) {
            HoursAndMinutes endHHmm = tryParseHHmm(props.getStrVal(billing_sync_endSyncTime), null);
            if (endHHmm != null && nowHHmm.compareTo(endHHmm) > 0)
                return -3;
        }
    }

    Date curMonthStart = monthStart(now);

    //get all accs
    HashSet<Long> needPayAccs = new HashSet<Long>();
    HashMap<Long, ChatAccount> accsById = new HashMap<>();
    List<ChatAccount> allAccs = universal.select(new GetAllChatAccounts());
    for (ChatAccount acc : allAccs) {
        accsById.put(acc.id, acc);
        if (isNeedToPay(acc, curMonthStart))
            needPayAccs.add(acc.id);
    }

    if (props.getBoolVal(billing_sync_log))
        log.info("sync accs to pay (" + needPayAccs.size() + "): " + needPayAccs);
    if (isEmpty(needPayAccs)) {
        saveLastSyncInfo(0);
        return 0;
    }

    //get tariffs
    List<Tariff> tariffs = universal.select(new GetAllTariffs());
    HashMap<Long, Tariff> tariffsById = new HashMap<>();
    for (Tariff t : tariffs)
        tariffsById.put(t.id, t);

    //find owners
    HashMap<Long, Set<ChatAccount>> accsByUser = new HashMap<>();
    List<ChatAccountPrivileges> allUsersPrivs = universal.select(new GetAllChatAccountPrivileges());
    for (ChatAccountPrivileges data : allUsersPrivs) {
        if (data.privileges.contains(CHAT_OWNER)) {
            ChatAccount acc = accsById.get(data.accId);
            if (acc == null)
                continue;
            putToSetMap(accsByUser, data.userId, acc);
        }
    }

    if (beforeDbUpdateListener != null)
        beforeDbUpdateListener.call();

    //sync by owners
    ArrayList<SyncPayError> syncErrors = new ArrayList<>();
    for (Entry<Long, Set<ChatAccount>> entry : accsByUser.entrySet()) {
        Long userId = entry.getKey();
        Set<ChatAccount> userAccs = entry.getValue();
        try {

            if (syncAccsListener != null)
                syncAccsListener.call();

            List<SyncPayError> curErrors = syncUserAccs(userId, userAccs, tariffsById, curMonthStart, now);
            if (curErrors.size() > 0)
                syncErrors.addAll(curErrors);

        }
        // a concurrent update means another process is already syncing these accounts
        catch (ConcurrentUpdateSqlException e) {
            // exclude them from the updated count
            for (ChatAccount acc : userAccs)
                needPayAccs.remove(acc.id);
        } catch (Throwable t) {
            log.error("can't sync accs for user=" + userId + ": " + t);
            syncErrors.add(new SyncPayError(userId, userAccs, t));
        }
    }

    if (syncErrors.size() > 0)
        sendSyncErrorMailToAdmin("Sync billing errors", syncErrors);

    int updated = needPayAccs.size();

    saveLastSyncInfo(updated);

    return updated;
}
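
Here needPayAccs doubles as a deduplicating counter: membership marks the accounts to bill, and the final size() is the distinct-account total that doSyncWork returns. The core of that pattern in isolation (names and values are illustrative):

import java.util.HashSet;

public class DedupCountDemo {
    public static void main(String[] args) {
        long[] accountIds = { 1L, 2L, 2L, 3L };
        HashSet<Long> needPay = new HashSet<Long>();
        for (long id : accountIds) {
            needPay.add(id); // duplicates are absorbed
        }
        // size() yields the number of distinct accounts: 3
        System.out.println("accounts to pay (" + needPay.size() + "): " + needPay);
    }
}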

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, as well as
 * retrieving existing attributes.
 */
@Test
public void testGetOrAssignStreamSegmentId() {
    final int segmentCount = 10;
    final int transactionsPerSegment = 5;
    final long noSegmentId = ContainerMetadata.NO_STREAM_SEGMENT_ID;
    AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);
    Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0 ? noSegmentId
            : currentSegmentId.get();

    @Cleanup
    TestContext context = new TestContext();

    HashSet<String> storageSegments = new HashSet<>();
    for (int i = 0; i < segmentCount; i++) {
        String segmentName = getName(i);
        storageSegments.add(segmentName);
        setAttributes(segmentName, nextSegmentId.get(), storageSegments.size() % ATTRIBUTE_COUNT, context);

        for (int j = 0; j < transactionsPerSegment; j++) {
            // There is a small chance of a name conflict here, but we don't care. As long as we get at least one
            // Transaction per segment, we should be fine.
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName,
                    UUID.randomUUID());
            storageSegments.add(transactionName);
            setAttributes(transactionName, nextSegmentId.get(), storageSegments.size() % ATTRIBUTE_COUNT,
                    context);
        }
    }

    // We setup all necessary handlers, except the one for create. We do not need to create new Segments here.
    setupOperationLog(context);
    Predicate<String> isSealed = segmentName -> segmentName.hashCode() % 2 == 0;
    Function<String, Long> getInitialLength = segmentName -> (long) Math.abs(segmentName.hashCode());
    setupStorageGetHandler(context, storageSegments, segmentName -> new StreamSegmentInformation(segmentName,
            getInitialLength.apply(segmentName), isSealed.test(segmentName), false, new ImmutableDate()));

    // First, map all the parents (stand-alone segments).
    for (String name : storageSegments) {
        if (StreamSegmentNameUtils.getParentStreamSegmentName(name) == null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for StreamSegment " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for StreamSegment " + name, sm);
            long expectedLength = getInitialLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for StreamSegment " + name,
                    expectedLength, sm.getDurableLogLength());
            Assert.assertEquals(
                    "Metadata does not have the expected value for isSealed for StreamSegment " + name,
                    expectedSeal, sm.isSealed());

            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes(
                    "Unexpected attributes in metadata for StreamSegment " + name, expectedAttributes, sm);
        }
    }

    // Now, map all the Transactions.
    for (String name : storageSegments) {
        String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(name);
        if (parentName != null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for Transaction " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for Transaction " + name, sm);
            long expectedLength = getInitialLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for Transaction " + name,
                    expectedLength, sm.getDurableLogLength());
            Assert.assertEquals(
                    "Metadata does not have the expected value for isSealed for Transaction " + name,
                    expectedSeal, sm.isSealed());

            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes(
                    "Unexpected attributes in metadata for Transaction " + name, expectedAttributes, sm);

            // Check parenthood.
            Assert.assertNotEquals("No parent defined in metadata for Transaction " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, sm.getParentId());
            long parentId = context.metadata.getStreamSegmentId(parentName, false);
            Assert.assertEquals("Unexpected parent defined in metadata for Transaction " + name, parentId,
                    sm.getParentId());
        }
    }
}

From source file:com.redhat.rhn.manager.kickstart.KickstartFormatter.java

private String getRhnPost() {
    log.debug("getRhnPost called.");
    StringBuilder retval = new StringBuilder();
    retval.append("%" + KickstartScript.TYPE_POST);
    addLogBegin(retval, RHN_LOG_FILE, "");
    retval.append(BEGINRHN_LOG_APPEND);

    retval.append(renderKeys() + NEWLINE);

    List<ActivationKey> tokens = generateActKeyTokens(this.ksdata, this.session);

    HashSet updatePackages = getUpdatePackages(tokens);
    HashSet freshPackages = getFreshPackages(tokens);
    boolean isFresh = freshPackages.size() > 0;
    boolean isUpdate = updatePackages.size() > 0;

    // update the required/optional packages needed for the kickstart
    if (isUpdate || isFresh) {
        log.debug("need latest up2date");
        //order matters, therefore multiple logic branches
        retval.append(MKDIR_OPTIONAL + NEWLINE);
        if (isUpdate) {
            //wregglej - wget is broken, so workaround it.
            retval.append(CHDIR_OPT_RPMS + NEWLINE);

            retval.append(WGET_OPT_RPMS);
            for (Iterator itr = updatePackages.iterator(); itr.hasNext();) {
                retval.append(itr.next().toString() + SPACE);
            }
            retval.append(NEWLINE);
        }
        if (isFresh) {
            //wregglej - work around wget again.
            retval.append(CHDIR_RPMS + NEWLINE);

            retval.append(WGET_RPMS);
            for (Iterator itr = freshPackages.iterator(); itr.hasNext();) {
                retval.append(itr.next().toString() + SPACE);
            }
            retval.append(NEWLINE);
        }
        if (isUpdate) {
            retval.append(UPDATE_CMD);
            for (int i = 0; i < UPDATE_PKG_NAMES.length; i++) {
                retval.append(UPDATE_OPT_PATH + UPDATE_PKG_NAMES[i] + "* ");
            }
            retval.append(NEWLINE);
        }
        if (isFresh) {
            retval.append(FRESH_CMD + NEWLINE);
        }
    }

    if (this.ksdata.getKickstartDefaults().getVirtualizationType().getLabel().equals("para_host")) {
        retval.append(VIRT_HOST_GRUB_FIX);
    }

    // For rhel2,3,4 we import a different key.  otherwise we just
    // rely on the cobbler snippet below to import the key.
    if (this.ksdata.isRhel2()) {
        retval.append(IMPORT_RHN_KEY2 + NEWLINE);
    } else if (this.ksdata.isRhel3() || this.ksdata.isRhel4()) {
        retval.append(IMPORT_RHN_KEY34 + NEWLINE);
    }

    if (log.isDebugEnabled()) {
        log.debug("kickstart_host: [" + XMLRPC_HOST + "] kshost: [" + this.ksHost + "] indexof: "
                + this.ksHost.indexOf(XMLRPC_HOST));
    }

    String up2datehost = REDHAT_MGMT_SERVER;
    //check if server going through Spacewalk Proxy,
    //if so, register through proxy instead
    if (this.session != null && this.session.getSystemRhnHost() != null
            && !this.session.getSystemRhnHost().equals("unknown")) {
        up2datehost = this.session.getSystemRhnHost();
    }

    log.debug("adding perl -npe for /etc/sysconfig/rhn/up2date");
    if (this.ksdata.isRhel2()) {
        retval.append("perl -npe " + "'s|^(\\s*(noSSLS\\|s)erverURL\\s*=\\s*[^:]+://)[^/]*/|${1}" + up2datehost
                + "/|' -i /etc/sysconfig/rhn/rhn_register" + NEWLINE);
    }
    // both rhel 2 and rhel3/4 need the following
    retval.append("perl -npe " + "'s|^(\\s*(noSSLS\\|s)erverURL\\s*=\\s*[^:]+://)[^/]*/|\\${1}" + up2datehost
            + "/|' -i /etc/sysconfig/rhn/up2date" + NEWLINE);

    if (this.ksdata.getVerboseUp2date()) {
        retval.append("[ -r /etc/yum.conf ] && " + "perl -npe 's/debuglevel=2/debuglevel=5/' -i /etc/yum.conf"
                + NEWLINE);
        retval.append("[ -r /etc/sysconfig/rhn/up2date ] && "
                + "perl -npe 's/debug=0/debug=1/' -i /etc/sysconfig/rhn/up2date" + NEWLINE);
    }

    if (this.ksdata.getKickstartDefaults().getRemoteCommandFlag().booleanValue()) {
        retval.append(REMOTE_CMD + NEWLINE);
    }

    if (this.ksdata.getKickstartDefaults().getCfgManagementFlag().booleanValue()) {
        retval.append(CONFIG_CMD + NEWLINE);
    }

    retval.append(NEWLINE);
    retval.append(KSTREE);
    retval.append(NEWLINE);

    //RHEL 5u4 hack for bz 495680
    if (ksdata.isRhel5()) {
        retval.append("/etc/init.d/messagebus restart" + NEWLINE);
        retval.append("/etc/init.d/haldaemon restart" + NEWLINE);
    }
    retval.append("# begin cobbler snippet" + NEWLINE);
    addCobblerSnippet(retval, DEFAULT_MOTD);
    addCobblerSnippet(retval, REDHAT_REGISTER_SNIPPET);
    retval.append("# end cobbler snippet" + NEWLINE);

    retval.append(NEWLINE);
    retval.append(RHNCHECK + NEWLINE);
    addLogEnd(retval, RHN_LOG_FILE, "");

    retval.append(NEWLINE);
    // Work around for bug #522251
    if (!this.ksdata.getKickstartDefaults().getKstree().getChannel().getChannelArch().getName()
            .startsWith("s390")) {
        addCobblerSnippet(retval, "post_install_network_config");
    }
    addEnd(retval);
    return retval.toString();
}

From source file:export.UploadManager.java

public void loadWorkouts(final HashSet<WorkoutRef> pendingWorkouts, final Callback callback) {
    int cnt = pendingWorkouts.size();
    mSpinner.setTitle("Downloading workouts (" + cnt + ")");
    mSpinner.show();
    new AsyncTask<String, String, Uploader.Status>() {

        @Override
        protected void onProgressUpdate(String... values) {
            mSpinner.setMessage("Loading " + values[0] + " from " + values[1]);
        }

        @Override
        protected Uploader.Status doInBackground(String... params0) {
            for (WorkoutRef ref : pendingWorkouts) {
                publishProgress(ref.workoutName, ref.uploader);
                Uploader uploader = uploaders.get(ref.uploader);
                //TODO Add back in from runnerup
                // File f = ActivitySerializer.getFile(context, ref.workoutName);
                //File w = f;
                //if (f.exists()) {
                //w = ActivitySerializer.getFile(context, ref.workoutName + ".tmp");
                //}
                try {
                    //  uploader.downloadWorkout(w, ref.workoutKey);
                    //if (w != f) {
                    //  if (compareFiles(w, f) != true) {
                    //    System.err.println("overwriting " + f.getPath() + " with "
                    //          + w.getPath());
                    // TODO dialog
                    //f.delete();
                    //w.renameTo(f);
                    //} else {
                    //  System.err.println("file identical...deleting temporary "
                    //        + w.getPath());
                    //w.delete();
                    //}
                    //}
                } catch (Exception e) {
                    e.printStackTrace();
                    //w.delete();
                }
            }
            return Uploader.Status.OK;
        }

        @Override
        protected void onPostExecute(Uploader.Status result) {
            mSpinner.dismiss();
            if (callback != null) {
                callback.run(null, Uploader.Status.OK);
            }
        }
    }.execute("string");
}

From source file:org.owasp.jbrofuzz.core.Database.java

/**
 * <p>Return all the unique categories found across prototypes that are loaded
 * into the database.</p>
 * 
 * <p>Category examples include: "Replacive Fuzzers", "Exploits", etc.</p>
 * 
 * @return String[] uniqueCategories
 * 
 * @author subere@uncon.org
 * @version 1.5
 * @since 1.2
 */
public String[] getAllCategories() {

    final HashSet<String> o = new HashSet<String>();

    final String[] ids = getAllPrototypeIDs();
    for (final String id : ids) {
        final List<String> catArrayList = prototypes.get(id).getCategories();
        final String[] categoriesArray = new String[catArrayList.size()];
        catArrayList.toArray(categoriesArray);

        for (final String cCategory : categoriesArray) {
            o.add(cCategory);
        }
    }

    final String[] uCategoriesArray = new String[o.size()];
    o.toArray(uCategoriesArray);

    return uCategoriesArray;

}