Example usage for java.util Collections synchronizedSet

Introduction

On this page you can find example usage of java.util.Collections.synchronizedSet.

Prototype

public static <T> Set<T> synchronizedSet(Set<T> s) 

Document

Returns a synchronized (thread-safe) set backed by the specified set. All access to the backing set must go through the returned set, and callers must synchronize manually on the returned set when iterating over it.
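
The wrapper makes each individual method call atomic, but compound actions, above all iteration, still need manual locking on the returned set. A minimal sketch of that contract (class and thread names are illustrative):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SynchronizedSetDemo {
    public static void main(String[] args) throws InterruptedException {
        Set<String> names = Collections.synchronizedSet(new HashSet<>());

        // Individual calls (add, remove, contains, ...) are made
        // thread-safe by the wrapper, so a concurrent writer is fine.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 1000; i++) {
                names.add("name-" + i);
            }
        });
        writer.start();

        // Iteration is NOT atomic: hold the set's own monitor for the
        // whole traversal, exactly as the Javadoc requires.
        int totalLength = 0;
        synchronized (names) {
            for (String name : names) {
                totalLength += name.length();
            }
        }

        writer.join();
        System.out.println(names.size() + " names, snapshot length " + totalLength);
    }
}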

Usage

From source file:HSqlManager.java

@SuppressWarnings("Duplicates")
@Deprecated
private static void mycoCommonInitialize(int bps, Connection connection) throws SQLException, IOException {
    long time = System.currentTimeMillis();
    String base = new File("").getAbsolutePath();
    CSV.makeDirectory(new File(base + "/PhageData"));
    INSTANCE = ImportPhagelist.getInstance();
    //        INSTANCE.parseAllPhages(bps);
    written = true;
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    stat.execute("SET FILES LOG FALSE\n");
    PreparedStatement st = db.prepareStatement("Insert INTO Primerdb.Primers"
            + "(Bp,Sequence, CommonP, UniqueP, Picked, Strain, Cluster)" + " Values(?,?,true,false,false,?,?)");
    ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
    List<String[]> phages = new ArrayList<>();
    String strain = "";
    while (call.next()) {
        String[] r = new String[3];
        r[0] = call.getString("Strain");
        r[1] = call.getString("Cluster");
        r[2] = call.getString("Name");
        phages.add(r);
        if (r[2].equals("xkcd")) {
            strain = r[0];
        }
    }
    call.close();
    String x = strain;
    Set<String> clust = phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1]).collect(Collectors.toSet());
    // Populating a plain HashMap from parallelStream() is not thread-safe;
    // a ConcurrentHashMap makes the parallel puts below safe.
    Map<String, List<String>> clusters = new ConcurrentHashMap<>();
    clust.parallelStream().forEach(cluster -> clusters.put(cluster, phages.stream()
            .filter(a -> a[0].equals(x) && a[1].equals(cluster)).map(a -> a[2]).collect(Collectors.toList())));
    for (String z : clusters.keySet()) {
        try {
            List<String> clustphages = clusters.get(z);
            Set<String> primers = Collections.synchronizedSet(
                    CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + clustphages.get(0) + ".csv"));
            clustphages.remove(0);
            for (String phage : clustphages) {
                //                    String[] seqs = Fasta.parse(base + "/Fastas/" + phage + ".fasta");
                //                    String sequence =seqs[0]+seqs[1];
                //                    Map<String, List<Integer>> seqInd = new HashMap<>();
                //                    for (int i = 0; i <= sequence.length()-bps; i++) {
                //                        String sub=sequence.substring(i,i+bps);
                //                        if(seqInd.containsKey(sub)){
                //                            seqInd.get(sub).add(i);
                //                        }else {
                //                            List<Integer> list = new ArrayList<>();
                //                            list.add(i);
                //                            seqInd.put(sub,list);
                //                        }
                //                    }
                //                    primers = primers.stream().filter(seqInd::containsKey).collect(Collectors.toSet());
                //                    primers =Sets.intersection(primers,CSV.readCSV(base + "/PhageData/"+Integer.toString(bps)
                //                            + phage + ".csv"));
                //                    System.gc();
                //                            String[] seqs = Fasta.parse(base + "/Fastas/" + phage + ".fasta");
                //                            String sequence =seqs[0]+seqs[1];
                //                            primers.stream().filter(sequence::contains);
                primers.retainAll(CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv"));
                //                    Set<CharSequence> prim = primers;
                //                    for (CharSequence primer: primers){
                //                        if(seqInd.containsKey(primer)){
                //                            prim.remove(primer);
                //                        }
                //                    }
                //                    primers=prim;
            }
            int i = 0;
            for (String a : primers) {
                try {
                    //finish update
                    st.setInt(1, bps);
                    st.setString(2, a);
                    st.setString(3, x);
                    st.setString(4, z);
                    st.addBatch();
                } catch (SQLException e) {
                    e.printStackTrace();
                    System.out.println("Error occurred at " + x + " " + z);
                }
                i++;
                if (i == 1000) {
                    i = 0;
                    st.executeBatch();
                    db.commit();
                }
            }
            if (i > 0) {
                st.executeBatch();
                db.commit();
            }
        } catch (SQLException e) {
            e.printStackTrace();
            System.out.println("Error occurred at " + x + " " + z);
        }
        System.out.println(z);
    }
    stat.execute("SET FILES LOG TRUE\n");
    st.close();
    stat.close();
    System.out.println("Common Updated");
    System.out.println((System.currentTimeMillis() - time) / Math.pow(10, 3) / 60);
}
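
The synchronizedSet call above seeds a running intersection: the first file's primer set is wrapped, then shrunk with retainAll against each remaining file, and each retainAll is a single synchronized call on the wrapper. A simplified sketch of that pattern, where readColumn is a hypothetical stand-in for CSV.readCSV:

import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class CommonPrimers {
    // Hypothetical stand-in for CSV.readCSV in the example above.
    static Set<String> readColumn(String file) {
        return new HashSet<>(List.of("ACGT", "TTGC", "GGAA"));
    }

    static Set<String> commonAcross(List<String> files) {
        // Seed from the first file; every wrapper method, including
        // retainAll, synchronizes on the returned set's monitor.
        Set<String> common = Collections.synchronizedSet(readColumn(files.get(0)));
        for (String file : files.subList(1, files.size())) {
            common.retainAll(readColumn(file)); // running intersection
        }
        return common;
    }

    public static void main(String[] args) {
        System.out.println(commonAcross(List.of("a.csv", "b.csv", "c.csv")));
    }
}

As in the original, the wrapper is not strictly needed while a single thread owns the set; it buys per-call locking in case the set is later shared.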

From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java

private void performVerification(final Set<NodeIdentifier> nodeIds, final String method, final URI uri,
        final Object entity, final Map<String, String> headers,
        final StandardAsyncClusterResponse clusterResponse, final boolean merge, final Object monitor) {
    logger.debug("Verifying that mutable request {} {} can be made", method, uri.getPath());

    final Map<String, String> validationHeaders = new HashMap<>(headers);
    validationHeaders.put(REQUEST_VALIDATION_HTTP_HEADER, NODE_CONTINUE);

    final long startNanos = System.nanoTime();
    final int numNodes = nodeIds.size();
    final NodeRequestCompletionCallback completionCallback = new NodeRequestCompletionCallback() {
        final Set<NodeResponse> nodeResponses = Collections.synchronizedSet(new HashSet<>());

@Override
        public void onCompletion(final NodeResponse nodeResponse) {
            // Add the node response to our collection. We later need to know whether or
            // not this is the last node response, so we add the response and then check
            // the size within a synchronized block to ensure that those two things happen
            // atomically. Otherwise, we could have multiple threads checking the sizes of
            // the sets at the same time, which could result in multiple threads performing
            // the 'all nodes are complete' logic.
            final boolean allNodesResponded;
            synchronized (nodeResponses) {
                nodeResponses.add(nodeResponse);
                allNodesResponded = nodeResponses.size() == numNodes;
            }

            try {
                final long nanos = System.nanoTime() - startNanos;
                clusterResponse.addTiming("Completed Verification", nodeResponse.getNodeId().toString(), nanos);

                // If we have all of the node responses, then we can verify the responses
                // and if good replicate the original request to all of the nodes.
                if (allNodesResponded) {
                    clusterResponse.addTiming("Verification Completed", "All Nodes", nanos);

                    // Check if we have any requests that do not have a 150-Continue status code.
                    final long dissentingCount = nodeResponses.stream()
                            .filter(p -> p.getStatus() != NODE_CONTINUE_STATUS_CODE).count();

                    // If all nodes responded with 150-Continue, then we can replicate the original request
                    // to all nodes and we are finished.
                    if (dissentingCount == 0) {
                        logger.debug(
                                "Received verification from all {} nodes that mutable request {} {} can be made",
                                numNodes, method, uri.getPath());
                        replicate(nodeIds, method, uri, entity, headers, false, clusterResponse, true, merge,
                                monitor);
                        return;
                    }

                    try {
                        final Map<String, String> cancelLockHeaders = new HashMap<>(headers);
                        cancelLockHeaders.put(REQUEST_TRANSACTION_CANCELATION_HTTP_HEADER, "true");
                        final Thread cancelLockThread = new Thread(new Runnable() {
                            @Override
                            public void run() {
                                logger.debug("Found {} dissenting nodes for {} {}; canceling claim request",
                                        dissentingCount, method, uri.getPath());

                                final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(
                                        nodeId, method, createURI(uri, nodeId), entity, cancelLockHeaders, null,
                                        clusterResponse);

                                submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory,
                                        cancelLockHeaders);
                            }
                        });
                        cancelLockThread.setName("Cancel Flow Locks");
                        cancelLockThread.start();

                        // Add a NodeResponse for each node to the Cluster Response
                        // Check that all nodes responded successfully.
                        for (final NodeResponse response : nodeResponses) {
                            if (response.getStatus() != NODE_CONTINUE_STATUS_CODE) {
                                final Response clientResponse = response.getClientResponse();

                                final String message;
                                if (clientResponse == null) {
                                    message = "Node " + response.getNodeId()
                                            + " is unable to fulfill this request due to: Unexpected Response Code "
                                            + response.getStatus();

                                    logger.info(
                                            "Received a status of {} from {} for request {} {} when performing first stage of two-stage commit. The action will not occur",
                                            response.getStatus(), response.getNodeId(), method, uri.getPath());
                                } else {
                                    final String nodeExplanation = clientResponse.readEntity(String.class);
                                    message = "Node " + response.getNodeId()
                                            + " is unable to fulfill this request due to: " + nodeExplanation;

                                    logger.info(
                                            "Received a status of {} from {} for request {} {} when performing first stage of two-stage commit. "
                                                    + "The action will not occur. Node explanation: {}",
                                            response.getStatus(), response.getNodeId(), method, uri.getPath(),
                                            nodeExplanation);
                                }

                                // if a node reports forbidden, use that as the response failure
                                final RuntimeException failure;
                                if (response.getStatus() == Status.FORBIDDEN.getStatusCode()) {
                                    if (response.hasThrowable()) {
                                        failure = new AccessDeniedException(message, response.getThrowable());
                                    } else {
                                        failure = new AccessDeniedException(message);
                                    }
                                } else {
                                    if (response.hasThrowable()) {
                                        failure = new IllegalClusterStateException(message,
                                                response.getThrowable());
                                    } else {
                                        failure = new IllegalClusterStateException(message);
                                    }
                                }

                                clusterResponse.setFailure(failure, response.getNodeId());
                            }
                        }
                    } finally {
                        if (monitor != null) {
                            synchronized (monitor) {
                                monitor.notify();
                            }

                            logger.debug(
                                    "Notified monitor {} because request {} {} has failed due to at least 1 dissenting node",
                                    monitor, method, uri);
                        }
                    }
                }
            } catch (final Exception e) {
                clusterResponse.add(new NodeResponse(nodeResponse.getNodeId(), method, uri, e));

                // If there was a problem, we need to ensure that we add all of the other nodes' responses
                // to the Cluster Response so that the Cluster Response is complete.
                for (final NodeResponse otherResponse : nodeResponses) {
                    if (otherResponse.getNodeId().equals(nodeResponse.getNodeId())) {
                        continue;
                    }

                    clusterResponse.add(otherResponse);
                }
            }
        }
    };

    // Callback function for generating a NodeHttpRequestCallable that can be used to perform the work
    final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId,
            method, createURI(uri, nodeId), entity, validationHeaders, completionCallback, clusterResponse);

    // replicate the 'verification request' to all nodes
    submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, validationHeaders);
}
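
The synchronizedSet idiom worth copying from this example lives in the callback: the response is added and the size is checked inside one synchronized block on the wrapper, so exactly one thread observes the final response. A stripped-down sketch of that idiom (the response type and the demo threads are illustrative):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class LastResponseDetector {
    private final int expected;
    private final Set<String> responses = Collections.synchronizedSet(new HashSet<>());

    LastResponseDetector(int expected) {
        this.expected = expected;
    }

    /** Called concurrently, once per node; returns true for exactly one caller. */
    boolean onResponse(String nodeId) {
        // Add and size-check must happen atomically; otherwise two threads
        // could both see the final size and both run the completion logic.
        synchronized (responses) {
            responses.add(nodeId);
            return responses.size() == expected;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        LastResponseDetector detector = new LastResponseDetector(3);
        for (String node : new String[] { "n1", "n2", "n3" }) {
            new Thread(() -> {
                if (detector.onResponse(node)) {
                    System.out.println(node + " was the last responder");
                }
            }).start();
        }
        Thread.sleep(200); // crude wait so the demo threads can finish
    }
}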

From source file:org.apache.distributedlog.BKLogHandler.java

protected void readLogSegmentsFromStore(final Versioned<List<String>> logSegmentNames,
        final Comparator<LogSegmentMetadata> comparator, final LogSegmentFilter segmentFilter,
        final CompletableFuture<Versioned<List<LogSegmentMetadata>>> readResult) {
    Set<String> segmentsReceived = new HashSet<String>();
    segmentsReceived.addAll(segmentFilter.filter(logSegmentNames.getValue()));
    Set<String> segmentsAdded;
    final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
    final Map<String, LogSegmentMetadata> addedSegments = Collections
            .synchronizedMap(new HashMap<String, LogSegmentMetadata>());
    Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
    segmentsAdded = segmentChanges.getLeft();
    removedSegments.addAll(segmentChanges.getRight());

    if (segmentsAdded.isEmpty()) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("No segments added for {}.", getFullyQualifiedName());
        }

        // update the cache before #getCachedLogSegments to return
        updateLogSegmentCache(removedSegments, addedSegments);

        List<LogSegmentMetadata> segmentList;
        try {
            segmentList = getCachedLogSegments(comparator);
        } catch (UnexpectedException e) {
            readResult.completeExceptionally(e);
            return;
        }

        readResult.complete(new Versioned<List<LogSegmentMetadata>>(segmentList, logSegmentNames.getVersion()));
        return;
    }

    final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
    final AtomicInteger numFailures = new AtomicInteger(0);
    for (final String segment : segmentsAdded) {
        String logSegmentPath = logMetadata.getLogSegmentPath(segment);
        LogSegmentMetadata cachedSegment = metadataCache.get(logSegmentPath);
        if (null != cachedSegment) {
            addedSegments.put(segment, cachedSegment);
            completeReadLogSegmentsFromStore(removedSegments, addedSegments, comparator, readResult,
                    logSegmentNames.getVersion(), numChildren, numFailures);
            continue;
        }
        metadataStore.getLogSegment(logSegmentPath).whenComplete(new FutureEventListener<LogSegmentMetadata>() {

            @Override
            public void onSuccess(LogSegmentMetadata result) {
                addedSegments.put(segment, result);
                complete();
            }

            @Override
            public void onFailure(Throwable cause) {
                // LogSegmentNotFoundException exception is possible in two cases
                // 1. A log segment was deleted by truncation between the call to getChildren and read
                // attempt on the znode corresponding to the segment
                // 2. In progress segment has been completed => inprogress ZNode does not exist
                if (cause instanceof LogSegmentNotFoundException) {
                    removedSegments.add(segment);
                    complete();
                } else {
                    // fail fast
                    if (1 == numFailures.incrementAndGet()) {
                        readResult.completeExceptionally(cause);
                        return;
                    }
                }
            }

            private void complete() {
                completeReadLogSegmentsFromStore(removedSegments, addedSegments, comparator, readResult,
                        logSegmentNames.getVersion(), numChildren, numFailures);
            }
        });
    }
}
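
Here synchronizedSet and synchronizedMap absorb results from callbacks that the metadata store may fire on arbitrary threads, while an atomic counter detects completion. A reduced sketch of the same fan-out shape using CompletableFuture (the fetch method and segment names are illustrative):

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

public class SegmentFanOut {
    // Hypothetical async fetch standing in for metadataStore.getLogSegment.
    static CompletableFuture<String> fetch(String segment) {
        return CompletableFuture.supplyAsync(segment::toUpperCase);
    }

    public static void main(String[] args) throws InterruptedException {
        List<String> segments = List.of("seg-1", "seg-2", "seg-3");
        Set<String> removedSegments = Collections.synchronizedSet(new HashSet<>());
        Map<String, String> addedSegments = Collections.synchronizedMap(new HashMap<>());
        AtomicInteger remaining = new AtomicInteger(segments.size());

        for (String segment : segments) {
            fetch(segment).whenComplete((result, cause) -> {
                if (cause == null) {
                    addedSegments.put(segment, result); // safe from any callback thread
                } else {
                    removedSegments.add(segment);       // e.g. segment deleted mid-read
                }
                // Exactly one callback sees the counter hit zero and completes.
                if (remaining.decrementAndGet() == 0) {
                    System.out.println("added=" + addedSegments + " removed=" + removedSegments);
                }
            });
        }
        Thread.sleep(300); // crude wait so the demo's async work can finish
    }
}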

From source file:HSqlManager.java

@SuppressWarnings("Duplicates")
@Deprecated
public static void mycoUniqueDB(Connection connection, int bps) throws ClassNotFoundException, SQLException,
        InstantiationException, IllegalAccessException, IOException {
    long time = System.currentTimeMillis();
    DpalLoad.main(new String[1]);
    HSqlPrimerDesign.Dpal_Inst = DpalLoad.INSTANCE_WIN64;
    String base = new File("").getAbsolutePath();
    if (!written) {
        CSV.makeDirectory(new File(base + "/PhageData"));
        INSTANCE.parseAllPhages(bps);
    }
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    PrintWriter log = new PrintWriter(new File("javalog.log"));
    stat.execute("SET FILES LOG FALSE;\n");
    PreparedStatement st = db
            .prepareStatement("UPDATE Primerdb.Primers" + " SET UniqueP = true, Tm = ?, GC =?, Hairpin =?"
                    + "WHERE Cluster = ? and Strain = ? and " + "Sequence = ? and Bp = ?");
    ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
    List<String[]> phages = new ArrayList<>();
    String strain = "";
    while (call.next()) {
        String[] r = new String[3];
        r[0] = call.getString("Strain");
        r[1] = call.getString("Cluster");
        r[2] = call.getString("Name");
        phages.add(r);
        if (r[2].equals("xkcd")) {
            strain = r[0];
        }
    }
    call.close();
    String x = strain;
    Set<String> clust = phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1]).collect(Collectors.toSet());
    String[] clusters = clust.toArray(new String[clust.size()]);
    for (String z : clusters) {
        try {
            Set<String> nonclustphages = phages.stream().filter(a -> a[0].equals(x) && !a[1].equals(z))
                    .map(a -> a[2]).collect(Collectors.toSet());
            ResultSet resultSet = stat.executeQuery(
                    "Select Sequence from primerdb.primers" + " where Strain ='" + x + "' and Cluster ='" + z
                            + "' and CommonP = true" + " and Bp = " + Integer.valueOf(bps) + " ");
            Set<CharSequence> primers = Collections.synchronizedSet(new HashSet<>());
            while (resultSet.next()) {
                primers.add(resultSet.getString("Sequence"));
            }
            resultSet.close();
            for (String phage : nonclustphages) {
                //                    String[] seqs = Fasta.parse(base + "/Fastas/" + phage + ".fasta");
                //                    String sequence =seqs[0]+seqs[1];
                //                        Map<String, List<Integer>> seqInd = new HashMap<>();
                //                        for (int i = 0; i <= sequence.length()-bps; i++) {
                //                            String sub=sequence.substring(i,i+bps);
                //                            if(seqInd.containsKey(sub)){
                //                                seqInd.get(sub).add(i);
                //                            }else {
                //                                List<Integer> list = new ArrayList<>();
                //                                list.add(i);
                //                                seqInd.put(sub,list);
                //                            }
                //                        }
                //                    primers = primers.stream().filter(primer->!seqInd.containsKey(primer)).collect(Collectors.toSet());
                //                    primers =Sets.difference(primers,CSV.readCSV(base + "/PhageData/"+Integer.toString(bps)
                //                                    + phage + ".csv"));
                CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv").stream()
                        .filter(primers::contains).forEach(primers::remove);
                //                    System.gc();

            }
            int i = 0;
            for (CharSequence a : primers) {
                try {
                    st.setDouble(1, HSqlPrimerDesign.primerTm(a, 0, 800, 1.5, 0.2));
                    st.setDouble(2, HSqlPrimerDesign.gcContent(a));
                    st.setBoolean(3, HSqlPrimerDesign.calcHairpin((String) a, 4));
                    st.setString(4, z);
                    st.setString(5, x);
                    st.setString(6, a.toString());
                    st.setInt(7, bps);
                    st.addBatch();
                } catch (SQLException e) {
                    e.printStackTrace();
                    System.out.println("Error occurred at " + x + " " + z);
                }
                i++;
                if (i == 1000) {
                    i = 0;
                    st.executeBatch();
                    db.commit();
                }
            }
            if (i > 0) {
                st.executeBatch();
                db.commit();
            }
        } catch (SQLException e) {
            e.printStackTrace();
            System.out.println("Error occurred at " + x + " " + z);
        }
        log.println(z);
        log.flush();
        System.gc();
    }
    stat.execute("SET FILES LOG TRUE\n");
    st.close();
    stat.close();
    System.out.println("Unique Updated");
    System.out.println((System.currentTimeMillis() - time) / Math.pow(10, 3) / 60);
}
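
A side note on the filter/remove loop above: each contains and remove call locks the wrapper separately, so the compound sequence is not atomic (harmless here, since only one thread mutates the set). The single synchronized call removeAll expresses the same subtraction and takes the lock once per call; a small sketch with illustrative data:

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class RemoveAllSketch {
    public static void main(String[] args) {
        Set<CharSequence> primers = Collections.synchronizedSet(new HashSet<>());
        Collections.addAll(primers, "ACGT", "TTGC", "GGAA");

        // One synchronized call instead of many contains/remove pairs.
        primers.removeAll(Set.of("TTGC", "CCCC"));

        System.out.println(primers); // the two surviving primers
    }
}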

From source file:org.eclipse.equinox.http.servlet.tests.ServletTest.java

public void test_RegistrationTCCL1() {
    final Set<String> filterTCCL = Collections.synchronizedSet(new HashSet<String>());
    final Set<String> servletTCCL = Collections.synchronizedSet(new HashSet<String>());
    Filter tcclFilter = new Filter() {

@Override
        public void init(FilterConfig filterConfig) throws ServletException {
            filterTCCL.add(Thread.currentThread().getContextClassLoader().getClass().getName());
        }

        @Override
        public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
                throws IOException, ServletException {
            filterTCCL.add(Thread.currentThread().getContextClassLoader().getClass().getName());
            chain.doFilter(request, response);
        }

        @Override
        public void destroy() {
            filterTCCL.add(Thread.currentThread().getContextClassLoader().getClass().getName());
        }
    };
    HttpServlet tcclServlet = new HttpServlet() {

        @Override
        public void destroy() {
            super.destroy();
            servletTCCL.add(Thread.currentThread().getContextClassLoader().getClass().getName());
        }

        @Override
        public void init(ServletConfig config) throws ServletException {
            super.init(config);
            servletTCCL.add(Thread.currentThread().getContextClassLoader().getClass().getName());
        }

        private static final long serialVersionUID = 1L;

        @Override
        protected void service(HttpServletRequest request, HttpServletResponse response)
                throws ServletException, IOException {
            servletTCCL.add(Thread.currentThread().getContextClassLoader().getClass().getName());
            response.getWriter().print(Thread.currentThread().getContextClassLoader().getClass().getName());
        }

    };

    ClassLoader originalTCCL = Thread.currentThread().getContextClassLoader();
    ClassLoader dummy = new ClassLoader() {
    };
    Thread.currentThread().setContextClassLoader(dummy);
    String expected = dummy.getClass().getName();
    String actual = null;
    ExtendedHttpService extendedHttpService = (ExtendedHttpService) getHttpService();
    try {
        extendedHttpService.registerFilter("/tccl", tcclFilter, null, null);
        extendedHttpService.registerServlet("/tccl", tcclServlet, null, null);
        actual = requestAdvisor.request("tccl");
    } catch (Exception e) {
        fail("Unexpected exception: " + e);
    } finally {
        Thread.currentThread().setContextClassLoader(originalTCCL);
        try {
            extendedHttpService.unregister("/tccl");
            extendedHttpService.unregisterFilter(tcclFilter);
        } catch (IllegalArgumentException e) {
            // ignore
        }
    }
    assertEquals(expected, actual);
    assertEquals("Wrong filterTCCL size: " + filterTCCL, 1, filterTCCL.size());
    assertTrue("Wrong filterTCCL: " + filterTCCL, filterTCCL.contains(expected));
    assertEquals("Wrong httpTCCL size: " + servletTCCL, 1, servletTCCL.size());
    assertTrue("Wrong servletTCCL: " + servletTCCL, servletTCCL.contains(expected));

}
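
The test above shows a common testing pattern: callbacks run on container-managed threads, so each observation is dropped into a synchronized set and asserted only after the work completes. A minimal framework-free sketch of the same idea (the thread pool and assertion are illustrative):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ObservationCollector {
    public static void main(String[] args) throws InterruptedException {
        // Observations arrive from threads we do not control, so the
        // collecting set must be thread-safe.
        Set<String> observedLoaders = Collections.synchronizedSet(new HashSet<>());

        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 16; i++) {
            pool.execute(() -> observedLoaders
                    .add(Thread.currentThread().getContextClassLoader().getClass().getName()));
        }
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);

        // Assert after the fact, once all workers are done.
        if (observedLoaders.size() != 1) {
            throw new AssertionError("Expected one context class loader, saw: " + observedLoaders);
        }
        System.out.println("All workers saw: " + observedLoaders);
    }
}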

From source file:com.twitter.distributedlog.BKLogHandler.java

private void asyncGetLedgerListInternal(final Comparator<LogSegmentMetadata> comparator,
        final LogSegmentFilter segmentFilter, final Watcher watcher,
        final GenericCallback<List<LogSegmentMetadata>> finalCallback, final AtomicInteger numAttemptsLeft,
        final AtomicLong backoffMillis) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Async getting ledger list for {}.", getFullyQualifiedName());
        }
        final GenericCallback<List<LogSegmentMetadata>> callback = new GenericCallback<List<LogSegmentMetadata>>() {
            @Override
            public void operationComplete(int rc, List<LogSegmentMetadata> result) {
                long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);
                if (KeeperException.Code.OK.intValue() != rc) {
                    getListStat.registerFailedEvent(elapsedMicros);
                } else {
                    if (LogSegmentFilter.DEFAULT_FILTER == segmentFilter) {
                        isFullListFetched.set(true);
                    }
                    getListStat.registerSuccessfulEvent(elapsedMicros);
                }
                finalCallback.operationComplete(rc, result);
            }
        };
        zooKeeperClient.get().getChildren(logMetadata.getLogSegmentsPath(), watcher,
                new AsyncCallback.Children2Callback() {
                    @Override
                    public void processResult(final int rc, final String path, final Object ctx,
                            final List<String> children, final Stat stat) {
                        if (KeeperException.Code.OK.intValue() != rc) {

                            if ((KeeperException.Code.CONNECTIONLOSS.intValue() == rc
                                    || KeeperException.Code.SESSIONEXPIRED.intValue() == rc
                                    || KeeperException.Code.SESSIONMOVED.intValue() == rc)
                                    && numAttemptsLeft.decrementAndGet() > 0) {
                                long backoffMs = backoffMillis.get();
                                backoffMillis.set(Math.min(conf.getZKRetryBackoffMaxMillis(), 2 * backoffMs));
                                scheduler.schedule(new Runnable() {
                                    @Override
                                    public void run() {
                                        asyncGetLedgerListInternal(comparator, segmentFilter, watcher,
                                                finalCallback, numAttemptsLeft, backoffMillis);
                                    }
                                }, backoffMs, TimeUnit.MILLISECONDS);
                                return;
                            }
                            callback.operationComplete(rc, null);
                            return;
                        }

                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Got ledger list from {} : {}", logMetadata.getLogSegmentsPath(),
                                    children);
                        }

                        ledgerListWatchSet.set(true);
                        Set<String> segmentsReceived = new HashSet<String>();
                        segmentsReceived.addAll(segmentFilter.filter(children));
                        Set<String> segmentsAdded;
                        final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
                        final Map<String, LogSegmentMetadata> addedSegments = Collections
                                .synchronizedMap(new HashMap<String, LogSegmentMetadata>());
                        Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
                        segmentsAdded = segmentChanges.getLeft();
                        removedSegments.addAll(segmentChanges.getRight());

                        if (segmentsAdded.isEmpty()) {
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("No segments added for {}.", getFullyQualifiedName());
                            }

                            // update the cache before fetch
                            logSegmentCache.update(removedSegments, addedSegments);

                            List<LogSegmentMetadata> segmentList;
                            try {
                                segmentList = getCachedLogSegments(comparator);
                            } catch (UnexpectedException e) {
                                callback.operationComplete(KeeperException.Code.DATAINCONSISTENCY.intValue(),
                                        null);
                                return;
                            }
                            callback.operationComplete(KeeperException.Code.OK.intValue(), segmentList);
                            notifyUpdatedLogSegments(segmentList);
                            if (!removedSegments.isEmpty()) {
                                notifyOnOperationComplete();
                            }
                            return;
                        }

                        final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
                        final AtomicInteger numFailures = new AtomicInteger(0);
                        for (final String segment : segmentsAdded) {
                            metadataStore.getLogSegment(logMetadata.getLogSegmentPath(segment))
                                    .addEventListener(new FutureEventListener<LogSegmentMetadata>() {

                                        @Override
                                        public void onSuccess(LogSegmentMetadata result) {
                                            addedSegments.put(segment, result);
                                            complete();
                                        }

                                        @Override
                                        public void onFailure(Throwable cause) {
                                            // NONODE exception is possible in two cases
                                            // 1. A log segment was deleted by truncation between the call to getChildren and read
                                            // attempt on the znode corresponding to the segment
                                            // 2. In progress segment has been completed => inprogress ZNode does not exist
                                            if (cause instanceof KeeperException
                                                    && KeeperException.Code.NONODE == ((KeeperException) cause)
                                                            .code()) {
                                                removedSegments.add(segment);
                                                complete();
                                            } else {
                                                // fail fast
                                                if (1 == numFailures.incrementAndGet()) {
                                                    int rcToReturn = KeeperException.Code.SYSTEMERROR
                                                            .intValue();
                                                    if (cause instanceof KeeperException) {
                                                        rcToReturn = ((KeeperException) cause).code()
                                                                .intValue();
                                                    } else if (cause instanceof ZKException) {
                                                        rcToReturn = ((ZKException) cause)
                                                                .getKeeperExceptionCode().intValue();
                                                    }
                                                    // :( properly we need dlog related response code.
                                                    callback.operationComplete(rcToReturn, null);
                                                    return;
                                                }
                                            }
                                        }

                                        private void complete() {
                                            if (0 == numChildren.decrementAndGet() && numFailures.get() == 0) {
                                                // update the cache only when fetch completed
                                                logSegmentCache.update(removedSegments, addedSegments);
                                                List<LogSegmentMetadata> segmentList;
                                                try {
                                                    segmentList = getCachedLogSegments(comparator);
                                                } catch (UnexpectedException e) {
                                                    callback.operationComplete(
                                                            KeeperException.Code.DATAINCONSISTENCY.intValue(),
                                                            null);
                                                    return;
                                                }
                                                callback.operationComplete(KeeperException.Code.OK.intValue(),
                                                        segmentList);
                                                notifyUpdatedLogSegments(segmentList);
                                                notifyOnOperationComplete();
                                            }
                                        }
                                    });
                        }
                    }
                }, null);
    } catch (ZooKeeperClient.ZooKeeperConnectionException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    } catch (InterruptedException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    }
}

From source file:com.bigdata.dastor.service.StorageService.java

private void unbootstrap(final Runnable onFinish) {
    final CountDownLatch latch = new CountDownLatch(DatabaseDescriptor.getNonSystemTables().size());
    for (final String table : DatabaseDescriptor.getNonSystemTables()) {
        Multimap<Range, InetAddress> rangesMM = getChangedRangesForLeaving(table,
                FBUtilities.getLocalAddress());
        if (logger_.isDebugEnabled())
            logger_.debug("Ranges needing transfer are [" + StringUtils.join(rangesMM.keySet(), ",") + "]");
        if (rangesMM.isEmpty()) {
            latch.countDown();
            continue;
        }

        setMode("Leaving: streaming data to other nodes", true);
        final Set<Map.Entry<Range, InetAddress>> pending = Collections
                .synchronizedSet(new HashSet<Map.Entry<Range, InetAddress>>(rangesMM.entries()));
        for (final Map.Entry<Range, InetAddress> entry : rangesMM.entries()) {
            final Range range = entry.getKey();
            final InetAddress newEndpoint = entry.getValue();
            final Runnable callback = new Runnable() {
                public void run() {
                    pending.remove(entry);
                    if (pending.isEmpty())
                        latch.countDown();
                }
            };
            StageManager.getStage(StageManager.STREAM_STAGE).execute(new Runnable() {
                public void run() {
                    // TODO each call to transferRanges re-flushes, this is potentially a lot of waste
                    StreamOut.transferRanges(newEndpoint, table, Arrays.asList(range), callback);
                }
            });
        }
    }

    // wait for the transfer runnables to signal the latch.
    logger_.debug("waiting for stream aks.");
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    logger_.debug("stream acks all received.");
    leaveRing();
    onFinish.run();
}
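
The idiom here is a synchronized "pending" set drained by completion callbacks: each transfer removes its own entry, and the callback that empties the set counts down the latch. A compact sketch of that shape with simulated transfers (note the remove/isEmpty pair spans two lock acquisitions, which is tolerable with a one-shot latch like this one):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CountDownLatch;

public class PendingTransfers {
    public static void main(String[] args) throws InterruptedException {
        Set<String> pending = Collections.synchronizedSet(new HashSet<>());
        Collections.addAll(pending, "range-1", "range-2", "range-3");
        CountDownLatch done = new CountDownLatch(1);

        // Snapshot taken before any worker starts, so iteration is safe.
        for (String range : Set.copyOf(pending)) {
            new Thread(() -> {
                // ... simulated transfer work ...
                pending.remove(range);
                if (pending.isEmpty()) {
                    done.countDown();
                }
            }).start();
        }

        done.await();
        System.out.println("all transfers acknowledged");
    }
}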

From source file:jenkins.model.Jenkins.java

private synchronized TaskBuilder loadTasks() throws IOException {
    File projectsDir = new File(root, "jobs");
    if (!projectsDir.getCanonicalFile().isDirectory() && !projectsDir.mkdirs()) {
        if (projectsDir.exists())
            throw new IOException(projectsDir + " is not a directory");
        throw new IOException("Unable to create " + projectsDir
                + "\nPermission issue? Please create this directory manually.");
    }
    File[] subdirs = projectsDir.listFiles();

    final Set<String> loadedNames = Collections.synchronizedSet(new HashSet<String>());

    TaskGraphBuilder g = new TaskGraphBuilder();
    Handle loadJenkins = g.requires(EXTENSIONS_AUGMENTED).attains(JOB_LOADED).add("Loading global config",
            new Executable() {
                public void run(Reactor session) throws Exception {
                    XmlFile cfg = getConfigFile();
                    if (cfg.exists()) {
                        // reset some data that may not exist in the disk file
                        // so that we can take a proper compensation action later.
                        primaryView = null;
                        views.clear();

                        // load from disk
                        cfg.unmarshal(Jenkins.this);
                    }

                    // if we are loading old data that doesn't have this field
                    if (slaves != null && !slaves.isEmpty() && nodes.isLegacy()) {
                        nodes.setNodes(slaves);
                        slaves = null;
                    } else {
                        nodes.load();
                    }

                    clouds.setOwner(Jenkins.this);
                }
            });

    for (final File subdir : subdirs) {
        g.requires(loadJenkins).attains(JOB_LOADED).notFatal().add("Loading job " + subdir.getName(),
                new Executable() {
                    public void run(Reactor session) throws Exception {
                        if (!Items.getConfigFile(subdir).exists()) {
                        // No job config file, so this is not a Jenkins job; skip it.
                            return;
                        }
                        TopLevelItem item = (TopLevelItem) Items.load(Jenkins.this, subdir);
                        items.put(item.getName(), item);
                        loadedNames.add(item.getName());
                    }
                });
    }

    g.requires(JOB_LOADED).add("Cleaning up old builds", new Executable() {
        public void run(Reactor reactor) throws Exception {
            // anything we didn't load from disk, throw them away.
            // doing this after loading from disk allows newly loaded items
            // to inspect what already existed in memory (in case of reloading)

            // retainAll doesn't work well because of CopyOnWriteMap implementation, so remove one by one
            // hopefully there shouldn't be too many of them.
            for (String name : items.keySet()) {
                if (!loadedNames.contains(name))
                    items.remove(name);
            }
        }
    });

    g.requires(JOB_LOADED).add("Finalizing set up", new Executable() {
        public void run(Reactor session) throws Exception {
            rebuildDependencyGraph();

            {// recompute label objects - populates the labels mapping.
                for (Node slave : nodes.getNodes())
                    // Note that not all labels are visible until the slaves have connected.
                    slave.getAssignedLabels();
                getAssignedLabels();
            }

            // initialize views by inserting the default view if necessary
            // this is both for clean Jenkins and for backward compatibility.
            if (views.size() == 0 || primaryView == null) {
                View v = new AllView(Messages.Hudson_ViewName());
                setViewOwner(v);
                views.add(0, v);
                primaryView = v.getViewName();
            }

            if (useSecurity != null && !useSecurity) {
                // forced reset to the unsecure mode.
                // this works as an escape hatch for people who locked themselves out.
                authorizationStrategy = AuthorizationStrategy.UNSECURED;
                setSecurityRealm(SecurityRealm.NO_AUTHENTICATION);
            } else {
                // read in old data that doesn't have the security field set
                if (authorizationStrategy == null) {
                    if (useSecurity == null)
                        authorizationStrategy = AuthorizationStrategy.UNSECURED;
                    else
                        authorizationStrategy = new LegacyAuthorizationStrategy();
                }
                if (securityRealm == null) {
                    if (useSecurity == null)
                        setSecurityRealm(SecurityRealm.NO_AUTHENTICATION);
                    else
                        setSecurityRealm(new LegacySecurityRealm());
                } else {
                    // force the set to proxy
                    setSecurityRealm(securityRealm);
                }
            }

            // Initialize the filter with the crumb issuer
            setCrumbIssuer(crumbIssuer);

            // auto register root actions
            for (Action a : getExtensionList(RootAction.class))
                if (!actions.contains(a))
                    actions.add(a);
        }
    });

    return g;
}
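
loadedNames is filled by job-loading tasks that the reactor may run in parallel, then read single-threaded to prune stale entries. A condensed sketch of that load-then-prune shape (job names and the items map are illustrative):

import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class LoadThenPrune {
    public static void main(String[] args) {
        Map<String, String> items = new ConcurrentHashMap<>();
        items.put("stale-job", "left over from a previous load");

        // Parallel loading phase: record every successfully loaded name.
        Set<String> loadedNames = Collections.synchronizedSet(new HashSet<>());
        List.of("job-a", "job-b").parallelStream().forEach(name -> {
            items.put(name, "config for " + name);
            loadedNames.add(name); // safe from any loader thread
        });

        // Single-threaded cleanup: drop anything not loaded this time around.
        items.keySet().removeIf(name -> !loadedNames.contains(name));
        System.out.println(items.keySet());
    }
}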

From source file:org.apache.hadoop.mapred.JobTracker.java

/**
 * Adds a new node to the jobtracker. It involves adding it to the expiry
 * thread and adding it for resolution.
 * 
 * Assumes JobTracker, taskTrackers and trackerExpiryQueue are locked on entry
 * 
 * @param status Task Tracker's status
 */
private void addNewTracker(TaskTracker taskTracker) throws UnknownHostException {
    TaskTrackerStatus status = taskTracker.getStatus();
    trackerExpiryQueue.add(status);

    //  Register the tracker if it's not registered
    String hostname = status.getHost();
    if (getNode(status.getTrackerName()) == null) {
        // Making the network location resolution inline .. 
        resolveAndAddToTopology(hostname);
    }

    // add it to the set of tracker per host
    Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostname);
    if (trackers == null) {
        trackers = Collections.synchronizedSet(new HashSet<TaskTracker>());
        hostnameToTaskTracker.put(hostname, trackers);
    }
    statistics.taskTrackerAdded(status.getTrackerName());
    getInstrumentation().addTrackers(1);
    LOG.info("Adding tracker " + status.getTrackerName() + " to host " + hostname);
    trackers.add(taskTracker);
}
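
The get-then-put sequence above is only safe because, per the Javadoc, the caller already holds the JobTracker lock. Without an external lock, computeIfAbsent on a ConcurrentHashMap makes the per-host set creation atomic while synchronizedSet keeps each set safe to mutate; a brief sketch:

import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class HostRegistry {
    private final Map<String, Set<String>> trackersByHost = new ConcurrentHashMap<>();

    /** Safe to call from any thread; no external lock required. */
    void addTracker(String hostname, String trackerName) {
        trackersByHost
                .computeIfAbsent(hostname, h -> Collections.synchronizedSet(new HashSet<>()))
                .add(trackerName); // add() is synchronized by the wrapper
    }

    public static void main(String[] args) {
        HostRegistry registry = new HostRegistry();
        registry.addTracker("host-1", "tracker-a");
        registry.addTracker("host-1", "tracker-b");
        System.out.println(registry.trackersByHost);
    }
}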