Example usage for java.util Collections synchronizedList

Introduction

This page collects usage examples for java.util Collections synchronizedList from open-source projects.

Prototype

public static <T> List<T> synchronizedList(List<T> list) 

Document

Returns a synchronized (thread-safe) list backed by the specified list.
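
The returned wrapper serializes individual calls on the backing list, but iteration over it is not atomic: the java.util.Collections javadoc requires callers to synchronize on the returned list while iterating. A minimal, self-contained sketch (not taken from the projects below) illustrating both points:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListSketch {
    public static void main(String[] args) throws InterruptedException {
        // Wrap a plain ArrayList; each individual call on 'results' is synchronized.
        List<String> results = Collections.synchronizedList(new ArrayList<>());

        // Adding from several threads needs no external locking.
        Thread worker = new Thread(() -> results.add("from-worker"));
        worker.start();
        results.add("from-main");
        worker.join();

        // Iteration is not atomic: hold the wrapper's monitor while iterating.
        synchronized (results) {
            for (String s : results) {
                System.out.println(s);
            }
        }
    }
}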

Usage

From source file:org.apache.syncope.core.persistence.dao.impl.SubjectSearchDAOImpl.java

@SuppressWarnings("unchecked")
private <T extends AbstractSubject> List<T> doSearch(final Set<Long> adminRoles, final SearchCond nodeCond,
        final int page, final int itemsPerPage, final List<OrderByClause> orderBy, final SubjectType type) {

    List<Object> parameters = Collections.synchronizedList(new ArrayList<Object>());

    // 1. get the query string from the search condition
    SearchSupport svs = new SearchSupport(type);
    StringBuilder queryString = getQuery(nodeCond, parameters, type, svs);

    // 2. take into account administrative roles and ordering
    OrderBySupport orderBySupport = parseOrderBy(type, svs, orderBy);
    if (queryString.charAt(0) == '(') {
        queryString.insert(0, buildSelect(orderBySupport));
        queryString.append(buildWhere(orderBySupport, type));
    } else {
        queryString.insert(0, buildSelect(orderBySupport).append('('));
        queryString.append(')').append(buildWhere(orderBySupport, type));
    }
    queryString.append(getAdminRolesFilter(adminRoles, type)).append(')').append(buildOrderBy(orderBySupport));

    // 3. prepare the search query
    Query query = entityManager.createNativeQuery(queryString.toString());

    // 4. page starts from 1, while setFirstResult() starts from 0
    query.setFirstResult(itemsPerPage * (page <= 0 ? 0 : page - 1));

    if (itemsPerPage >= 0) {
        query.setMaxResults(itemsPerPage);
    }

    // 5. populate the search query with parameter values
    fillWithParameters(query, parameters);

    LOG.debug("Native query\n{}\nwith parameters\n{}", queryString.toString(), parameters);

    // 6. Prepare the result (avoiding duplicates)
    List<T> result = new ArrayList<T>();

    for (Object subjectId : query.getResultList()) {
        long actualId;
        if (subjectId instanceof Object[]) {
            actualId = ((Number) ((Object[]) subjectId)[0]).longValue();
        } else {
            actualId = ((Number) subjectId).longValue();
        }

        T subject = type == SubjectType.USER ? (T) userDAO.find(actualId) : (T) roleDAO.find(actualId);
        if (subject == null) {
            LOG.error("Could not find {} with id {}, even though returned by the native query", type,
                    subjectId);
        } else {
            if (!result.contains(subject)) {
                result.add(subject);
            }
        }
    }

    return result;
}

From source file:org.bonej.wrapperPlugins.AnisotropyWrapper.java

private List<Vector3d> runDirectionsInParallel(final RandomAccessibleInterval<BitType> interval)
        throws ExecutionException, InterruptedException {
    final int cores = Runtime.getRuntime().availableProcessors();
    // The parallelization of the MILPlane algorithm is a memory bound
    // problem, which is why speed gains start to drop after 5 cores. With much
    // larger 'nThreads' it slows down due to overhead. Of course '5' here is a
    // bit of a magic number, which might not hold true for all environments,
    // but we need some kind of upper bound
    final int nThreads = Math.max(5, cores);
    final ExecutorService executor = Executors.newFixedThreadPool(nThreads);
    final Callable<Vector3d> milTask = () -> milOp.calculate(interval, randomQuaternion());
    final List<Future<Vector3d>> futures = generate(() -> milTask).limit(directions).map(executor::submit)
            .collect(toList());
    final List<Vector3d> pointCloud = Collections.synchronizedList(new ArrayList<>(directions));
    final int futuresSize = futures.size();
    final AtomicInteger progress = new AtomicInteger();
    for (final Future<Vector3d> future : futures) {
        statusService.showProgress(progress.getAndIncrement(), futuresSize);
        pointCloud.add(future.get());
    }
    shutdownAndAwaitTermination(executor);
    return pointCloud;
}

From source file:org.apache.syncope.core.persistence.jpa.dao.JPASubjectSearchDAO.java

@SuppressWarnings("unchecked")
private <T extends Subject<?, ?, ?>> List<T> doSearch(final Set<Long> adminRoles, final SearchCond nodeCond,
        final int page, final int itemsPerPage, final List<OrderByClause> orderBy, final SubjectType type) {

    List<Object> parameters = Collections.synchronizedList(new ArrayList<>());

    // 1. get the query string from the search condition
    SearchSupport svs = new SearchSupport(type);
    StringBuilder queryString = getQuery(nodeCond, parameters, type, svs);

    // 2. take into account administrative roles and ordering
    OrderBySupport orderBySupport = parseOrderBy(type, svs, orderBy);
    if (queryString.charAt(0) == '(') {
        queryString.insert(0, buildSelect(orderBySupport));
        queryString.append(buildWhere(orderBySupport, type));
    } else {
        queryString.insert(0, buildSelect(orderBySupport).append('('));
        queryString.append(')').append(buildWhere(orderBySupport, type));
    }
    queryString.append(getAdminRolesFilter(adminRoles, type)).append(')').append(buildOrderBy(orderBySupport));

    // 3. prepare the search query
    Query query = entityManager.createNativeQuery(queryString.toString());

    // 4. page starts from 1, while setFirstResult() starts from 0
    query.setFirstResult(itemsPerPage * (page <= 0 ? 0 : page - 1));

    if (itemsPerPage >= 0) {
        query.setMaxResults(itemsPerPage);
    }

    // 5. populate the search query with parameter values
    fillWithParameters(query, parameters);

    LOG.debug("Native query\n{}\nwith parameters\n{}", queryString.toString(), parameters);

    // 6. Prepare the result (avoiding duplicates)
    List<T> result = new ArrayList<>();

    for (Object subjectId : query.getResultList()) {
        long actualId;
        if (subjectId instanceof Object[]) {
            actualId = ((Number) ((Object[]) subjectId)[0]).longValue();
        } else {
            actualId = ((Number) subjectId).longValue();
        }

        T subject = type == SubjectType.USER ? (T) userDAO.find(actualId) : (T) roleDAO.find(actualId);
        if (subject == null) {
            LOG.error("Could not find {} with id {}, even though returned by the native query", type, actualId);
        } else {
            if (!result.contains(subject)) {
                result.add(subject);
            }
        }
    }

    return result;
}

From source file:com.gemini.provision.loadbalancer.openstack.LoadBalancerProviderOpenStackImpl.java

@Override
public List<GeminiPoolMember> getPoolMembers(GeminiTenant tenant, GeminiEnvironment env,
        GeminiLoadBalancerPool pool) {
    List<GeminiPoolMember> lbPools = Collections.synchronizedList(new ArrayList<>());

    //authenticate the session with the OpenStack installation
    OSClient os = getOSClient(tenant, env);
    if (os == null) {
        Logger.error("Failed to authenticate Tenant: {}",
                ToStringBuilder.reflectionToString(tenant, ToStringStyle.MULTI_LINE_STYLE));
        return null;
    }
    List<? extends Member> members = os.networking().loadbalancers().member().list();
    members.stream().filter(member -> member != null).forEach(member -> {
        GeminiPoolMember geminiPoolMember = new GeminiPoolMember();
        geminiPoolMember.setCloudID(member.getId());
        geminiPoolMember.setAdminState(member.isAdminStateUp() ? AdminState.ADMIN_UP : AdminState.ADMIN_DOWN);
        geminiPoolMember.setProvisionState(ProvisionState.fromString(member.getStatus()));
        geminiPoolMember.setIpAddress(member.getAddress());
        geminiPoolMember.setProtocolPort(member.getProtocolPort());
        geminiPoolMember.setWeight(member.getWeight());
        geminiPoolMember.setPoolId(member.getPoolId());
        lbPools.add(geminiPoolMember);
    });
    return lbPools;
}

From source file:com.emc.vipr.sync.CasMigrationTest.java

protected List<String> createTestClips(FPPool pool, int maxBlobSize, int thisMany, Writer summaryWriter)
        throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_SETUP_THREADS);

    System.out.print("Creating clips");

    List<String> clipIds = Collections.synchronizedList(new ArrayList<String>());
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());
    for (int clipIdx = 0; clipIdx < thisMany; clipIdx++) {
        service.submit(new ClipWriter(pool, clipIds, maxBlobSize, summaries));
    }

    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();

    Collections.sort(summaries);
    for (String summary : summaries) {
        summaryWriter.append(summary);
    }

    System.out.println();

    return clipIds;
}

From source file:com.smartitengineering.cms.spi.impl.content.VelocityGeneratorTest.java

@Test
public void testMultiVelocityRepGeneration() throws IOException {
    TypeRepresentationGenerator generator = new VelocityRepresentationGenerator();
    final RepresentationTemplate template = mockery.mock(RepresentationTemplate.class);
    WorkspaceAPIImpl impl = new WorkspaceAPIImpl() {

        @Override
        public RepresentationTemplate getRepresentationTemplate(WorkspaceId id, String name) {
            return template;
        }
    };
    impl.setRepresentationGenerators(Collections.singletonMap(TemplateType.VELOCITY, generator));
    final RepresentationProvider provider = new RepresentationProviderImpl();
    final WorkspaceAPI api = impl;
    registerBeanFactory(api);
    final Content content = mockery.mock(Content.class);
    final Field field = mockery.mock(Field.class);
    final FieldValue value = mockery.mock(FieldValue.class);
    final Map<String, Field> fieldMap = mockery.mock(Map.class);
    final ContentType type = mockery.mock(ContentType.class);
    final Map<String, RepresentationDef> reps = mockery.mock(Map.class, "repMap");
    final RepresentationDef def = mockery.mock(RepresentationDef.class);
    final int threadCount = new Random().nextInt(100);
    logger.info("Number of parallel threads " + threadCount);
    mockery.checking(new Expectations() {

        {
            exactly(threadCount).of(template).getTemplateType();
            will(returnValue(TemplateType.VELOCITY));
            exactly(threadCount).of(template).getTemplate();
            final byte[] toByteArray = IOUtils.toByteArray(
                    getClass().getClassLoader().getResourceAsStream("scripts/velocity/test-template.vm"));
            will(returnValue(toByteArray));
            exactly(threadCount).of(template).getName();
            will(returnValue(REP_NAME));
            for (int i = 0; i < threadCount; ++i) {
                exactly(1).of(value).getValue();
                will(returnValue(String.valueOf(i)));
            }
            exactly(threadCount).of(field).getValue();
            will(returnValue(value));
            exactly(threadCount).of(fieldMap).get(with(Expectations.<String>anything()));
            will(returnValue(field));
            exactly(threadCount).of(content).getFields();
            will(returnValue(fieldMap));
            exactly(threadCount).of(content).getContentDefinition();
            will(returnValue(type));
            final ContentId contentId = mockery.mock(ContentId.class);
            exactly(2 * threadCount).of(content).getContentId();
            will(returnValue(contentId));
            final WorkspaceId wId = mockery.mock(WorkspaceId.class);
            exactly(threadCount).of(contentId).getWorkspaceId();
            will(returnValue(wId));
            exactly(2 * threadCount).of(type).getRepresentationDefs();
            will(returnValue(reps));
            exactly(2 * threadCount).of(reps).get(with(REP_NAME));
            will(returnValue(def));
            exactly(threadCount).of(def).getParameters();
            will(returnValue(Collections.emptyMap()));
            exactly(threadCount).of(def).getMIMEType();
            will(returnValue(GroovyGeneratorTest.MIME_TYPE));
            final ResourceUri rUri = mockery.mock(ResourceUri.class);
            exactly(threadCount).of(def).getResourceUri();
            will(returnValue(rUri));
            exactly(threadCount).of(rUri).getValue();
            will(returnValue("iUri"));
        }
    });
    final Set<String> set = Collections.synchronizedSet(new LinkedHashSet<String>(threadCount));
    final List<String> list = Collections.synchronizedList(new ArrayList<String>(threadCount));
    final AtomicInteger integer = new AtomicInteger(0);
    Threads group = new Threads();
    for (int i = 0; i < threadCount; ++i) {
        group.addThread(new Thread(new Runnable() {

            public void run() {
                Representation representation = provider.getRepresentation(REP_NAME, type, content);
                Assert.assertNotNull(representation);
                Assert.assertEquals(REP_NAME, representation.getName());
                final String rep = StringUtils.newStringUtf8(representation.getRepresentation());
                list.add(rep);
                set.add(rep);
                Assert.assertEquals(GroovyGeneratorTest.MIME_TYPE, representation.getMimeType());
                integer.addAndGet(1);
            }
        }));
    }
    group.start();
    try {
        group.join();
    } catch (Exception ex) {
        logger.error(ex.getMessage(), ex);
    }
    logger.info("Generated reps list: " + list);
    logger.info("Generated reps set: " + set);
    Assert.assertEquals(threadCount, integer.get());
    Assert.assertEquals(threadCount, list.size());
    Assert.assertEquals(threadCount, set.size());
}

From source file:org.eclipse.gemini.blueprint.extender.internal.activator.LifecycleManager.java

public void destroy() {
    // first stop the watchdog
    stopTimer();

    // get hold of the needed bundles
    List<Bundle> bundles = new ArrayList<Bundle>(managedContexts.size());

    for (ConfigurableOsgiBundleApplicationContext context : managedContexts.values()) {
        bundles.add(context.getBundle());
    }

    boolean debug = log.isDebugEnabled();

    if (debug) {
        log.debug("Starting shutdown procedure for bundles " + bundles);
    }
    while (!bundles.isEmpty()) {
        Collection<Bundle> candidates = ShutdownSorter.getBundles(bundles);
        if (debug)
            log.debug("Staging shutdown for bundles " + candidates);

        final List<Runnable> taskList = new ArrayList<Runnable>(candidates.size());
        final List<ConfigurableOsgiBundleApplicationContext> closedContexts = Collections
                .synchronizedList(new ArrayList<ConfigurableOsgiBundleApplicationContext>());
        final Object[] contextClosingDown = new Object[1];

        for (Bundle shutdownBundle : candidates) {
            final ConfigurableOsgiBundleApplicationContext context = getManagedContext(shutdownBundle);
            if (context != null) {
                closedContexts.add(context);
                // add a new runnable
                taskList.add(new Runnable() {

                    private final String toString = "Closing runnable for context " + context.getDisplayName();

                    public void run() {
                        contextClosingDown[0] = context;
                        // eliminate context
                        closedContexts.remove(context);
                        closeApplicationContext(context);
                    }

                    public String toString() {
                        return toString;
                    }
                });
            }
        }

        // tasks
        final Runnable[] tasks = taskList.toArray(new Runnable[taskList.size()]);

        for (Runnable task : tasks) {
            if (extenderConfiguration.shouldShutdownAsynchronously()) {
                if (execute(task, extenderConfiguration.getShutdownWaitTime(), shutdownTaskExecutor)) {
                    if (debug) {
                        log.debug(contextClosingDown[0]
                                + " context did not close successfully; forcing shutdown...");
                    }
                }
            } else {
                try {
                    task.run();
                } catch (Exception e) {
                    log.error(contextClosingDown[0] + " context close failed.", e);
                }
            }
        }
    }

    this.managedContexts.clear();

    // before bailing out; wait for the threads that might be left by
    // the task executor
    stopTaskExecutor();
}

From source file:org.springframework.osgi.extender.internal.activator.LifecycleManager.java

public void destroy() {
    // first stop the watchdog
    stopTimer();

    // get hold of the needed bundles
    List<Bundle> bundles = new ArrayList<Bundle>(managedContexts.size());

    for (ConfigurableOsgiBundleApplicationContext context : managedContexts.values()) {
        bundles.add(context.getBundle());
    }

    boolean debug = log.isDebugEnabled();

    if (debug) {
        log.debug("Starting shutdown procedure for bundles " + bundles);
    }
    while (!bundles.isEmpty()) {
        Collection<Bundle> candidates = ShutdownSorter.getBundles(bundles);
        if (debug)
            log.debug("Staging shutdown for bundles " + candidates);

        final List<Runnable> taskList = new ArrayList<Runnable>(candidates.size());
        final List<ConfigurableOsgiBundleApplicationContext> closedContexts = Collections
                .synchronizedList(new ArrayList<ConfigurableOsgiBundleApplicationContext>());
        final Object[] contextClosingDown = new Object[1];

        for (Bundle shutdownBundle : candidates) {
            Long id = new Long(shutdownBundle.getBundleId());
            final ConfigurableOsgiBundleApplicationContext context = (ConfigurableOsgiBundleApplicationContext) managedContexts
                    .get(id);
            if (context != null) {
                closedContexts.add(context);
                // add a new runnable
                taskList.add(new Runnable() {

                    private final String toString = "Closing runnable for context " + context.getDisplayName();

                    public void run() {
                        contextClosingDown[0] = context;
                        // eliminate context
                        closedContexts.remove(context);
                        closeApplicationContext(context);
                    }

                    public String toString() {
                        return toString;
                    }
                });
            }
        }

        // tasks
        final Runnable[] tasks = (Runnable[]) taskList.toArray(new Runnable[taskList.size()]);

        // start the ripper >:)
        for (int j = 0; j < tasks.length; j++) {
            if (RunnableTimedExecution.execute(tasks[j], extenderConfiguration.getShutdownWaitTime(),
                    shutdownTaskExecutor)) {
                if (debug) {
                    log.debug(
                            contextClosingDown[0] + " context did not close successfully; forcing shutdown...");
                }
            }
        }
    }

    this.managedContexts.clear();

    // before bailing out; wait for the threads that might be left by
    // the task executor
    stopTaskExecutor();
}

From source file:org.jumpmind.metl.core.runtime.AgentRuntime.java

private void deploy(final AgentDeployment deployment) {
    DeploymentStatus status = deployment.getDeploymentStatus();
    if (!status.equals(DeploymentStatus.DISABLED) && !status.equals(DeploymentStatus.REQUEST_DISABLE)
            && !status.equals(DeploymentStatus.REQUEST_REMOVE)) {
        try {
            log.info("Deploying '{}' to '{}'", deployment.getFlow().toString(), agent.getName());

            deployResources(deployment.getFlow());

            if (scheduledFlows.get(deployment) == null) {
                scheduledFlows.put(deployment, Collections.synchronizedList(new ArrayList<FlowRuntime>()));
            }

            doComponentDeploymentEvent(deployment, (l, f, s, c) -> l.onDeploy(agent, deployment, f, s, c));

            if (deployment.asStartType() == StartType.ON_DEPLOY) {
                scheduleNow(deployment);
            } else if (deployment.asStartType() == StartType.SCHEDULED_CRON) {
                String cron = deployment.getStartExpression();
                log.info(
                        "Scheduling '{}' on '{}' with a cron expression of '{}'  The next run time should be at: {}",
                        new Object[] { deployment.getFlow().toString(), agent.getName(), cron,
                                new CronSequenceGenerator(cron).next(new Date()) });

                ScheduledFuture<?> future = this.flowExecutionScheduler.schedule(
                        new FlowRunner(deployment, UUID.randomUUID().toString()), new CronTrigger(cron));
                scheduledDeployments.put(deployment, future);
            }

            deployment.setStatus(DeploymentStatus.DEPLOYED.name());
            deployment.setMessage("");
            log.info("Flow '{}' has been deployed", deployment.getFlow().getName());
        } catch (Exception e) {
            log.warn("Failed to start '{}'", deployment.getFlow().getName(), e);
            deployment.setStatus(DeploymentStatus.ERROR.name());
            deployment.setMessage(ExceptionUtils.getRootCauseMessage(e));
        }
        configurationService.save(deployment);
    }
}