Example usage for java.util Collections synchronizedList

Introduction

On this page you can find example usages of java.util.Collections.synchronizedList.

Prototype

public static <T> List<T> synchronizedList(List<T> list) 

Document

Returns a synchronized (thread-safe) list backed by the specified list.
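
The wrapper makes individual calls such as add(), get() and size() thread-safe, but iteration over the returned list is not atomic and must be synchronized on the list itself, as the JDK documentation requires. Below is a minimal, self-contained sketch of that pattern; the class name SynchronizedListExample and the data in it are illustrative only.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListExample {

    public static void main(String[] args) throws InterruptedException {
        // Wrap a plain ArrayList; individual operations such as add() become thread-safe.
        final List<String> items = Collections.synchronizedList(new ArrayList<String>());

        Thread writer = new Thread(new Runnable() {
            public void run() {
                for (int i = 0; i < 1000; i++) {
                    items.add("item-" + i);
                }
            }
        });
        writer.start();
        writer.join();

        // Iteration is NOT atomic: callers must hold the list's monitor while iterating,
        // otherwise a concurrent modification can break the traversal.
        synchronized (items) {
            for (String item : items) {
                System.out.println(item);
            }
        }
    }
}

Note that a single call such as items.add(...) needs no explicit synchronized block, since the wrapper already locks internally; the explicit block only matters for compound actions such as the iteration above.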

Usage

From source file:com.nbzs.ningbobus.ui.loaders.BusRunInfoReaderLoader.java

public List<BusLineRunInfoItem> loadInBackground() {
    Log.d(TAG, "loading URI: " + mFeedUri);

    SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this.getContext());
    String selectedBusLines = prefs.getString("SelectedBusLines", "");
    String[] selected = selectedBusLines.split(",");
    List<Integer> busLines = new ArrayList<Integer>();
    for (int i = 0; i < selected.length; ++i) {
        try {
            busLines.add(Integer.parseInt(selected[i]));
        } catch (NumberFormatException e) {
            // Skip entries that are not valid integers.
        }
    }

    Calendar cal = Calendar.getInstance();
    SimpleDateFormat sdf = new SimpleDateFormat("HH:mm:ss");
    Log.i(TAG, "start read feed at " + sdf.format(cal.getTime()));

    final List<BusLineRunInfoItem> items = Collections.synchronizedList(new ArrayList<BusLineRunInfoItem>());

    Thread[] threads = new Thread[busLines.size()];
    for (int i = 0; i < busLines.size(); ++i) {
        final int idx = busLines.get(i);
        threads[i] = new Thread(new Runnable() {
            public void run() {
                BusLineRunInfoItem item = downloadOneFeedItem(idx);
                synchronized (items) {
                    if (item != null) {
                        items.add(item);
                    }
                }
            }
        });
        threads[i].start();
    }
    try {
        for (Thread thread : threads) {
            thread.join();
        }
    } catch (InterruptedException e) {
        Log.d(TAG, "this should not happen");
        Log.d(TAG, Log.getStackTraceString(e));
    }
    cal = Calendar.getInstance();
    Log.i(TAG, "end read feed at " + sdf.format(cal.getTime()));
    return items;
}

From source file:architecture.ee.plugin.impl.PluginManagerImpl.java

protected PluginManagerImpl() {
    pluginListeners = new HashSet<PluginListener>();
    pluginProperties = new ConcurrentHashMap<String, PluginProperties>();
    bundleCache = new ConcurrentHashMap<Locale, List<ResourceBundle>>();
    initialized = new AtomicBoolean(false);
    pluginDirectory = ApplicationHelper.getRepository().getFile("plugins");
    plugins = new ConcurrentHashMap<String, Plugin>();
    brokenPlugins = new ConcurrentHashMap<String, String>();
    brokenPluginUpgradeExceptions = new ConcurrentHashMap<String, List<Exception>>();
    pluginDirs = new ConcurrentHashMap<Plugin, File>();
    classloaders = new ConcurrentHashMap<Object, PluginClassLoader>();
    pluginMeta = new ConcurrentHashMap<Object, PluginMetaData>();
    devPlugins = new HashSet<String>();
    customURLMapperList = Collections.synchronizedList(new ArrayList());
}

From source file:com.vmware.photon.controller.cloudstore.xenon.entity.SchedulingConstantGeneratorTest.java

/**
 * Test distribution of scheduling constants, creating hosts concurrently on a
 * single Xenon host.
 */
@Test(dataProvider = "HostCounts")
public void testSchedulingConstantVariationConcurrent(int hostCount) throws Throwable {
    List<Long> schedulingConstants = Collections.synchronizedList(new ArrayList<>());
    TestEnvironment env = TestEnvironment.create(1);
    List<Thread> threads = new ArrayList<>();
    ServiceHost xenonHost = env.getHosts()[0];

    IntStream.range(0, THREADS).forEach((threadId) -> {
        Thread t = new Thread(() -> {
            List<Long> thisThreadSchedulingConstants = createHosts(xenonHost, hostCount);
            schedulingConstants.addAll(thisThreadSchedulingConstants);
        });
        t.start();
        threads.add(t);
    });

    for (Thread t : threads) {
        t.join();
    }

    env.stop();

    assertThat(schedulingConstants.size(), equalTo(hostCount * THREADS));
    Collections.sort(schedulingConstants);

    double cv = schedulingConstantGapCV(schedulingConstants);
    logger.info("Scheduling constant gap coefficient of variation: {}", cv);
    assertThat(cv, lessThan(MAX_VARIATION));
}

From source file:org.apache.flume.channel.recoverable.memory.wal.TestWAL.java

@Test
public void testThreadedAppend() throws IOException, InterruptedException {
    int numThreads = 10;
    final CountDownLatch startLatch = new CountDownLatch(numThreads);
    final CountDownLatch stopLatch = new CountDownLatch(numThreads);
    final AtomicLong seqid = new AtomicLong(0);
    final List<String> globalExpected = Collections.synchronizedList(new ArrayList<String>());
    final List<Exception> errors = Collections.synchronizedList(new ArrayList<Exception>());
    for (int i = 0; i < numThreads; i++) {
        final int id = i;
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    List<String> expected = strings(100);
                    globalExpected.addAll(expected);
                    startLatch.countDown();
                    startLatch.await();
                    // half batch, half do not
                    if (id % 2 == 0) {
                        for (String s : expected) {
                            wal.writeEntry(new WALEntry<Text>(new Text(s), seqid.incrementAndGet()));
                        }
                    } else {
                        List<WALEntry<Text>> batch = Lists.newArrayList();
                        for (String s : expected) {
                            batch.add(new WALEntry<Text>(new Text(s), seqid.incrementAndGet()));
                        }
                        wal.writeEntries(batch);
                    }
                } catch (Exception e) {
                    logger.warn("Error doing appends", e);
                    errors.add(e);
                } finally {
                    stopLatch.countDown();
                }
            }
        };
        t.setDaemon(true);
        t.start();
    }
    Assert.assertTrue(stopLatch.await(30, TimeUnit.SECONDS));
    Assert.assertEquals(Collections.EMPTY_LIST, errors);
    wal.close();
    wal = new WAL<Text>(dataDir, Text.class);
    WALReplayResult<Text> result = wal.replay();
    Assert.assertEquals(1000, result.getSequenceID());
    List<String> actual = toStringList(result.getResults());
    // We don't know in what order the threads appended to the WAL,
    // so sort both lists before comparing them.
    Collections.sort(actual);
    Collections.sort(globalExpected);
    Assert.assertEquals(globalExpected, actual);
}

From source file:org.wallerlab.yoink.regionizer.partitioner.DensityPartitioner.java

private Region findAdaptiveSearchRegionInNonQmCoreRegion(Map<Region.Name, Region> regions,
        double densityThreshold) {
    Region adaptiveSearchRegion = simpleRegionFactory.create(Region.Name.ADAPTIVE_SEARCH);
    List<Molecule> moleculesInAdaptiveSearch = Collections.synchronizedList(new ArrayList<Molecule>());
    Set<Molecule> moleculesInNonQmCore = regions.get(Region.Name.NONQM_CORE).getMolecules();
    Set<Molecule> moleculesInQmCore = regions.get(Region.Name.QM_CORE).getMolecules();
    checkEveryNonQMCoreMolecule(densityThreshold, moleculesInAdaptiveSearch, moleculesInNonQmCore,
            moleculesInQmCore);
    for (Molecule molecule : moleculesInAdaptiveSearch) {
        adaptiveSearchRegion.addMolecule(molecule, molecule.getIndex());
    }
    return adaptiveSearchRegion;
}

From source file:org.apache.hadoop.distributedloadsimulator.sls.nodemanager.NMSimulator.java

public void init(String nodeIdStr, int memory, int cores, int dispatchTime, int heartBeatInterval,
        ResourceManager rm, Configuration conf) throws IOException, YarnException, ClassNotFoundException {
    super.init(dispatchTime, dispatchTime + 1000000L * heartBeatInterval, heartBeatInterval);
    conf.setClass(YarnConfiguration.LEADER_CLIENT_FAILOVER_PROXY_PROVIDER,
            ConfiguredLeastLoadedRMFailoverHAProxyProvider.class, RMFailoverProxyProvider.class);
    Class<? extends RMFailoverProxyProvider> defaultProviderClass = (Class<? extends RMFailoverProxyProvider>) Class
            .forName(YarnConfiguration.DEFAULT_LEADER_CLIENT_FAILOVER_PROXY_PROVIDER);
    this.resourceTracker = ServerRMProxy.createRMProxy(conf, ResourceTracker.class,
            conf.getBoolean(YarnConfiguration.DISTRIBUTED_RM, YarnConfiguration.DEFAULT_DISTRIBUTED_RM));
    // create resource
    String rackHostName[] = SLSUtils.getRackHostName(nodeIdStr);
    this.node = NodeInfo.newNodeInfo(rackHostName[0], rackHostName[1], BuilderUtils.newResource(memory, cores));
    //this.nodeId = NodeId.newInstance(InetAddress.getLocalHost().getHostName(),port);
    //this.rm = rm;
    // init data structures
    completedContainerList = Collections.synchronizedList(new ArrayList<ContainerId>());
    releasedContainerList = Collections.synchronizedList(new ArrayList<ContainerId>());
    containerQueue = new DelayQueue<ContainerSimulator>();
    amContainerList = Collections.synchronizedList(new ArrayList<ContainerId>());
    runningContainers = new ConcurrentHashMap<ContainerId, ContainerSimulator>();
    // register NM with RM
    RegisterNodeManagerRequest req = Records.newRecord(RegisterNodeManagerRequest.class);
    req.setNodeId(node.getNodeID());
    req.setResource(node.getTotalCapability());
    req.setHttpPort(80);
    LOG.info("send registration request " + node.getNodeID());
    RegisterNodeManagerResponse response = resourceTracker.registerNodeManager(req);
    LOG.info("registration done " + node.getNodeID());
    masterKey = response.getNMTokenMasterKey();
    containerMasterKey = response.getContainerTokenMasterKey();
}

From source file:it.unibo.alchemist.modelchecker.AlchemistASMC.java

/**
 * Construct an instance with the given parameters, specifying the minimum sample
 * size.
 * 
 * @param delta
 *            approximation
 * @param alpha
 *            confidence
 * @param p
 *            property to verify
 * @param pa
 *            property aggregator to use
 * @param min
 *            Minimum sample size
 */
protected AlchemistASMC(final double delta, final double alpha, final Property<T, ?, D> p,
        final PropertyAggregator<R, D> pa, final int min) {
    d = delta;
    a = alpha;
    minN = min;
    maxN = computeSampleSizeUB(delta, alpha);
    property = p;
    aggregator = pa;
    pList = Collections.synchronizedList(new ArrayList<Property<T, ?, D>>(min));
}

From source file:eionet.cr.harvest.scheduled.HarvestingJob.java

/**
 *
 * @throws DAOException
 */
private void handleBatchQueue() throws DAOException {

    // Even if it is not currently a batch-harvesting hour, we still get the list of next scheduled sources and
    // loop over them, because there are specific sources for which the batch-harvesting hours are ignored. Currently these
    // are sources whose harvest interval is less than 8 hours.

    if (isBatchHarvestingHour()) {
        LOGGER.trace("Handling batch queue...");
    }

    // Initialize batch queue collection.
    batchQueue = Collections.synchronizedList(new ArrayList<HarvestSourceDTO>());

    // Initialize collection for sources that will have to be deleted.
    HashSet<String> sourcesToDelete = new HashSet<String>();

    // Initialize harvest source DAO.
    HarvestSourceDAO sourceDao = DAOFactory.get().getDao(HarvestSourceDAO.class);

    // Get next scheduled sources.
    List<HarvestSourceDTO> nextScheduledSources = getNextScheduledSources();
    if (isBatchHarvestingHour()) {
        LOGGER.trace(nextScheduledSources.size() + " next scheduled sources found");
    }

    // Loop over next scheduled sources.
    for (HarvestSourceDTO sourceDTO : nextScheduledSources) {

        // If the source is marked with a permanent error, increase its unavailability count if it's a
        // priority source, or simply delete it if it's not.
        // If the source is not marked with a permanent error, its unavailability count is >= 5 and it's a
        // non-priority source, delete it.
        // In all other cases, add the harvest source to the batch-harvest queue.
        if (sourceDTO.isPermanentError()) {
            if (sourceDTO.isPrioritySource()) {
                LOGGER.trace("Increasing unavailability count of permanent-error priority source "
                        + sourceDTO.getUrl());
                sourceDao.increaseUnavailableCount(sourceDTO.getUrl());
            } else {
                LOGGER.debug(
                        sourceDTO.getUrl() + "  will be deleted as a non-priority source with permanent error");
                sourcesToDelete.add(sourceDTO.getUrl());
            }
        } else if (sourceDTO.getCountUnavail() >= 5) {
            if (!sourceDTO.isPrioritySource()) {
                LOGGER.debug(sourceDTO.getUrl()
                        + "  will be deleted as a non-priority source with unavailability >= 5");
                sourcesToDelete.add(sourceDTO.getUrl());
            }
        } else {
            batchQueue.add(sourceDTO);
        }
    }

    // Harvest the batch harvest queue (if anything added to it).
    for (Iterator<HarvestSourceDTO> iter = batchQueue.iterator(); iter.hasNext();) {

        HarvestSourceDTO sourceDTO = iter.next();

        // For sources whose interval is less than 8 hours, the batch-harvesting hours don't apply.
        // They are always harvested.
        boolean ignoreBatchHarvestingHour = sourceDTO.getIntervalMinutes().intValue() < 480;
        if (isBatchHarvestingHour() || ignoreBatchHarvestingHour) {

            // Remove source from batch harvest queue before starting its harvest.
            iter.remove();

            LOGGER.trace("Going to batch-harvest " + sourceDTO.getUrl());
            pullHarvest(sourceDTO, false);
        }
    }

    // Delete sources that were found necessary to delete (if any).
    if (!sourcesToDelete.isEmpty()) {

        LOGGER.debug("Deleting " + sourcesToDelete.size() + " sources found above");
        for (Iterator<String> iter = sourcesToDelete.iterator(); iter.hasNext();) {

            String sourceUrl = iter.next();
            if (CurrentHarvests.contains(sourceUrl)) {
                iter.remove();
                LOGGER.debug("Skipping deletion of " + sourceUrl + " because it is currently being harvested");
            }
        }
        sourceDao.removeHarvestSources(sourcesToDelete);
    }
}

From source file:org.alfresco.repo.action.parameter.NodeParameterProcessor.java

/**
 * Add suggestion definition to the list used to get properties suggestions from.
 *
 * @param  definition  Type or aspect
 */
public void addSuggestionDefinition(QName definition) {
    if (this.suggestionDefinitions == null) {
        this.suggestionDefinitions = Collections.synchronizedList(new ArrayList<QName>());
    }
    this.suggestionDefinitions.add(definition);
}

From source file:com.amazonaws.services.kinesis.log4j.helpers.AmazonKinesisPutRecordsHelper.java

/**
 * Constructor.
 * @param amazonKinesisClient                  Amazon Kinesis Client.
 * @param streamName                           Stream Name.
 * @param initialSequenceNumberForOrdering     Initial Sequence Number For Ordering.
 * @param isUsingSequenceNumberForOrdering     If Using Sequence Number For Ordering.
 * @param batchSize                            Batch Size.
 * @param numOfShards                          Number Of Shards.
 * @param timeThreshHoldForFlushInMilli        Time Threshold For Flush In Milliseconds.
 */
AmazonKinesisPutRecordsHelper(AmazonKinesisAsyncClient amazonKinesisClient, String streamName,
        String initialSequenceNumberForOrdering, boolean isUsingSequenceNumberForOrdering, int batchSize,
        int numOfShards, long timeThreshHoldForFlushInMilli) {
    this.amazonKinesisClient = amazonKinesisClient;
    this.asyncCallHander = new AsyncBatchPutHandler(streamName, this);
    this.streamName = streamName;
    this.sequenceNumberForOrdering = initialSequenceNumberForOrdering;
    this.isUsingSequenceNumberForOrdering = isUsingSequenceNumberForOrdering;
    this.batchSize = batchSize;
    this.numOfShards = numOfShards;
    this.timeThreshHoldForFlushInMilli = timeThreshHoldForFlushInMilli;
    shardToputRecordsRequestEntryMap = new ConcurrentHashMap<>();
    shardToFlushTime = new ConcurrentHashMap<>();
    for (int i = 1; i <= numOfShards; i++) {
        String key = "shard" + i;
        shardToputRecordsRequestEntryMap.put(key,
                Collections.synchronizedList(new ArrayList<PutRecordsRequestEntry>()));
        shardToFlushTime.put(key, new AtomicLong(System.currentTimeMillis()));
    }
    long scheduleTime = determineScheduleTime(timeThreshHoldForFlushInMilli);
    flushBucketScheduler.scheduleAtFixedRate(new FlushBucketTask(timeThreshHoldForFlushInMilli,
            shardToputRecordsRequestEntryMap, shardToFlushTime), scheduleTime, scheduleTime,
            TimeUnit.MILLISECONDS);
}