Example usage for java.util.concurrent.ConcurrentLinkedQueue: the ConcurrentLinkedQueue() constructor

Introduction

On this page you can find example usage for the java.util.concurrent.ConcurrentLinkedQueue() constructor.

Prototype

public ConcurrentLinkedQueue() 

Documentation

Creates a ConcurrentLinkedQueue that is initially empty.
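
A minimal, self-contained sketch of this constructor in use: the queue starts out empty, elements are appended with offer/add and removed in FIFO order with poll.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class ConcurrentLinkedQueueExample {
    public static void main(String[] args) {
        // Creates a ConcurrentLinkedQueue that is initially empty.
        Queue<String> queue = new ConcurrentLinkedQueue<>();
        System.out.println(queue.isEmpty());   // true

        // offer/add append to the tail; poll removes from the head (FIFO).
        queue.offer("first");
        queue.offer("second");
        System.out.println(queue.poll());      // first
        System.out.println(queue.peek());      // second
        System.out.println(queue.poll());      // second
        System.out.println(queue.poll());      // null: the queue is empty again
    }
}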

Usage

From source file:com.netflix.discovery.shared.Applications.java

/**
 * Add the instance to the given map if the VIP address matches that of the
 * instance. Note that an instance can be mapped to multiple VIP addresses.
 *
 */
private void addInstanceToMap(InstanceInfo info, String vipAddresses,
        Map<String, AbstractQueue<InstanceInfo>> vipMap) {
    if (vipAddresses != null) {
        String[] vipAddressArray = vipAddresses.split(",");
        for (String vipAddress : vipAddressArray) {
            String vipName = vipAddress.toUpperCase(Locale.ROOT);
            AbstractQueue<InstanceInfo> instanceInfoList = vipMap.get(vipName);
            if (instanceInfoList == null) {
                instanceInfoList = new ConcurrentLinkedQueue<InstanceInfo>();
                vipMap.put(vipName, instanceInfoList);
            }
            instanceInfoList.add(info);
        }
    }
}
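
The get-then-put initialization above is not atomic: if the supplied map is concurrent and shared between threads, two callers can both see null for the same VIP name and one freshly created queue can silently replace the other. A minimal sketch of the same per-key-queue pattern using ConcurrentMap.computeIfAbsent instead (the VipIndex class is a hypothetical illustration, not part of the original Applications class):

import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;

class VipIndex<V> {
    private final ConcurrentMap<String, Queue<V>> vipMap = new ConcurrentHashMap<>();

    void add(String vipName, V instance) {
        // computeIfAbsent creates the queue at most once per key, even under contention.
        vipMap.computeIfAbsent(vipName, k -> new ConcurrentLinkedQueue<>()).add(instance);
    }

    Queue<V> get(String vipName) {
        return vipMap.get(vipName);
    }
}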

From source file:ict.ocrabase.main.java.client.bulkload.LoadHFiles.java

public void doQuickBulkLoad(Map<String, Map<byte[], List<Path>>> loadMap)
        throws IOException, InterruptedException {
    Configuration config = getConf();
    int loadThreadNum = config.getInt("bulkload.loadthread.num", 10);

    for (Map.Entry<String, Map<byte[], List<Path>>> entry : loadMap.entrySet()) {
        String tableName = entry.getKey();

        LOG.info("Start loading table " + tableName);

        Map<byte[], List<Path>> regionMap = entry.getValue();

        ConcurrentLinkedQueue<LoadItem> conQueue = new ConcurrentLinkedQueue<LoadHFiles.LoadItem>();
        for (Map.Entry<byte[], List<Path>> item : regionMap.entrySet()) {
            conQueue.add(new LoadItem(item.getKey(), item.getValue()));
        }

        int threadNum;
        if (regionMap.size() < loadThreadNum) {
            threadNum = regionMap.size();
        } else {
            threadNum = loadThreadNum;
        }
        LoadHFileThread[] threads = new LoadHFileThread[threadNum];
        for (int i = 0; i < threadNum; i++) {
            threads[i] = new LoadHFileThread(tableName, conQueue);
        }

        LOG.info("Starting threads");

        for (int i = 0; i < threadNum; i++) {
            threads[i].start();
        }

        LOG.info("Started threads!");

        for (int i = 0; i < threadNum; i++) {
            threads[i].join();
        }

        progress = 1;
    }
}
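
The LoadHFileThread implementation is not shown here, so as an assumption of how such workers typically share a ConcurrentLinkedQueue: each thread calls poll() until it returns null, which lets any number of threads drain the same pre-filled work list without extra locking. A minimal sketch (QueueDrainingWorker is a hypothetical stand-in, not the original class):

import java.util.concurrent.ConcurrentLinkedQueue;

class QueueDrainingWorker extends Thread {
    private final ConcurrentLinkedQueue<Runnable> workQueue;

    QueueDrainingWorker(ConcurrentLinkedQueue<Runnable> workQueue) {
        this.workQueue = workQueue;
    }

    @Override
    public void run() {
        // poll() is non-blocking: null means the queue is currently empty,
        // so the worker exits once all pre-loaded items have been taken.
        Runnable item;
        while ((item = workQueue.poll()) != null) {
            item.run();
        }
    }
}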

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at a target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS (target QPS).
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                int numQueriesExecutedInt = numQueriesExecuted.get();
                LOGGER.info(
                        "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                        startQPS, timePassed, numQueriesExecutedInt,
                        numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                        totalBrokerTime.get() / (double) numQueriesExecutedInt,
                        totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                reportStartTime = currentTime;
                numReportIntervals++;

                if ((numIntervalsToReportAndClearStatistics != 0)
                        && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                    numReportIntervals = 0;
                    startTime = currentTime;
                    reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                            statisticsList);
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to be executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            startQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
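
One detail worth noting in the drain loop above: ConcurrentLinkedQueue.size() traverses the whole queue (it is an O(n) operation and only an approximation while other threads are adding or removing), so waiting on isEmpty() is cheaper and just as correct for a "wait until drained" check. A minimal sketch of such a helper (QueueDrain is illustrative, not part of the Pinot source):

import java.util.Queue;

final class QueueDrain {
    // Spin-wait until the producer-filled queue has been fully consumed.
    // isEmpty() only inspects the head of the queue, whereas size() walks every node.
    static void awaitDrained(Queue<?> queue) throws InterruptedException {
        while (!queue.isEmpty()) {
            Thread.sleep(1);
        }
    }
}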

From source file:edu.brown.hstore.HStoreSite.java

/**
 * Constructor
 * @param site_id
 * @param catalogContext
 * @param hstore_conf
 */
protected HStoreSite(int site_id, CatalogContext catalogContext, HStoreConf hstore_conf) {
    assert (hstore_conf != null);
    assert (catalogContext != null);
    this.hstore_conf = hstore_conf;
    this.catalogContext = catalogContext;

    this.catalog_site = this.catalogContext.getSiteById(site_id);
    if (this.catalog_site == null)
        throw new RuntimeException("Invalid site #" + site_id);

    this.catalog_host = this.catalog_site.getHost();
    this.site_id = this.catalog_site.getId();
    this.site_name = HStoreThreadManager.getThreadName(this.site_id, null);

    final int num_partitions = this.catalogContext.numberOfPartitions;
    this.local_partitions.addAll(CatalogUtil.getLocalPartitionIds(catalog_site));
    int num_local_partitions = this.local_partitions.size();

    for (Status s : Status.values()) {
        this.deletable_txns.put(s, new ConcurrentLinkedQueue<Long>());
    } // FOR

    this.executors = new PartitionExecutor[num_partitions];
    this.executor_threads = new Thread[num_partitions];
    this.depTrackers = new DependencyTracker[num_partitions];

    // Get the hasher we will use for this HStoreSite
    this.hasher = ClassUtil.newInstance(hstore_conf.global.hasher_class,
            new Object[] { this.catalogContext, num_partitions },
            new Class<?>[] { CatalogContext.class, int.class });
    this.p_estimator = new PartitionEstimator(this.catalogContext, this.hasher);
    this.remoteTxnEstimator = new RemoteEstimator(this.p_estimator);

    // ARIES 
    if (hstore_conf.site.aries) {
        // Don't use both recovery modes
        assert (hstore_conf.site.snapshot == false);

        LOG.warn("Starting ARIES recovery at site");

        String siteName = HStoreThreadManager.formatSiteName(this.getSiteId());
        String ariesSiteDirPath = hstore_conf.site.aries_dir + File.separatorChar + siteName
                + File.separatorChar;

        this.m_ariesLogFileName = ariesSiteDirPath + m_ariesDefaultLogFileName;
        int numPartitionsPerSite = this.catalog_site.getPartitions().size();
        int numSites = this.catalogContext.numberOfSites;

        LOG.warn("ARIES : Log Native creation :: numSites : " + numSites + " numPartitionsPerSite : "
                + numPartitionsPerSite);
        this.m_ariesLog = new AriesLogNative(numSites, numPartitionsPerSite, this.m_ariesLogFileName);
        this.m_recoveryLog = new VoltLogger("RECOVERY");
    }

    // **IMPORTANT**
    // Always clear out the CatalogUtil and BatchPlanner before we start our new HStoreSite
    // TODO: Move this cache information into CatalogContext
    CatalogUtil.clearCache(this.catalogContext.database);
    BatchPlanner.clear(this.catalogContext.numberOfPartitions);
    TransactionCounter.resetAll(this.catalogContext);

    // Only preload stuff if we were asked to
    if (hstore_conf.site.preload) {
        if (debug.val)
            LOG.debug("Preloading cached objects");
        try {
            // Don't forget our CatalogUtil friend!
            CatalogUtil.preload(this.catalogContext.database);

            // Load up everything the QueryPlanUtil
            PlanNodeUtil.preload(this.catalogContext.database);

            // Then load up everything in the PartitionEstimator
            this.p_estimator.preload();
        } catch (Exception ex) {
            throw new RuntimeException("Failed to prepare HStoreSite", ex);
        }
    }

    // Offset Hack
    this.local_partition_offsets = new int[num_partitions];
    Arrays.fill(this.local_partition_offsets, HStoreConstants.NULL_PARTITION_ID);
    int offset = 0;
    for (int partition : this.local_partitions) {
        this.local_partition_offsets[partition] = offset++;
    } // FOR

    // -------------------------------
    // THREADS
    // -------------------------------

    EventObserver<Pair<Thread, Throwable>> observer = new EventObserver<Pair<Thread, Throwable>>() {
        @Override
        public void update(EventObservable<Pair<Thread, Throwable>> o, Pair<Thread, Throwable> arg) {
            Thread thread = arg.getFirst();
            Throwable error = arg.getSecond();
            String threadName = "<unknown>";
            if (thread != null)
                threadName = thread.getName();
            LOG.fatal(String.format("Thread %s had a fatal error: %s", threadName,
                    (error != null ? error.getMessage() : null)));
            error.printStackTrace();
            hstore_coordinator.shutdownClusterBlocking(error);
        }
    };
    this.exceptionHandler.addObserver(observer);
    Thread.setDefaultUncaughtExceptionHandler(this.exceptionHandler);

    // HStoreSite Thread Manager (this always get invoked first)
    this.threadManager = new HStoreThreadManager(this);

    // Distributed Transaction Queue Manager
    this.txnQueueManager = new TransactionQueueManager(this);

    // One Transaction Cleaner for every eight partitions
    int numCleaners = (int) Math.ceil(num_local_partitions / 8.0);
    for (int i = 0; i < numCleaners; i++) {
        this.txnCleaners.add(new TransactionCleaner(this));
    } // FOR

    // MapReduce Transaction helper thread
    if (catalogContext.getMapReduceProcedures().isEmpty() == false) {
        this.mr_helper = new MapReduceHelperThread(this);
    } else {
        this.mr_helper = null;
    }

    // Separate TransactionIdManager per partition
    if (hstore_conf.site.txn_partition_id_managers) {
        this.txnIdManagers = new TransactionIdManager[num_partitions];
        for (int partition : this.local_partitions) {
            this.txnIdManagers[partition] = new TransactionIdManager(partition);
        } // FOR
    }
    // Single TransactionIdManager for the entire site
    else {
        this.txnIdManagers = new TransactionIdManager[] { new TransactionIdManager(this.site_id) };
    }

    // Command Logger
    if (hstore_conf.site.commandlog_enable) {
        // It would be nice if we could come up with a unique name for this
        // invocation of the system (like the cluster instanceId). But for now
        // we'll just write out to our directory...

        java.util.Date date = new java.util.Date();
        Timestamp current = new Timestamp(date.getTime());
        String nonce = Long.toString(current.getTime());

        File logFile = new File(hstore_conf.site.commandlog_dir + File.separator
                + this.getSiteName().toLowerCase() + "_" + nonce + CommandLogWriter.LOG_OUTPUT_EXT);

        this.commandLogger = new CommandLogWriter(this, logFile);
    } else {
        this.commandLogger = null;
    }

    // AdHoc Support
    if (hstore_conf.site.exec_adhoc_sql) {
        this.asyncCompilerWorkThread = new AsyncCompilerWorkThread(this, this.site_id);
    } else {
        this.asyncCompilerWorkThread = null;
    }

    // The AntiCacheManager will allow us to do special things down in the EE
    // for evicted tuples
    if (hstore_conf.site.anticache_enable) {
        this.anticacheManager = new AntiCacheManager(this);
    } else {
        this.anticacheManager = null;
    }

    // -------------------------------
    // NETWORK SETUP
    // -------------------------------

    this.voltNetwork = new VoltNetwork(this);
    this.clientInterface = new ClientInterface(this, this.catalog_site.getProc_port());

    // -------------------------------
    // TRANSACTION ESTIMATION
    // -------------------------------

    // Transaction Properties Initializer
    this.txnInitializer = new TransactionInitializer(this);

    // CACHED MESSAGES
    this.REJECTION_MESSAGE = "Transaction was rejected by " + this.getSiteName();

    // -------------------------------
    // STATS SETUP
    // -------------------------------

    this.initTxnProcessors();
    this.initStatSources();

    // Profiling
    if (hstore_conf.site.profiling) {
        this.profiler = new HStoreSiteProfiler();
        if (hstore_conf.site.status_exec_info) {
            this.profiler.network_idle.resetOnEventObservable(this.startWorkload_observable);
        }
    } else {
        this.profiler = null;
    }

    this.status_monitor = new HStoreSiteStatus(this, hstore_conf);

    LoggerUtil.refreshLogging(hstore_conf.global.log_refresh);
}
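
The loop over Status.values() near the top of this constructor gives every transaction status its own ConcurrentLinkedQueue. A minimal, standalone sketch of that per-enum-key layout (the Status enum and method names below are stand-ins for illustration, not the H-Store types):

import java.util.EnumMap;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

class PerStatusQueues {
    enum Status { OK, ABORTED, REJECTED }   // stand-in enum for illustration

    // One unbounded, lock-free queue per status. The map itself is only
    // populated here; all later concurrent mutation goes through the queues.
    private final Map<Status, Queue<Long>> deletableTxns = new EnumMap<>(Status.class);

    PerStatusQueues() {
        for (Status s : Status.values()) {
            deletableTxns.put(s, new ConcurrentLinkedQueue<Long>());
        }
    }

    void markDeletable(Status s, long txnId) {
        deletableTxns.get(s).add(txnId);
    }
}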

From source file:com.ibm.crail.tools.CrailBenchmark.java

void createFile(String filename, int loop) throws Exception, InterruptedException {
    System.out.println("createFile, filename " + filename + ", loop " + loop);

    //warmup
    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    CrailBuffer buf = fs.allocateBuffer();
    bufferQueue.add(buf);
    warmUp(filename, warmup, bufferQueue);
    fs.freeBuffer(buf);

    //benchmark
    System.out.println("starting benchmark...");
    fs.getStatistics().reset();
    LinkedBlockingQueue<String> pathQueue = new LinkedBlockingQueue<String>();
    fs.create(filename, CrailNodeType.DIRECTORY, CrailStorageClass.DEFAULT, CrailLocationClass.DEFAULT).get()
            .syncDir();
    int filecounter = 0;
    for (int i = 0; i < loop; i++) {
        String name = "" + filecounter++;
        String f = filename + "/" + name;
        pathQueue.add(f);
    }

    double ops = 0;
    long start = System.currentTimeMillis();
    while (!pathQueue.isEmpty()) {
        String path = pathQueue.poll();
        fs.create(path, CrailNodeType.DATAFILE, CrailStorageClass.DEFAULT, CrailLocationClass.DEFAULT).get()
                .syncDir();
        // Count completed operations so the latency calculation below does not divide by zero.
        ops = ops + 1.0;
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start)) / 1000.0;
    double latency = 0.0;
    if (executionTime > 0) {
        latency = 1000000.0 * executionTime / ops;
    }

    System.out.println("execution time " + executionTime);
    System.out.println("ops " + ops);
    System.out.println("latency " + latency);

    fs.getStatistics().print("close");
}
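
Here the ConcurrentLinkedQueue serves as a tiny buffer pool handed to warmUp: buffers are checked out with poll() and handed back with add(). A minimal sketch of that pool pattern in isolation (ByteBuffer stands in for CrailBuffer, and the acquire/release names are illustrative):

import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentLinkedQueue;

class BufferPool {
    private final ConcurrentLinkedQueue<ByteBuffer> pool = new ConcurrentLinkedQueue<>();

    BufferPool(int buffers, int bufferSize) {
        for (int i = 0; i < buffers; i++) {
            pool.add(ByteBuffer.allocateDirect(bufferSize));
        }
    }

    // poll() returns null when the pool is exhausted, so callers can fall
    // back to allocating a fresh buffer instead of blocking.
    ByteBuffer acquire(int bufferSize) {
        ByteBuffer buf = pool.poll();
        return (buf != null) ? buf : ByteBuffer.allocateDirect(bufferSize);
    }

    void release(ByteBuffer buf) {
        buf.clear();
        pool.add(buf);
    }
}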

From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java

@Override
public void incomeAMessage(String dstVertexID, IMessage msg) {
    // Evaluate the length of the msg, 16 means 1 long and 2 int.
    int length = sizer.sizeOf(msg) + this.sizeOfRef;
    // Accumulate the total size of messages.
    this.totalSizeOfMessages = this.totalSizeOfMessages + length;
    this.totalCount = this.totalCount + 1;
    this.sizeOfMessagesDataInMem = this.sizeOfMessagesDataInMem + length;
    // Message number in memory
    this.countOfMessagesDataInMem = this.countOfMessagesDataInMem + 1;
    // Get the hash bucket index.
    int hashCode = dstVertexID.hashCode();
    int hashIndex = hashCode % this.hashBucketNumber; // bucket index
    hashIndex = (hashIndex < 0 ? hashIndex + this.hashBucketNumber : hashIndex);
    // Update the bucket meta data.
    BucketMeta meta = this.incomingQueues.get(hashIndex);
    meta.count = meta.count + 1;
    meta.countInMemory = meta.countInMemory + 1;
    meta.length = meta.length + length;
    meta.lengthInMemory = meta.lengthInMemory + length;
    // Add the msg into the incoming queue for the dstVertexID.
    ConcurrentLinkedQueue<IMessage> incomingQueue = meta.queueMap.get(dstVertexID);
    if (incomingQueue == null) {
        incomingQueue = new ConcurrentLinkedQueue<IMessage>();
        this.sizeOfHashMapsInMem = this.sizeOfHashMapsInMem
                + (sizeOfRef * 2 + (dstVertexID.length() * sizeOfChar) + sizeOfEmptyMessageQueue);
    }
    incomingQueue.add(msg);
    meta.queueMap.put(dstVertexID, incomingQueue);
    // Notify that a new message has been added.
    onMessageIncomed();
}

From source file:com.ibm.crail.tools.CrailBenchmark.java

void createFileAsync(String filename, int loop, int batch) throws Exception, InterruptedException {
    System.out.println("createFileAsync, filename " + filename + ", loop " + loop + ", batch " + batch);

    //warmup
    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    CrailBuffer buf = fs.allocateBuffer();
    bufferQueue.add(buf);
    warmUp(filename, warmup, bufferQueue);
    fs.freeBuffer(buf);

    //benchmark
    System.out.println("starting benchmark...");
    fs.getStatistics().reset();
    LinkedBlockingQueue<Future<CrailNode>> futureQueue = new LinkedBlockingQueue<Future<CrailNode>>();
    LinkedBlockingQueue<CrailFile> fileQueue = new LinkedBlockingQueue<CrailFile>();
    LinkedBlockingQueue<String> pathQueue = new LinkedBlockingQueue<String>();
    fs.create(filename, CrailNodeType.DIRECTORY, CrailStorageClass.DEFAULT, CrailLocationClass.DEFAULT).get()
            .syncDir();

    for (int i = 0; i < loop; i++) {
        String name = "/" + i;
        String f = filename + name;
        pathQueue.add(f);
    }

    long start = System.currentTimeMillis();
    for (int i = 0; i < loop; i += batch) {
        //single operation == loop
        for (int j = 0; j < batch; j++) {
            String path = pathQueue.poll();
            Future<CrailNode> future = fs.create(path, CrailNodeType.DATAFILE, CrailStorageClass.DEFAULT,
                    CrailLocationClass.DEFAULT);
            futureQueue.add(future);
        }
        for (int j = 0; j < batch; j++) {
            Future<CrailNode> future = futureQueue.poll();
            CrailFile file = future.get().asFile();
            fileQueue.add(file);
        }
        for (int j = 0; j < batch; j++) {
            CrailFile file = fileQueue.poll();
            file.syncDir();
        }
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start));
    double latency = executionTime * 1000.0 / ((double) loop);
    System.out.println("execution time [ms] " + executionTime);
    System.out.println("latency [us] " + latency);

    fs.delete(filename, true).get().syncDir();

    fs.getStatistics().print("close");

}

From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java

@Override
public void outgoAMessage(String outgoingIndex, IMessage msg) {
    // Evaluate the length of the msg, 16 means 1 long and 2 int.
    int length = sizer.sizeOf(msg) + this.sizeOfRef;
    // Accumulate the total size of messages.
    this.totalSizeOfMessages = this.totalSizeOfMessages + length;
    this.totalCount = this.totalCount + 1;
    this.sizeOfMessagesDataInMem = this.sizeOfMessagesDataInMem + length;
    // Message number in memory
    this.countOfMessagesDataInMem = this.countOfMessagesDataInMem + 1;
    ConcurrentLinkedQueue<IMessage> queue = this.outgoingQueues.get(outgoingIndex);
    if (queue == null) {
        queue = new ConcurrentLinkedQueue<IMessage>();
        this.sizeOfHashMapsInMem = this.sizeOfHashMapsInMem
                + (sizeOfRef * 2 + (outgoingIndex.length() * sizeOfChar) + sizeOfEmptyMessageQueue);
    }
    queue.add(msg);
    this.outgoingQueues.put(outgoingIndex, queue);
    // Notify that a new message has been queued for sending.
    onMessageOutgoed();
}

From source file:org.opendaylight.vpnservice.elan.internal.ElanInterfaceManager.java

@Override
protected void add(InstanceIdentifier<ElanInterface> identifier, ElanInterface elanInterfaceAdded) {
    String elanInstanceName = elanInterfaceAdded.getElanInstanceName();
    String interfaceName = elanInterfaceAdded.getName();
    InterfaceInfo interfaceInfo = interfaceManager.getInterfaceInfo(interfaceName);
    if (interfaceInfo == null) {
        logger.warn("Interface {} is removed from Interface Oper DS due to port down ", interfaceName);
        return;
    }
    ElanInstance elanInstance = ElanUtils.getElanInstanceByName(elanInstanceName);

    if (elanInstance == null) {
        elanInstance = new ElanInstanceBuilder().setElanInstanceName(elanInstanceName)
                .setDescription(elanInterfaceAdded.getDescription()).build();
        //Add the ElanInstance in the Configuration data-store
        ElanUtils.updateOperationalDataStore(broker, idManager, elanInstance);
        elanInstance = ElanUtils.getElanInstanceByName(elanInstanceName);
    }

    Long elanTag = elanInstance.getElanTag();
    // If the ELAN tag is not yet available, put the ELAN interface into the unprocessed-entry map.
    // Entries in this map are processed during the ELAN update DCN.
    if (elanTag == null) {
        ConcurrentLinkedQueue<ElanInterface> elanInterfaces = unProcessedElanInterfaces.get(elanInstanceName);
        if (elanInterfaces == null) {
            elanInterfaces = new ConcurrentLinkedQueue<ElanInterface>();
        }
        elanInterfaces.add(elanInterfaceAdded);
        unProcessedElanInterfaces.put(elanInstanceName, elanInterfaces);
        return;
    }
    DataStoreJobCoordinator coordinator = DataStoreJobCoordinator.getInstance();
    ElanInterfaceAddWorker addWorker = new ElanInterfaceAddWorker(elanInstanceName, elanInterfaceAdded,
            interfaceInfo, elanInstance, this);
    coordinator.enqueueJob(elanInstanceName, addWorker, ElanConstants.JOB_MAX_RETRIES);
}

From source file:server.Folder.java

/**
 * Copy all versions of the objects found in a folder. This will create the complete object tree of
 * the objects, so if an object has ancestors or descendants in other folders, those will be copied, too.
 * @param folderContent the content of the folder which should be copied completely.
 * @param otc an ObjectTreeCopier which is configured with a validator and the correct activeUser.
 * @param croakOnError if true, stop in case of an error and return a CopyResult which contains the events so far.
 * @return a CopyResult containing a collection of all failed and successful attempts at copying the
 * folder's contents.
 */
CopyResult copyAllVersions(Collection<ObjectSystemData> folderContent, ObjectTreeCopier otc,
        Boolean croakOnError) {
    ObjectSystemDataDAO oDao = daoFactory.getObjectSystemDataDAO(HibernateSession.getLocalEntityManager());
    CopyResult copyResult = new CopyResult();

    ConcurrentLinkedQueue<ObjectSystemData> conQueue = new ConcurrentLinkedQueue<ObjectSystemData>();
    conQueue.addAll(folderContent);
    log.debug("starting to copy " + conQueue.size() + " objects");

    for (ObjectSystemData source : conQueue) {
        //            otc.resetCopyResult();
        try {
            // create a full copy of the whole object tree:
            otc.createFullCopy(source);
            copyResult.addCopyResult(otc.getCopyResult());
        } catch (Exception ex) {
            log.debug("objectTreeCopy failed for id " + source.getId(), ex);
            // copy failed - now we have to cleanup and remove the already created copies:
            ObjectSystemData brokenCopy = otc.getCopyCache().get(source);
            if (brokenCopy != null) {
                // we should nuke all other objects with the same root,
                // as they won't be amendable to a copy operation either.
                for (ObjectSystemData osd : conQueue) {
                    if (osd.getRoot().equals(brokenCopy.getRoot())) {
                        conQueue.remove(osd);
                    }
                }

                // recursively delete the broken object tree.
                oDao.delete(brokenCopy.getRoot(), true, true);
            }

            log.debug("cleanup complete.");
            copyResult.addFailure(source, new CinnamonException(ex));
            if (croakOnError) {
                return copyResult;
            }
        }
    }
    return copyResult;
}
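
The inner loop above removes entries from conQueue while an enhanced for loop is still iterating over it. That is safe with ConcurrentLinkedQueue: its iterators are weakly consistent and never throw ConcurrentModificationException, although an in-flight iteration may or may not observe elements removed after it started. A minimal sketch of the same behavior with hypothetical path strings:

import java.util.concurrent.ConcurrentLinkedQueue;

public class WeaklyConsistentRemoval {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        queue.add("root-1/a");
        queue.add("root-1/b");
        queue.add("root-2/a");

        // Removing while iterating is allowed; unlike an ArrayList iterator,
        // no ConcurrentModificationException is thrown.
        for (String item : queue) {
            if (item.startsWith("root-1/")) {
                queue.remove(item);
            }
        }
        System.out.println(queue);   // [root-2/a]
    }
}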