Example usage for the java.util.concurrent LinkedBlockingQueue() constructor

Introduction

This page collects example usages of the java.util.concurrent.LinkedBlockingQueue no-argument constructor, drawn from open source projects.

Prototype

public LinkedBlockingQueue() 

Document

Creates a LinkedBlockingQueue with a capacity of Integer.MAX_VALUE.
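
Before the project examples, a minimal, self-contained sketch of what this constructor gives you: an unbounded FIFO queue whose put() effectively never blocks for capacity, while take() still blocks when the queue is empty.

import java.util.concurrent.LinkedBlockingQueue;

public class UnboundedQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // No capacity argument: the queue holds up to Integer.MAX_VALUE elements.
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();
        queue.put("first");                // never blocks on an unbounded queue
        queue.offer("second");             // always succeeds here, returns true
        System.out.println(queue.take());  // "first" (FIFO order)
        System.out.println(queue.poll());  // "second"
        System.out.println(queue.poll());  // null: queue empty, poll() does not block
    }
}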

Usage

From source file:com.emc.ecs.smart.SmartUploader.java

/**
 * Performs a segmented upload to ECS using the SmartClient and the ECS byte range PUT extensions.  The upload
 * URL will be parsed and the hostname will be enumerated in DNS to see if it contains multiple 'A' records.  If
 * so, those will be used to populate the software load balancer.
 */
private void doSegmentedUpload() {
    try {
        long start = System.currentTimeMillis();
        fileSize = Files.size(fileToUpload);

        // Verify md5Save file path is legit.
        PrintWriter pw = null;
        try {
            if (saveMD5 != null) {
                pw = new PrintWriter(saveMD5);
            }
        } catch (IOException e) {
            System.err.println("Invalid path specified to save local file MD5: " + e.getMessage());
            System.exit(3);
        }

        // Figure out which segment size to use.
        if (segmentSize == -1) {
            if (fileSize >= LARGE_SEGMENT) {
                segmentSize = LARGE_SEGMENT;
            } else {
                segmentSize = SMALL_SEGMENT;
            }
        }

        // Expand the host
        String host = uploadUrl.getHost();
        InetAddress addr = InetAddress.getByName(host);
        List<String> ipAddresses = new ArrayList<>();
        try {
            ipAddresses = getIPAddresses(host);
        } catch (NamingException e) {
            LogMF.warn(l4j, "Could not resolve hostname: {0}: {1}.  Using as-is.", host, e);
            ipAddresses.add(host);
        }
        LogMF.info(l4j, "Host {0} resolves to {1}", host, ipAddresses);

        // Initialize the SmartClient
        SmartConfig smartConfig = new SmartConfig(ipAddresses.toArray(new String[ipAddresses.size()]));
        // We don't need to update the host list
        smartConfig.setHostUpdateEnabled(false);

        // Configure the load balancer
        Client pingClient = SmartClientFactory.createStandardClient(smartConfig,
                new URLConnectionClientHandler());
        pingClient.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        LoadBalancer loadBalancer = smartConfig.getLoadBalancer();
        EcsHostListProvider hostListProvider = new EcsHostListProvider(pingClient, loadBalancer, null, null);
        hostListProvider.setProtocol(uploadUrl.getProtocol());
        if (uploadUrl.getPort() != -1) {
            hostListProvider.setPort(uploadUrl.getPort());
        }
        smartConfig.setHostListProvider(hostListProvider);

        client = SmartClientFactory.createSmartClient(smartConfig, new URLConnectionClientHandler());

        // Add our retry handler
        client.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        client.addFilter(new MD5CheckFilter());
        client.addFilter(new RetryFilter(retryDelay, retryCount));

        // Create a FileChannel for the upload
        fileChannel = new RandomAccessFile(fileToUpload.toFile(), "r").getChannel();

        System.out.printf("Starting upload at %s\n", new Date().toString());
        // The first upload is done without a range to create the initial object.
        doUploadSegment(0);

        // See how many more segments we have
        int segmentCount = (int) (fileSize / (long) segmentSize);
        long remainder = fileSize % segmentSize;
        if (remainder != 0) {
            // Additional bytes at end
            segmentCount++;
        }

        if (segmentCount > 1) {
            // Build a thread pool to upload the segments.
            ThreadPoolExecutor executor = new ThreadPoolExecutor(threadCount, threadCount, 15, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>());

            for (int i = 1; i < segmentCount; i++) {
                executor.execute(new SegmentUpload(i));
            }

            // Wait for completion
            while (true) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                if (failed) {
                    // Abort!
                    l4j.warn("Error detected, terminating upload");
                    executor.shutdownNow();
                    break;
                }
                if (executor.getQueue().isEmpty()) {
                    l4j.info("All tasks complete, awaiting shutdown");
                    try {
                        executor.shutdown();
                        executor.awaitTermination(1, TimeUnit.MINUTES);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    break;
                }
            }
        }

        // Done!
        long elapsed = System.currentTimeMillis() - start;
        printRate(fileSize, elapsed);

        // Release buffers
        LogMF.debug(l4j, "buffer count at end: {0}", buffers.size());
        buffers = new LinkedList<>();
        System.out.printf("\nUpload completed at %s\n", new Date().toString());

        // Verify
        if (verifyUrl != null) {

            System.out.printf("starting remote MD5...\n");

            String objectMD5 = computeObjectMD5();
            System.out.printf("Object MD5 = %s\n", objectMD5);

            System.out.printf("Remote MD5 complete at %s\nStarting local MD5\n", new Date().toString());

            // At this point we don't need the clients anymore.
            l4j.debug("Shutting down SmartClient");
            SmartClientFactory.destroy(client);
            SmartClientFactory.destroy(pingClient);

            String fileMD5 = standardChecksum ? computeFileMD5Standard() : computeFileMD5();
            System.out.printf("\nFile on disk MD5 = %s\n", fileMD5);
            System.out.printf("Local MD5 complete at %s\n", new Date().toString());
            if (!fileMD5.equals(objectMD5)) {
                System.err.printf("ERROR: file MD5 does not match object MD5! %s != %s", fileMD5, objectMD5);
                System.exit(10);
            }

            if (saveMD5 != null && pw != null) {
                pw.write(fileMD5);
                pw.close();
            }

            System.out.printf("\nObject verification passed!\n");
        }

    } catch (IOException e) {
        e.printStackTrace();
        System.exit(4);
    }
}
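
The ThreadPoolExecutor above is backed by an unbounded LinkedBlockingQueue, so the queue never fills and the pool never grows past corePoolSize; that is why core and maximum sizes are both threadCount (and why the keep-alive time rarely matters here). A stripped-down sketch of the same pattern, with illustrative task bodies rather than SmartUploader code, using shutdown()/awaitTermination() as a simpler completion wait than polling getQueue().isEmpty():

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SegmentPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        int threadCount = 4;
        ThreadPoolExecutor executor = new ThreadPoolExecutor(threadCount, threadCount,
                15, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
        for (int i = 1; i < 8; i++) {
            final int segment = i;
            executor.execute(() -> System.out.println("uploading segment " + segment));
        }
        executor.shutdown();                             // no new tasks; queued ones still run
        executor.awaitTermination(1, TimeUnit.MINUTES);  // block until all segments finish
    }
}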

From source file:org.apache.accumulo.core.client.impl.ConditionalWriterImpl.java

@Override
public Iterator<Result> write(Iterator<ConditionalMutation> mutations) {

    BlockingQueue<Result> resultQueue = new LinkedBlockingQueue<Result>();

    List<QCMutation> mutationList = new ArrayList<QCMutation>();

    int count = 0;

    long entryTime = System.currentTimeMillis();

    mloop: while (mutations.hasNext()) {
        ConditionalMutation mut = mutations.next();
        count++;

        if (mut.getConditions().size() == 0)
            throw new IllegalArgumentException(
                    "ConditionalMutation had no conditions " + new String(mut.getRow(), UTF_8));

        for (Condition cond : mut.getConditions()) {
            if (!isVisible(cond.getVisibility())) {
                resultQueue.add(new Result(Status.INVISIBLE_VISIBILITY, mut, null));
                continue mloop;
            }
        }

        // copy the mutations so that even if caller changes it, it will not matter
        mutationList.add(new QCMutation(mut, resultQueue, entryTime));
    }

    queue(mutationList);

    return new RQIterator(resultQueue, count);

}
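
Here the LinkedBlockingQueue decouples result production from consumption: producers add Results to resultQueue as mutations complete, and the returned RQIterator drains it. A simplified analogue of that iterator (RQIterator itself is Accumulo-internal; this sketch assumes only that the expected result count is known up front):

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class QueueResultIterator<T> implements Iterator<T> {
    private final BlockingQueue<T> queue;
    private int remaining;

    QueueResultIterator(BlockingQueue<T> queue, int expected) {
        this.queue = queue;
        this.remaining = expected;
    }

    @Override
    public boolean hasNext() {
        return remaining > 0;
    }

    @Override
    public T next() {
        if (remaining == 0) {
            throw new NoSuchElementException();
        }
        try {
            T result = queue.take(); // blocks until a producer adds a result
            remaining--;
            return result;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        BlockingQueue<String> results = new LinkedBlockingQueue<>();
        results.add("row1 ACCEPTED");
        results.add("row2 REJECTED");
        QueueResultIterator<String> it = new QueueResultIterator<>(results, 2);
        while (it.hasNext()) {
            System.out.println(it.next());
        }
    }
}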

From source file:com.alibaba.jstorm.yarn.appmaster.JstormMaster.java

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws Exception {
    LOG.info("Starting JstormMaster");
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    jstormMasterContext.allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(JOYConstants.AM_RM_CLIENT_INTERVAL, allocListener);
    jstormMasterContext.amRMClient = amRMClient;
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    startTimelineClient(conf);
    if (timelineClient != null) {
        publishApplicationAttemptEvent(timelineClient, jstormMasterContext.appAttemptID.toString(),
                DSEvent.DS_APP_ATTEMPT_START, jstormMasterContext.domainId, appSubmitterUgi);
    }

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    jstormMasterContext.appMasterHostname = NetUtils.getHostname();
    //get available port
    buildPortScanner();
    jstormMasterContext.appMasterThriftPort = portScanner.getAvailablePort();

    //since appMasterRpcPort is not used yet, set it to appMasterThriftPort
    jstormMasterContext.appMasterRpcPort = jstormMasterContext.appMasterThriftPort;

    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(
            jstormMasterContext.appMasterHostname, jstormMasterContext.appMasterRpcPort,
            jstormMasterContext.appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    jstormMasterContext.maxMemory = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + jstormMasterContext.maxMemory);

    jstormMasterContext.maxVcores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capability of resources in this cluster " + jstormMasterContext.maxVcores);

    // A resource ask cannot exceed the max.
    if (jstormMasterContext.containerMemory > jstormMasterContext.maxMemory) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + jstormMasterContext.containerMemory + ", max="
                + jstormMasterContext.maxMemory);
        jstormMasterContext.containerMemory = jstormMasterContext.maxMemory;
    }

    if (jstormMasterContext.containerVirtualCores > jstormMasterContext.maxVcores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + jstormMasterContext.containerVirtualCores + ", max="
                + jstormMasterContext.maxVcores);
        jstormMasterContext.containerVirtualCores = jstormMasterContext.maxVcores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(jstormMasterContext.appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    jstormMasterContext.numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    //Setup RegistryOperations
    registryOperations = RegistryOperationsFactory.createInstance(JOYConstants.YARN_REGISTRY, conf);
    setupInitialRegistryPaths();
    registryOperations.start();

    //add previous AM containers to supervisor and nimbus container list
    for (Container container : previousAMRunningContainers) {

        String containerPath = RegistryUtils.componentPath(JOYConstants.APP_TYPE,
                jstormMasterContext.instanceName,
                container.getId().getApplicationAttemptId().getApplicationId().toString(),
                container.getId().toString());
        ServiceRecord sr = null;
        try {
            if (!registryOperations.exists(containerPath)) {

                String containerHost = container.getNodeId().getHost();
                registryOperations.mknode(containerPath, true);
                sr = new ServiceRecord();
                sr.set(JOYConstants.HOST, containerHost);
                sr.set(YarnRegistryAttributes.YARN_ID, container.getId().toString());
                sr.description = JOYConstants.CONTAINER;
                sr.set(YarnRegistryAttributes.YARN_PERSISTENCE, PersistencePolicies.CONTAINER);
                registryOperations.bind(containerPath, sr, BindFlags.OVERWRITE);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }

        if (container.getPriority().getPriority() == 0)
            jstormMasterContext.supervisorContainers.add(container);
        else if (container.getPriority().getPriority() == 1) {
            jstormMasterContext.nimbusContainers.add(container);
        }
    }

    jstormMasterContext.requestBlockingQueue = new LinkedBlockingQueue<ContainerRequest>();

    jstormMasterContext.service_user_name = RegistryUtils.currentUser();

    jstormMasterContext.instanceName = conf.get(JOYConstants.INSTANCE_NAME_KEY);
    this.jstormMasterContext.user = conf.get(JOYConstants.JSTORM_YARN_USER);
    this.jstormMasterContext.password = conf.get(JOYConstants.JSTORM_YARN_PASSWORD);
    this.jstormMasterContext.oldPassword = conf.get(JOYConstants.JSTORM_YARN_OLD_PASSWORD);

    LOG.info("find available port for am rpc server which is : " + jstormMasterContext.appMasterThriftPort);

    String appPath = RegistryUtils.servicePath(JOYConstants.APP_TYPE, jstormMasterContext.instanceName,
            jstormMasterContext.appAttemptID.getApplicationId().toString());
    String instancePath = RegistryUtils.serviceclassPath(JOYConstants.APP_TYPE,
            jstormMasterContext.instanceName);

    LOG.info("Registering application " + jstormMasterContext.appAttemptID.getApplicationId().toString());

    ServiceRecord application = setupServiceRecord();
    jstormMasterContext.nimbusDataDirPrefix = conf.get(JOYConstants.INSTANCE_DATA_DIR_KEY);
    LOG.info("generate instancePath on zk , path is:" + instancePath);

    if (registryOperations.exists(instancePath)) {
        ServiceRecord previousRegister = registryOperations.resolve(instancePath);
        application.set(JOYConstants.NIMBUS_HOST,
                previousRegister.get(JOYConstants.NIMBUS_HOST, JOYConstants.EMPTY));
        application.set(JOYConstants.NIMBUS_CONTAINER,
                previousRegister.get(JOYConstants.NIMBUS_CONTAINER, JOYConstants.EMPTY));
        application.set(JOYConstants.NIMBUS_LOCAL_DIR,
                previousRegister.get(JOYConstants.NIMBUS_LOCAL_DIR, JOYConstants.EMPTY));

        jstormMasterContext.previousNimbusHost = previousRegister.get(JOYConstants.NIMBUS_HOST, "");

        Date now = new Date();
        Map<String, ServiceRecord> apps = RegistryUtils.listServiceRecords(registryOperations, instancePath);
        for (String subAppPath : apps.keySet()) {
            LOG.info("existApp:" + subAppPath);
            ServiceRecord subApp = apps.get(subAppPath);
            Long lastHeartBeatTime = 0L;
            try {
                lastHeartBeatTime = Long.parseLong(subApp.get(JOYConstants.APP_HEARTBEAT_TIME));
            } catch (Exception e) {
                LOG.error(e);
            }
            if (now.getTime() - lastHeartBeatTime > 5 * JOYConstants.HEARTBEAT_TIME_INTERVAL
                    || lastHeartBeatTime > now.getTime() || subAppPath.trim().equals(appPath.trim())) {
                LOG.info("application " + subAppPath + " did not respond, deleting it");
                registryOperations.delete(subAppPath, true);
            }
        }
    }

    if (!jstormMasterContext.done) {
        jstormMasterContext.config = conf;
        registryOperations.mknode(appPath, true);
        registryOperations.bind(instancePath, application, BindFlags.OVERWRITE);
        ServiceRecord previousRegister = registryOperations.resolve(instancePath);
        LOG.info("previousRegister:" + previousRegister.toString());
        LOG.info("register path: " + instancePath);
        AMServer as = new AMServer(jstormMasterContext.appMasterThriftPort);
        as.Start(this);
    }
}
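
The one LinkedBlockingQueue here, requestBlockingQueue, buffers ContainerRequests between YARN's callback threads and the master's processing loop (the consumer side is not shown in this method). A hypothetical sketch of that handoff, with String standing in for ContainerRequest:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class RequestQueueSketch {
    private static final BlockingQueue<String> requests = new LinkedBlockingQueue<>();

    public static void main(String[] args) throws InterruptedException {
        // Consumer: the master loop draining requests as they arrive.
        Thread consumer = new Thread(() -> {
            try {
                while (true) {
                    String request = requests.take();
                    System.out.println("allocating container for " + request);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.setDaemon(true);
        consumer.start();

        // Producers: callback threads would call add() concurrently;
        // on an unbounded queue it never fails for capacity.
        requests.add("supervisor");
        requests.add("nimbus");
        TimeUnit.MILLISECONDS.sleep(100); // let the consumer drain before exit
    }
}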

From source file:com.taobao.datax.engine.schedule.Engine.java

private List<NamedThreadPoolExecutor> initWriterPool(JobConf jobConf, StoragePool sp) throws Exception {
    List<NamedThreadPoolExecutor> writerPoolList = new ArrayList<NamedThreadPoolExecutor>();
    List<JobPluginConf> writerJobConfs = jobConf.getWriterConfs();
    for (JobPluginConf dpjc : writerJobConfs) {
        PluginConf writerConf = pluginReg.get(dpjc.getName());
        if (writerConf.getPath() == null) {
            writerConf.setPath(engineConf.getPluginRootPath() + "writer/" + writerConf.getName());
        }
        String pluginPath = writerConf.getPath();
        PluginParam writerParam = dpjc.getPluginParams();

        Class<?> myClass = pluginClassCache.get(pluginPath);

        if (myClass == null) {
            logger.info(
                    String.format("DataX Writer %s try to load path %s .", writerConf.getName(), pluginPath));
            /*JarLoader jarLoader =  new JarLoader(
                new String[] { writerConf.getPath() });*/
            if (pluginPath.endsWith("oraclewriter")) { // special case: oracle writer (JNI)
                logger.info("oraclewriter class load");
                myClass = Class.forName("com.taobao.datax.plugins.writer.oraclewriter.OracleWriter");
            } else {
                JarLoader jarLoader = getJarLoader(pluginPath);
                myClass = jarLoader.loadClass(writerConf.getClassName());
            }

            pluginClassCache.put(pluginPath, myClass);
        }
        WriterWorker writerWorkerForPreAndPost = new WriterWorker(writerConf, myClass);
        writerWorkerForPreAndPost.setParam(writerParam);
        writerWorkerForPreAndPost.init();
        logger.info("DataX Writer prepare work begins .");
        int code = writerWorkerForPreAndPost.prepare(writerParam);
        if (code != 0) {
            throw new DataExchangeException("DataX Writer prepare work failed!");
        }
        logger.info("DataX Writer prepare work ends .");

        logger.info("DataX Writer split work begins .");
        List<PluginParam> writerSplitParams = writerWorkerForPreAndPost.doSplit(writerParam);
        logger.info(String.format("DataX Writer splits this job into %d sub-jobs .", writerSplitParams.size()));
        logger.info("DataX Writer split work ends .");

        int concurrency = dpjc.getConcurrency();
        if (concurrency <= 0 || concurrency > MAX_CONCURRENCY) {
            throw new IllegalArgumentException(
                    String.format("Writer concurrency set to be %d, make sure it must be between [%d, %d] .",
                            concurrency, 1, MAX_CONCURRENCY));
        }

        concurrency = Math.min(dpjc.getConcurrency(), writerSplitParams.size());
        if (concurrency <= 0) {
            concurrency = 1;
        }
        dpjc.setConcurrency(concurrency);

        NamedThreadPoolExecutor writerPool = new NamedThreadPoolExecutor(dpjc.getName() + "-" + dpjc.getId(),
                dpjc.getConcurrency(), dpjc.getConcurrency(), 1L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());

        writerPool.setPostWorker(writerWorkerForPreAndPost);
        writerPool.setParam(writerParam);

        writerPool.prestartAllCoreThreads();
        writerPoolList.add(writerPool);
        logger.info("DataX Writer starts to write data .");

        for (PluginParam pp : writerSplitParams) {
            WriterWorker writerWorker = new WriterWorker(writerConf, pluginClassCache.get(pluginPath));
            writerWorker.setParam(pp);
            writerWorker.setLineReceiver(new BufferedLineExchanger(sp.getStorageForWriter(dpjc.getId()), null,
                    this.engineConf.getStorageBufferSize()));
            //writerPool.execute(writerWorker);
            writerPool.submitJob(writerWorker);
            writerMonitorPool.monitor(writerWorker);
        }
    }
    return writerPoolList;
}
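
One detail worth noting above: prestartAllCoreThreads() starts every core worker immediately instead of lazily on the first submitted task. A minimal illustration (NamedThreadPoolExecutor is project-specific; a plain ThreadPoolExecutor behaves the same way for this call):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PrestartSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 1L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        System.out.println(pool.getPoolSize()); // 0: threads are created lazily
        pool.prestartAllCoreThreads();
        System.out.println(pool.getPoolSize()); // 2: all core threads now running
        pool.shutdown();
    }
}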

From source file:eu.stratosphere.nephele.services.iomanager.IOManager.java

/**
 * Creates a block channel reader that reads blocks from the given channel. The reader reads asynchronously,
 * such that a read request is accepted, carried out at some (close) point in time, and the full segment
 * is pushed to the reader's return queue.
 *
 * @param channelID The descriptor for the channel to read from.
 * @return A block channel reader that reads from the given channel.
 * @throws IOException Thrown, if the channel for the reader could not be opened.
 */
public BlockChannelReader createBlockChannelReader(Channel.ID channelID) throws IOException {
    if (this.isClosed) {
        throw new IllegalStateException("I/O-Manger is closed.");
    }

    return new BlockChannelReader(channelID, this.readers[channelID.getThreadNum()].requestQueue,
            new LinkedBlockingQueue<MemorySegment>(), 1);
}
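
The freshly created LinkedBlockingQueue<MemorySegment> becomes the reader's return queue: the I/O thread pushes each fully read segment into it, and the caller blocks on the queue until segments arrive. An illustrative sketch of that return-queue idea (not Stratosphere's actual API):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class ReturnQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<byte[]> returnQueue = new LinkedBlockingQueue<>();

        Thread ioThread = new Thread(() -> {
            byte[] segment = new byte[4096]; // pretend this was read from disk
            returnQueue.add(segment);        // push the full segment to the caller
        });
        ioThread.start();

        byte[] segment = returnQueue.take(); // blocks until the read completes
        System.out.println("received segment of " + segment.length + " bytes");
    }
}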

From source file:com.offbynull.portmapper.pcp.PcpController.java

private <T extends PcpResponse> T attemptRequest(ByteBuffer sendBuffer, int attempt, Creator<T> creator)
        throws InterruptedException {

    final LinkedBlockingQueue<ByteBuffer> recvBufferQueue = new LinkedBlockingQueue<>();

    UdpCommunicatorListener listener = new UdpCommunicatorListener() {

        @Override
        public void incomingPacket(InetSocketAddress sourceAddress, DatagramChannel channel,
                ByteBuffer packet) {
            if (channel != unicastChannel) {
                return;
            }

            recvBufferQueue.add(packet);
        }
    };

    // timeout duration should double each iteration, starting from 250 according to spec
    // i = 1, maxWaitTime = (1 << (1-1)) * 250 = (1 << 0) * 250 = 1 * 250 = 250
    // i = 2, maxWaitTime = (1 << (2-1)) * 250 = (1 << 1) * 250 = 2 * 250 = 500
    // i = 3, maxWaitTime = (1 << (3-1)) * 250 = (1 << 2) * 250 = 4 * 250 = 1000
    // i = 4, maxWaitTime = (1 << (4-1)) * 250 = (1 << 3) * 250 = 8 * 250 = 2000
    // ...
    try {
        communicator.addListener(listener);
        communicator.send(unicastChannel, gateway, sendBuffer);

        int maxWaitTime = (1 << (attempt - 1)) * 250; // NOPMD

        T pcpResponse = null;

        long endTime = System.currentTimeMillis() + maxWaitTime;
        long waitTime;
        while ((waitTime = endTime - System.currentTimeMillis()) > 0L) {
            waitTime = Math.max(waitTime, 0L); // must be at least 0, probably should never happen

            ByteBuffer recvBuffer = recvBufferQueue.poll(waitTime, TimeUnit.MILLISECONDS);

            if (recvBuffer != null) {
                pcpResponse = creator.create(recvBuffer);
                if (pcpResponse != null) {
                    break;
                }
            }
        }

        return pcpResponse;
    } finally {
        communicator.removeListener(listener);
    }
}
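
The receive loop above combines poll(timeout) with an absolute deadline: the remaining wait is recomputed each iteration, so several short waits add up to at most maxWaitTime for the whole attempt, and the budget doubles per attempt as the comment table shows. The core of that pattern, extracted into a standalone sketch with illustrative names:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class DeadlinePollSketch {
    public static String receiveWithin(LinkedBlockingQueue<String> queue, int attempt)
            throws InterruptedException {
        int maxWaitTime = (1 << (attempt - 1)) * 250; // 250, 500, 1000, 2000, ...
        long endTime = System.currentTimeMillis() + maxWaitTime;
        long waitTime;
        while ((waitTime = endTime - System.currentTimeMillis()) > 0L) {
            String msg = queue.poll(waitTime, TimeUnit.MILLISECONDS);
            if (msg != null) {
                return msg; // in the real code: parse it, keep waiting if not the response
            }
        }
        return null; // attempt timed out; the caller retries with a doubled window
    }

    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();
        new Thread(() -> queue.add("RESPONSE")).start(); // simulated gateway reply
        System.out.println(receiveWithin(queue, 1));     // waits at most 250 ms
    }
}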

From source file:eu.stratosphere.pact.runtime.hash.MutableHashTable.java

public MutableHashTable(TypeSerializer<BT> buildSideSerializer, TypeSerializer<PT> probeSideSerializer,
        TypeComparator<BT> buildSideComparator, TypeComparator<PT> probeSideComparator,
        TypePairComparator<PT, BT> comparator, List<MemorySegment> memorySegments, IOManager ioManager,
        int avgRecordLen) {
    // some sanity checks first
    if (memorySegments == null) {
        throw new NullPointerException();
    }
    if (memorySegments.size() < MIN_NUM_MEMORY_SEGMENTS) {
        throw new IllegalArgumentException("Too few memory segments provided. Hash Join needs at least "
                + MIN_NUM_MEMORY_SEGMENTS + " memory segments.");
    }

    // assign the members
    this.buildSideSerializer = buildSideSerializer;
    this.probeSideSerializer = probeSideSerializer;
    this.buildSideComparator = buildSideComparator;
    this.probeSideComparator = probeSideComparator;
    this.recordComparator = comparator;
    this.availableMemory = memorySegments;
    this.ioManager = ioManager;

    this.avgRecordLen = avgRecordLen > 0 ? avgRecordLen
            : buildSideSerializer.getLength() == -1 ? DEFAULT_RECORD_LEN : buildSideSerializer.getLength();

    // check the size of the first buffer and record it. all further buffers must have the same size.
    // the size must also be a power of 2
    this.totalNumBuffers = memorySegments.size();
    this.segmentSize = memorySegments.get(0).size();
    if ((this.segmentSize & this.segmentSize - 1) != 0) {
        throw new IllegalArgumentException("Hash Table requires buffers whose size is a power of 2.");
    }
    int bucketsPerSegment = this.segmentSize >> NUM_INTRA_BUCKET_BITS;
    if (bucketsPerSegment == 0) {
        throw new IllegalArgumentException(
                "Hash Table requires buffers of at least " + HASH_BUCKET_SIZE + " bytes.");
    }
    this.bucketsPerSegmentMask = bucketsPerSegment - 1;
    this.bucketsPerSegmentBits = MathUtils.log2strict(bucketsPerSegment);

    // take away the write behind buffers
    this.writeBehindBuffers = new LinkedBlockingQueue<MemorySegment>();
    this.numWriteBehindBuffers = getNumWriteBehindBuffers(memorySegments.size());

    this.partitionsBeingBuilt = new ArrayList<HashPartition<BT, PT>>();
    this.partitionsPending = new ArrayList<HashPartition<BT, PT>>();

    // because we allow to open and close multiple times, the state is initially closed
    this.closed.set(true);
}
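
writeBehindBuffers acts as a free-list: memory segments for write-behind I/O are handed out and recycled through the queue rather than reallocated. A minimal sketch of a LinkedBlockingQueue used as such a buffer pool (sizes and names are illustrative):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class BufferPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<byte[]> pool = new LinkedBlockingQueue<>();
        for (int i = 0; i < 2; i++) {
            pool.add(new byte[32 * 1024]); // seed the pool with reusable buffers
        }

        byte[] buffer = pool.take(); // borrow a buffer, blocking until one is free
        // ... fill the buffer and hand it to the I/O thread ...
        pool.add(buffer);            // the I/O thread recycles it after the write
    }
}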

From source file:com.ibm.crail.tools.CrailBenchmark.java

void readSequentialAsync(String filename, int size, int loop, int batch) throws Exception {
    System.out.println("readSequentialAsync, filename " + filename + ", size " + size + ", loop " + loop
            + ", batch " + batch);

    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    for (int i = 0; i < batch; i++) {
        CrailBuffer buf = null;
        if (size == CrailConstants.BUFFER_SIZE) {
            buf = fs.allocateBuffer();
        } else if (size < CrailConstants.BUFFER_SIZE) {
            CrailBuffer _buf = fs.allocateBuffer();
            _buf.clear().limit(size);
            buf = _buf.slice();
        } else {
            buf = OffHeapBuffer.wrap(ByteBuffer.allocateDirect(size));
        }
        bufferQueue.add(buf);
    }

    //warmup
    warmUp(filename, warmup, bufferQueue);

    //benchmark
    System.out.println("starting benchmark...");
    double sumbytes = 0;
    double ops = 0;
    fs.getStatistics().reset();
    CrailFile file = fs.lookup(filename).get().asFile();
    CrailInputStream directStream = file.getDirectInputStream(file.getCapacity());
    HashMap<Integer, CrailBuffer> futureMap = new HashMap<Integer, CrailBuffer>();
    LinkedBlockingQueue<Future<CrailResult>> futureQueue = new LinkedBlockingQueue<Future<CrailResult>>();
    long start = System.currentTimeMillis();
    for (int i = 0; i < batch - 1 && ops < loop; i++) {
        CrailBuffer buf = bufferQueue.poll();
        buf.clear();
        Future<CrailResult> future = directStream.read(buf);
        futureQueue.add(future);
        futureMap.put(future.hashCode(), buf);
        ops = ops + 1.0;
    }
    while (ops < loop) {
        CrailBuffer buf = bufferQueue.poll();
        buf.clear();
        Future<CrailResult> future = directStream.read(buf);
        futureQueue.add(future);
        futureMap.put(future.hashCode(), buf);

        future = futureQueue.poll();
        CrailResult result = future.get();
        buf = futureMap.get(future.hashCode());
        bufferQueue.add(buf);

        sumbytes = sumbytes + result.getLen();
        ops = ops + 1.0;
    }
    while (!futureQueue.isEmpty()) {
        Future<CrailResult> future = futureQueue.poll();
        CrailResult result = future.get();
        futureMap.get(future.hashCode());
        sumbytes = sumbytes + result.getLen();
        ops = ops + 1.0;
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start)) / 1000.0;
    double throughput = 0.0;
    double latency = 0.0;
    double sumbits = sumbytes * 8.0;
    if (executionTime > 0) {
        throughput = sumbits / executionTime / 1000.0 / 1000.0;
        latency = 1000000.0 * executionTime / ops;
    }
    directStream.close();

    System.out.println("execution time " + executionTime);
    System.out.println("ops " + ops);
    System.out.println("sumbytes " + sumbytes);
    System.out.println("throughput " + throughput);
    System.out.println("latency " + latency);

    fs.getStatistics().print("close");
}
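
The futureQueue keeps up to batch reads in flight: each iteration of the main loop issues a new read and completes the oldest outstanding one, and the final loop drains the tail. A compact sketch of that pipelining idea, with ExecutorService.submit standing in for Crail's asynchronous reads and the hashCode-keyed buffer map omitted:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;

public class PipelineSketch {
    public static void main(String[] args) throws Exception {
        int batch = 4, loop = 16;
        ExecutorService io = Executors.newFixedThreadPool(batch);
        LinkedBlockingQueue<Future<Integer>> futureQueue = new LinkedBlockingQueue<>();

        for (int op = 0; op < loop; op++) {
            final int n = op;
            futureQueue.add(io.submit(() -> n)); // issue an async "read"
            if (futureQueue.size() >= batch) {   // cap the number in flight
                System.out.println("completed op " + futureQueue.poll().get());
            }
        }
        while (!futureQueue.isEmpty()) {         // drain the tail of the pipeline
            System.out.println("completed op " + futureQueue.poll().get());
        }
        io.shutdown();
    }
}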

From source file:com.twitter.distributedlog.auditor.DLAuditor.java

private Map<String, Long> calculateStreamSpaceUsage(final URI uri,
        final com.twitter.distributedlog.DistributedLogManagerFactory factory) throws IOException {
    Collection<String> streams = factory.enumerateAllLogsInNamespace();
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);

    final Map<String, Long> streamSpaceUsageMap = new ConcurrentSkipListMap<String, Long>();
    final AtomicInteger numStreamsCollected = new AtomicInteger(0);

    executeAction(streamQueue, 10, new Action<String>() {
        @Override
        public void execute(String stream) throws IOException {
            streamSpaceUsageMap.put(stream, calculateStreamSpaceUsage(factory, stream));
            if (numStreamsCollected.incrementAndGet() % 1000 == 0) {
                logger.info("Calculated {} streams from uri {}.", numStreamsCollected.get(), uri);
            }
        }
    });

    return streamSpaceUsageMap;
}
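
executeAction is DLAuditor-internal, but the shape is clear from the call site: the queue is pre-filled with all stream names and ten workers drain it concurrently. A plausible sketch of that fan-out (assumed behavior, not the actual helper):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class DrainQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<>();
        for (int i = 0; i < 100; i++) {
            streamQueue.add("stream-" + i);
        }

        AtomicLong processed = new AtomicLong();
        ExecutorService workers = Executors.newFixedThreadPool(10);
        for (int i = 0; i < 10; i++) {
            workers.execute(() -> {
                String stream;
                while ((stream = streamQueue.poll()) != null) { // null = queue drained
                    processed.incrementAndGet();                // stand-in for the action
                }
            });
        }
        workers.shutdown();
        workers.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println("processed " + processed.get() + " streams");
    }
}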

From source file:ca.luniv.afr.service.FeedRetrieverService.java

@Override
protected void onCreate() {
    notificationManager = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
    queue = new LinkedBlockingQueue<Feed>();

    thread = new Thread(null, retriever, "FeedRetrieverService worker");
    thread.setDaemon(true);
    thread.start();

    super.onCreate();
}
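
The retriever Runnable is project code not shown here; presumably it blocks on queue.take() until the service enqueues a Feed, and being a daemon thread it dies with the process. An illustrative companion sketch of that worker loop, with String standing in for Feed:

import java.util.concurrent.LinkedBlockingQueue;

public class WorkerThreadSketch {
    private final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();

    public void start() {
        Thread thread = new Thread(null, () -> {
            try {
                while (true) {
                    String feed = queue.take(); // parks until work arrives
                    System.out.println("retrieving " + feed);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // shut down quietly
            }
        }, "FeedRetrieverService worker");
        thread.setDaemon(true); // will not keep the process alive on its own
        thread.start();
    }

    public void enqueue(String feed) {
        queue.add(feed);
    }

    public static void main(String[] args) throws InterruptedException {
        WorkerThreadSketch s = new WorkerThreadSketch();
        s.start();
        s.enqueue("http://example.com/feed.xml");
        Thread.sleep(100); // give the daemon a moment before main exits
    }
}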