Example usage for java.util Collections synchronizedList

List of usage examples for java.util Collections synchronizedList

Introduction

On this page you can find example usages of java.util.Collections.synchronizedList, collected from real-world source files.

Prototype

public static <T> List<T> synchronizedList(List<T> list) 

Document

Returns a synchronized (thread-safe) list backed by the specified list. To guarantee serial access, all access to the backing list must go through the returned list, and the user must manually synchronize on the returned list when iterating over it.
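
A minimal sketch of that contract (class name is illustrative): single method calls such as add are synchronized by the wrapper, but iteration is a compound action and must be guarded manually on the returned list.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListDemo {
    public static void main(String[] args) {
        // Individual calls (add, get, size, ...) are synchronized by the wrapper
        List<String> list = Collections.synchronizedList(new ArrayList<String>());
        list.add("a");
        list.add("b");

        // Iteration spans multiple calls, so hold the list's monitor for its duration
        synchronized (list) {
            for (String s : list) {
                System.out.println(s);
            }
        }
    }
}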

Usage

From source file:gdsc.smlm.ij.plugins.CreateData.java

/**
 * Create an image from the localisations using the configured PSF width. Draws a new stack
 * image.
 * <p>
 * Note that the localisations are filtered using the signal. The input list of localisations will be updated.
 * 
 * @param localisationSets the sets of localisations to draw
 * @return the localisations that were drawn
 */
private List<LocalisationModel> drawImage(final List<LocalisationModelSet> localisationSets) {
    if (localisationSets.isEmpty())
        return null;

    // Create a new list for all localisations that are drawn (i.e. pass the signal filters)
    List<LocalisationModelSet> newLocalisations = Collections
            .synchronizedList(new ArrayList<LocalisationModelSet>(localisationSets.size()));
    photonsRemoved = new AtomicInteger();
    t1Removed = new AtomicInteger();
    tNRemoved = new AtomicInteger();
    photonStats = new SummaryStatistics();

    // Add drawn spots to memory
    results = new MemoryPeakResults();
    Calibration c = new Calibration(settings.pixelPitch, (float) settings.getTotalGain(),
            settings.exposureTime);
    c.emCCD = (settings.getEmGain() > 1);
    c.bias = settings.bias;
    c.readNoise = settings.readNoise * ((settings.getCameraGain() > 0) ? settings.getCameraGain() : 1);
    results.setCalibration(c);
    results.setSortAfterEnd(true);
    results.begin();

    maxT = localisationSets.get(localisationSets.size() - 1).getTime();

    // Display image
    ImageStack stack = new ImageStack(settings.size, settings.size, maxT);

    final double psfSD = getPsfSD();
    if (psfSD <= 0)
        return null;
    ImagePSFModel imagePSFModel = null;

    if (imagePSF) {
        // Create one Image PSF model that can be copied
        imagePSFModel = createImagePSF(localisationSets);
        if (imagePSFModel == null)
            return null;
    }

    IJ.showStatus("Drawing image ...");

    // Multi-thread for speed
    // Note that the default Executors.newCachedThreadPool() will continue to make threads if
    // new tasks are added. We need to limit the tasks that can be added using a fixed size
    // blocking queue.
    // http://stackoverflow.com/questions/1800317/impossible-to-make-a-cached-thread-pool-with-a-size-limit
    // ExecutorService threadPool = Executors.newCachedThreadPool();
    ExecutorService threadPool = Executors.newFixedThreadPool(Prefs.getThreads());
    List<Future<?>> futures = new LinkedList<Future<?>>();

    // Count all the frames to process
    frame = 0;
    totalFrames = maxT;

    // Collect statistics on the number of photons actually simulated

    // Process all frames
    int i = 0;
    int lastT = -1;
    for (LocalisationModelSet l : localisationSets) {
        if (Utils.isInterrupted())
            break;
        if (l.getTime() != lastT) {
            lastT = l.getTime();
            futures.add(threadPool.submit(new ImageGenerator(localisationSets, newLocalisations, i, lastT,
                    createPSFModel(imagePSFModel), results, stack, poissonNoise)));
        }
        i++;
    }
    // Finish processing data
    Utils.waitForCompletion(futures);
    futures.clear();
    if (Utils.isInterrupted()) {
        IJ.showProgress(1);
        return null;
    }

    // Do all the frames that had no localisations
    for (int t = 1; t <= maxT; t++) {
        if (Utils.isInterrupted())
            break;
        if (stack.getPixels(t) == null) {
            futures.add(threadPool.submit(new ImageGenerator(localisationSets, newLocalisations, maxT, t, null,
                    results, stack, poissonNoise)));
        }
    }

    // Finish
    Utils.waitForCompletion(futures);
    threadPool.shutdown();
    IJ.showProgress(1);
    if (Utils.isInterrupted()) {
        return null;
    }
    results.end();

    if (photonsRemoved.get() > 0)
        Utils.log("Removed %d localisations with less than %.1f photons", photonsRemoved.get(),
                settings.minPhotons);
    if (t1Removed.get() > 0)
        Utils.log("Removed %d localisations with no neighbours @ SNR %.2f", t1Removed.get(), settings.minSNRt1);
    if (tNRemoved.get() > 0)
        Utils.log("Removed %d localisations with valid neighbours @ SNR %.2f", tNRemoved.get(),
                settings.minSNRtN);
    if (photonStats.getN() > 0)
        Utils.log("Average photons rendered = %s +/- %s", Utils.rounded(photonStats.getMean()),
                Utils.rounded(photonStats.getStandardDeviation()));

    //System.out.printf("rawPhotons = %f\n", rawPhotons.getMean());
    //System.out.printf("drawPhotons = %f\n", drawPhotons.getMean());
    //Utils.showHistogram("draw photons", drawPhotons, "photons", true, 0, 1000);

    // Update with all those localisations that have been drawn
    localisationSets.clear();
    localisationSets.addAll(newLocalisations);

    IJ.showStatus("Displaying image ...");

    ImageStack newStack = stack;

    if (!settings.rawImage) {
        // Get the global limits and ensure all values can be represented
        Object[] imageArray = stack.getImageArray();
        float[] limits = Maths.limits((float[]) imageArray[0]);
        for (int j = 1; j < imageArray.length; j++)
            limits = Maths.limits(limits, (float[]) imageArray[j]);
        limits[0] = 0; // Leave bias in place
        // Check if the image will fit in a 16-bit range
        if ((limits[1] - limits[0]) < 65535) {
            // Convert to 16-bit
            newStack = new ImageStack(stack.getWidth(), stack.getHeight(), stack.getSize());
            // Account for rounding
            final float min = (float) (limits[0] - 0.5);
            for (int j = 0; j < imageArray.length; j++) {
                float[] image = (float[]) imageArray[j];
                short[] pixels = new short[image.length];
                for (int k = 0; k < pixels.length; k++) {
                    pixels[k] = (short) (image[k] - min);
                }
                newStack.setPixels(pixels, j + 1);
            }
        } else {
            // Keep as 32-bit but round to whole numbers
            for (int j = 0; j < imageArray.length; j++) {
                float[] pixels = (float[]) imageArray[j];
                for (int k = 0; k < pixels.length; k++) {
                    pixels[k] = Math.round(pixels[k]);
                }
            }
        }
    }

    // Show image
    ImagePlus imp = Utils.display(CREATE_DATA_IMAGE_TITLE, newStack);

    ij.measure.Calibration cal = new ij.measure.Calibration();
    String unit = "nm";
    double unitPerPixel = settings.pixelPitch;
    if (unitPerPixel > 100) {
        unit = "um";
        unitPerPixel /= 1000.0;
    }
    cal.setUnit(unit);
    cal.pixelHeight = cal.pixelWidth = unitPerPixel;
    imp.setCalibration(cal);

    imp.setDimensions(1, 1, newStack.getSize());
    imp.resetDisplayRange();
    imp.updateAndDraw();

    saveImage(imp);

    results.setSource(new IJImageSource(imp));
    results.setName(CREATE_DATA_IMAGE_TITLE + " (" + TITLE + ")");
    results.setConfiguration(createConfiguration((float) psfSD));
    results.setBounds(new Rectangle(0, 0, settings.size, settings.size));
    MemoryPeakResults.addResults(results);

    if (benchmarkMode && benchmarkParameters != null)
        benchmarkParameters.setPhotons(results);

    List<LocalisationModel> localisations = toLocalisations(localisationSets);

    savePulses(localisations, results, CREATE_DATA_IMAGE_TITLE);

    // Save the fixed and moving localisations into different datasets
    saveFixedAndMoving(results, CREATE_DATA_IMAGE_TITLE);

    return localisations;
}
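
The shape of this method, worker tasks on a fixed-size pool all appending to one Collections.synchronizedList before the caller copies the results back, reduces to a small sketch. The names below are illustrative, not taken from the plugin:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelCollect {
    public static void main(String[] args) throws Exception {
        final List<Integer> results = Collections.synchronizedList(new ArrayList<Integer>());
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<?>> futures = new ArrayList<Future<?>>();
        for (int i = 0; i < 100; i++) {
            final int value = i;
            futures.add(pool.submit(new Runnable() {
                @Override
                public void run() {
                    results.add(value * value); // add() is synchronized by the wrapper
                }
            }));
        }
        for (Future<?> f : futures) {
            f.get(); // wait for completion; rethrows worker exceptions
        }
        pool.shutdown();
        System.out.println("Collected " + results.size() + " results");
    }
}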

From source file:com.example.pierre.applicompanies.library_http.AsyncHttpClient.java

/**
 * Puts a new request in the queue, to be executed by a thread from the pool
 *
 * @param client          HttpClient to be used for request, can differ in single requests
 * @param contentType     MIME body type, for POST and PUT requests, may be null
 * @param context         Context of Android application, to hold the reference of request
 * @param httpContext     HttpContext in which the request will be executed
 * @param responseHandler ResponseHandler or its subclass to put the response into
 * @param uriRequest      instance of HttpUriRequest, which means it must be an
 *                        HttpDelete, HttpPost, HttpGet, HttpPut, etc.
 * @return RequestHandle of future request process
 */
protected RequestHandle sendRequest(DefaultHttpClient client, HttpContext httpContext,
        HttpUriRequest uriRequest, String contentType, ResponseHandlerInterface responseHandler,
        Context context) {
    if (uriRequest == null) {
        throw new IllegalArgumentException("HttpUriRequest must not be null");
    }

    if (responseHandler == null) {
        throw new IllegalArgumentException("ResponseHandler must not be null");
    }

    if (responseHandler.getUseSynchronousMode() && !responseHandler.getUsePoolThread()) {
        throw new IllegalArgumentException(
                "Synchronous ResponseHandler used in AsyncHttpClient. You should create your response handler in a looper thread or use SyncHttpClient instead.");
    }

    if (contentType != null) {
        if (uriRequest instanceof HttpEntityEnclosingRequestBase
                && ((HttpEntityEnclosingRequestBase) uriRequest).getEntity() != null) {
            Log.w(LOG_TAG, "Passed contentType will be ignored because HttpEntity sets content type");
        } else {
            uriRequest.setHeader(HEADER_CONTENT_TYPE, contentType);
        }
    }

    responseHandler.setRequestHeaders(uriRequest.getAllHeaders());
    responseHandler.setRequestURI(uriRequest.getURI());

    AsyncHttpRequest request = newAsyncHttpRequest(client, httpContext, uriRequest, contentType,
            responseHandler, context);
    threadPool.submit(request);
    RequestHandle requestHandle = new RequestHandle(request);

    if (context != null) {
        // Add request to request map
        List<RequestHandle> requestList = requestMap.get(context);
        synchronized (requestMap) {
            if (requestList == null) {
                requestList = Collections.synchronizedList(new LinkedList<RequestHandle>());
                requestMap.put(context, requestList);
            }
        }

        requestList.add(requestHandle);

        Iterator<RequestHandle> iterator = requestList.iterator();
        while (iterator.hasNext()) {
            if (iterator.next().shouldBeGarbageCollected()) {
                iterator.remove();
            }
        }
    }

    return requestHandle;
}
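
One caveat in the snippet above (and in the two later variants of this method): the cleanup loop iterates the synchronized list without holding its monitor. The synchronizedList Javadoc requires manual synchronization on the returned list when iterating, otherwise a concurrent add from another request can trigger a ConcurrentModificationException. A defensive rewrite of the loop would be:

synchronized (requestList) {
    Iterator<RequestHandle> iterator = requestList.iterator();
    while (iterator.hasNext()) {
        if (iterator.next().shouldBeGarbageCollected()) {
            iterator.remove();
        }
    }
}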

From source file:org.alfresco.repo.node.NodeServiceTest.java

/**
 * Ensure that nodes cannot be linked to deleted nodes.
 * <p/>
 * Conditions that <i>might</i> cause this are:<br/>
 * <ul>
 *   <li>Node created within a parent node that is being deleted</li>
 *   <li>The node cache is temporarily incorrect when the association is made</li>
 * </ul>
 * <p/>
 * <a href="https://issues.alfresco.com/jira/browse/ALF-12358">Concurrency: Possible to create association references to deleted nodes</a>
 */
@Test
public void testConcurrentLinkToDeletedNode() throws Throwable {
    // First find any broken links to start with
    final NodeEntity params = new NodeEntity();
    params.setId(0L);
    params.setTypeQNameId(deletedTypeQNameId);

    // Find all 'at risk' nodes before the test
    final List<Long> attachedToDeletedIdsBefore = getChildNodesWithDeletedParentNode(params, 0);
    logger.debug("Found child nodes with deleted parent node (before): " + attachedToDeletedIdsBefore);
    final List<Long> orphanedNodeIdsBefore = getChildNodesWithNoParentNode(params, 0);
    logger.debug("Found child nodes without parent (before): " + orphanedNodeIdsBefore);

    final NodeRef[] nodeRefs = new NodeRef[10];
    final NodeRef workspaceRootNodeRef = nodeService.getRootNode(StoreRef.STORE_REF_WORKSPACE_SPACESSTORE);
    buildNodeHierarchy(workspaceRootNodeRef, nodeRefs);

    // Fire off a bunch of threads that create random nodes within the hierarchy created above
    final RetryingTransactionCallback<NodeRef> createChildCallback = new RetryingTransactionCallback<NodeRef>() {
        @Override
        public NodeRef execute() throws Throwable {
            String randomName = this.getClass().getName() + "-" + GUID.generate();
            QName randomQName = QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, randomName);
            Map<QName, Serializable> props = new HashMap<QName, Serializable>();
            props.put(ContentModel.PROP_NAME, randomName);
            // Choose a random parent node from the hierarchy
            int random = new Random().nextInt(10);
            return nodeService.createNode(nodeRefs[random], ContentModel.ASSOC_CONTAINS, randomQName,
                    ContentModel.TYPE_CONTAINER, props).getChildRef();
        }
    };
    final Runnable[] runnables = new Runnable[20];
    final List<NodeRef> nodesAtRisk = Collections.synchronizedList(new ArrayList<NodeRef>(100));

    final List<Thread> threads = new ArrayList<Thread>();
    for (int i = 0; i < runnables.length; i++) {
        runnables[i] = new Runnable() {
            @Override
            public synchronized void run() {
                AuthenticationUtil.setRunAsUserSystem();
                try {
                    wait(1000L); // A short wait before we kick off (should be notified)
                    for (int i = 0; i < 200; i++) {
                        NodeRef nodeRef = txnService.getRetryingTransactionHelper()
                                .doInTransaction(createChildCallback);
                        // Store the node for later checks
                        nodesAtRisk.add(nodeRef);
                        // Wait to give other threads a chance
                        wait(1L);
                    }
                } catch (Throwable e) {
                    // This is expected, i.e. we'll just keep doing it until failure
                    logger.debug("Got exception adding child node: ", e);
                }
            }
        };
        Thread thread = new Thread(runnables[i]);
        threads.add(thread);
        thread.start();
    }

    final RetryingTransactionCallback<NodeRef> deleteWithNestedCallback = new RetryingTransactionCallback<NodeRef>() {
        @Override
        public NodeRef execute() throws Throwable {
            // Notify the threads to kick off
            for (int i = 0; i < runnables.length; i++) {
                // Notify the threads to stop waiting
                synchronized (runnables[i]) {
                    runnables[i].notify();
                }
                // Short wait to give thread a chance to run
                synchronized (this) {
                    try {
                        wait(10L);
                    } catch (Throwable e) {
                    }
                }
            }
            // Delete the parent node
            nodeService.deleteNode(nodeRefs[0]);
            return null;
        }
    };
    txnService.getRetryingTransactionHelper().doInTransaction(deleteWithNestedCallback);

    // Wait for the threads to finish
    for (Thread t : threads) {
        t.join();
    }

    logger.info("All threads should have finished");

    // Find all 'at risk' nodes after the test
    final List<Long> attachedToDeletedIdsAfter = getChildNodesWithDeletedParentNode(params,
            attachedToDeletedIdsBefore.size());
    logger.debug("Found child nodes with deleted parent node (after): " + attachedToDeletedIdsAfter);
    final List<Long> orphanedNodeIdsAfter = getChildNodesWithNoParentNode(params, orphanedNodeIdsBefore.size());
    logger.debug("Found child nodes without parent (after): " + attachedToDeletedIdsAfter);
    // Now need to identify the problem nodes

    if (attachedToDeletedIdsAfter.isEmpty() && orphanedNodeIdsAfter.isEmpty()) {
        // nothing more to test
        return;
    }

    // We are already in a failed state, but check if the orphan cleanup works

    // workaround recovery: force collection of any orphan nodes (ALF-12358 + ALF-13066)
    for (final NodeRef nodeRef : nodesAtRisk) {
        txnService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<Void>() {
            @Override
            public Void execute() throws Throwable {
                if (nodeService.exists(nodeRef)) {
                    nodeService.getPath(nodeRef); // ignore return
                }
                return null;
            }
        });
    }

    // Find all 'at risk' nodes after the test
    final List<Long> attachedToDeletedIdsCleaned = getChildNodesWithDeletedParentNode(params,
            attachedToDeletedIdsBefore.size());
    logger.debug("Found child nodes with deleted parent node (cleaned): " + attachedToDeletedIdsAfter);
    final List<Long> orphanedNodeIdsCleaned = getChildNodesWithNoParentNode(params,
            orphanedNodeIdsBefore.size());
    logger.debug("Found child nodes without parent (cleaned): " + attachedToDeletedIdsAfter);

    // Check
    assertTrue("Expected full cleanup of nodes referencing deleted nodes: " + attachedToDeletedIdsCleaned,
            attachedToDeletedIdsCleaned.isEmpty());
    assertTrue("Expected full cleanup of nodes referencing without parents: " + orphanedNodeIdsCleaned,
            orphanedNodeIdsCleaned.isEmpty());

    // check lost_found ...
    List<NodeRef> lostAndFoundNodeRefs = getLostAndFoundNodes();
    assertFalse(lostAndFoundNodeRefs.isEmpty());

    Set<Long> lostAndFoundNodeIds = new HashSet<Long>(lostAndFoundNodeRefs.size());
    for (NodeRef nodeRef : lostAndFoundNodeRefs) {
        lostAndFoundNodeIds.add((Long) nodeService.getProperty(nodeRef, ContentModel.PROP_NODE_DBID));
    }

    assertTrue("Nodes linked to deleted parent nodes not handled.",
            lostAndFoundNodeIds.containsAll(attachedToDeletedIdsAfter));
    assertTrue("Orphaned nodes not all handled.", lostAndFoundNodeIds.containsAll(orphanedNodeIdsAfter));

    // Now fail because we allowed the situation in the first place
    fail("We allowed orphaned nodes or nodes with deleted parents.");
}

From source file:com.cyberway.issue.crawler.framework.CrawlController.java

private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
    stream.defaultReadObject();
    // Setup status listeners
    this.registeredCrawlStatusListeners = Collections.synchronizedList(new ArrayList<CrawlStatusListener>());
    // Ensure no holdover singleThreadMode
    singleThreadMode = false;
}
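
readObject is the standard hook for rebuilding transient state after deserialization, which is presumably why the listener list is recreated here rather than read from the stream. The same idiom in a minimal, generic form (class and field names are illustrative):

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ListenerHolder implements Serializable {
    private static final long serialVersionUID = 1L;

    // transient: rebuilt on deserialization instead of being written to the stream
    private transient List<Runnable> listeners = Collections.synchronizedList(new ArrayList<Runnable>());

    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        listeners = Collections.synchronizedList(new ArrayList<Runnable>());
    }
}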

From source file:cn.com.loopj.android.http.AsyncHttpClient.java

/**
 * Puts a new request in the queue, to be executed by a thread from the pool
 *
 * @param client          HttpClient to be used for request, can differ in single requests
 * @param contentType     MIME body type, for POST and PUT requests, may be null
 * @param context         Context of Android application, to hold the reference of request
 * @param httpContext     HttpContext in which the request will be executed
 * @param responseHandler ResponseHandler or its subclass to put the response into
 * @param uriRequest      instance of HttpUriRequest, which means it must be an
 *                        HttpDelete, HttpPost, HttpGet, HttpPut, etc.
 * @return RequestHandle of future request process
 */
protected RequestHandle sendRequest(DefaultHttpClient client, HttpContext httpContext,
        HttpUriRequest uriRequest, String contentType, ResponseHandlerInterface responseHandler,
        Context context) {
    if (uriRequest == null) {
        throw new IllegalArgumentException("HttpUriRequest must not be null");
    }

    if (responseHandler == null) {
        throw new IllegalArgumentException("ResponseHandler must not be null");
    }

    if (responseHandler.getUseSynchronousMode() && !responseHandler.getUsePoolThread()) {
        throw new IllegalArgumentException(
                "Synchronous ResponseHandler used in AsyncHttpClient. You should create your response handler in a looper thread or use SyncHttpClient instead.");
    }

    if (contentType != null) {
        if (uriRequest instanceof HttpEntityEnclosingRequestBase
                && ((HttpEntityEnclosingRequestBase) uriRequest).getEntity() != null
                && uriRequest.containsHeader(HEADER_CONTENT_TYPE)) {
            log.w(LOG_TAG, "Passed contentType will be ignored because HttpEntity sets content type");
        } else {
            uriRequest.setHeader(HEADER_CONTENT_TYPE, contentType);
        }
    }

    responseHandler.setRequestHeaders(uriRequest.getAllHeaders());
    responseHandler.setRequestURI(uriRequest.getURI());

    AsyncHttpRequest request = newAsyncHttpRequest(client, httpContext, uriRequest, contentType,
            responseHandler, context);
    threadPool.submit(request);
    RequestHandle requestHandle = new RequestHandle(request);

    if (context != null) {
        List<RequestHandle> requestList;
        // Add request to request map
        synchronized (requestMap) {
            requestList = requestMap.get(context);
            if (requestList == null) {
                requestList = Collections.synchronizedList(new LinkedList<RequestHandle>());
                requestMap.put(context, requestList);
            }
        }

        requestList.add(requestHandle);

        Iterator<RequestHandle> iterator = requestList.iterator();
        while (iterator.hasNext()) {
            if (iterator.next().shouldBeGarbageCollected()) {
                iterator.remove();
            }
        }
    }

    return requestHandle;
}

From source file:com.amytech.android.library.utils.asynchttp.AsyncHttpClient.java

/**
 * Puts a new request in the queue, to be executed by a thread from the pool
 *
 * @param client
 *            HttpClient to be used for request, can differ in single
 *            requests
 * @param contentType
 *            MIME body type, for POST and PUT requests, may be null
 * @param context
 *            Context of Android application, to hold the reference of
 *            request
 * @param httpContext
 *            HttpContext in which the request will be executed
 * @param responseHandler
 *            ResponseHandler or its subclass to put the response into
 * @param uriRequest
 *            instance of HttpUriRequest, which means it must be an
 *            HttpDelete, HttpPost, HttpGet, HttpPut, etc.
 * @return RequestHandle of future request process
 */
protected RequestHandle sendRequest(DefaultHttpClient client, HttpContext httpContext,
        HttpUriRequest uriRequest, String contentType, ResponseHandlerInterface responseHandler,
        Context context) {
    if (uriRequest == null) {
        throw new IllegalArgumentException("HttpUriRequest must not be null");
    }

    if (responseHandler == null) {
        throw new IllegalArgumentException("ResponseHandler must not be null");
    }

    if (responseHandler.getUseSynchronousMode() && !responseHandler.getUsePoolThread()) {
        throw new IllegalArgumentException(
                "Synchronous ResponseHandler used in AsyncHttpClient. You should create your response handler in a looper thread or use SyncHttpClient instead.");
    }

    if (contentType != null) {
        if (uriRequest instanceof HttpEntityEnclosingRequestBase
                && ((HttpEntityEnclosingRequestBase) uriRequest).getEntity() != null
                && uriRequest.containsHeader(HEADER_CONTENT_TYPE)) {
            Log.w(LOG_TAG, "Passed contentType will be ignored because HttpEntity sets content type");
        } else {
            uriRequest.setHeader(HEADER_CONTENT_TYPE, contentType);
        }
    }

    responseHandler.setRequestHeaders(uriRequest.getAllHeaders());
    responseHandler.setRequestURI(uriRequest.getURI());

    AsyncHttpRequest request = newAsyncHttpRequest(client, httpContext, uriRequest, contentType,
            responseHandler, context);
    threadPool.submit(request);
    RequestHandle requestHandle = new RequestHandle(request);

    if (context != null) {
        List<RequestHandle> requestList;
        // Add request to request map
        synchronized (requestMap) {
            requestList = requestMap.get(context);
            if (requestList == null) {
                requestList = Collections.synchronizedList(new LinkedList<RequestHandle>());
                requestMap.put(context, requestList);
            }
        }

        requestList.add(requestHandle);

        Iterator<RequestHandle> iterator = requestList.iterator();
        while (iterator.hasNext()) {
            if (iterator.next().shouldBeGarbageCollected()) {
                iterator.remove();
            }
        }
    }

    return requestHandle;
}

From source file:org.xmlsh.sh.shell.Shell.java

public void addChildProcess(Process proc) {
    if (mChildProcess == null) {
        synchronized (this) {
            if (mChildProcess == null)
                mChildProcess = Collections.synchronizedList(new ArrayList<Process>());
        }
    }
    synchronized (mChildProcess) {
        mChildProcess.add(proc);
    }
}
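
This is double-checked lazy initialization. Unless mChildProcess is declared volatile (the field declaration is not shown here), the unsynchronized first null check is not guaranteed safe under the Java Memory Model. A conservative sketch takes the lock unconditionally; note also that the explicit synchronized (mChildProcess) around add is redundant, because the wrapper already synchronizes single calls on itself:

private List<Process> mChildProcess; // initialization guarded by this

public void addChildProcess(Process proc) {
    synchronized (this) {
        if (mChildProcess == null)
            mChildProcess = Collections.synchronizedList(new ArrayList<Process>());
    }
    mChildProcess.add(proc); // add() is already synchronized by the wrapper
}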

From source file:org.apache.hadoop.hive.ql.metadata.Hive.java

/**
 * Load a directory into a Hive table partition: alters the existing content
 * of the partition with the contents of loadPath. If the partition does not
 * exist, one is created. Files in loadPath are moved into Hive, but the
 * directory itself is not removed.
 *
 * @param loadPath
 *          Directory containing files to load into Table
 * @param  tbl
 *          name of table to be loaded.
 * @param partSpec
 *          defines which partition needs to be loaded
 * @param replace
 *          if true - replace files in the partition, otherwise add files to
 *          the partition
 * @param inheritTableSpecs if true, on [re]creating the partition, take the
 *          location/inputformat/outputformat/serde details from table spec
 * @param isSrcLocal
 *          If the source directory is LOCAL
 * @param isAcid
 *          true if this is an ACID operation
 * @param hasFollowingStatsTask
 *          true if there is a following task which updates the stats, so, this method need not update.
 * @return Partition object being loaded with data
 */
public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> partSpec, boolean replace,
        boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcid,
        boolean hasFollowingStatsTask) throws HiveException {

    Path tblDataLocationPath = tbl.getDataLocation();
    try {
        // Get the partition object if it already exists
        Partition oldPart = getPartition(tbl, partSpec, false);
        /*
         * Move files before creating the partition, since downstream processes
         * check for the existence of the partition in metadata before accessing
         * the data. If the partition were created before the data is moved,
         * downstream waiting processes might move forward with partial data.
         */

        Path oldPartPath = (oldPart != null) ? oldPart.getDataLocation() : null;
        Path newPartPath = null;

        if (inheritTableSpecs) {
            Path partPath = new Path(tbl.getDataLocation(), Warehouse.makePartPath(partSpec));
            newPartPath = new Path(tblDataLocationPath.toUri().getScheme(),
                    tblDataLocationPath.toUri().getAuthority(), partPath.toUri().getPath());

            if (oldPart != null) {
                /*
                 * If we are moving the partition across filesystem boundaries
                 * inherit from the table properties. Otherwise (same filesystem) use the
                 * original partition location.
                 *
                 * See: HIVE-1707 and HIVE-2117 for background
                 */
                FileSystem oldPartPathFS = oldPartPath.getFileSystem(getConf());
                FileSystem loadPathFS = loadPath.getFileSystem(getConf());
                if (FileUtils.equalsFileSystem(oldPartPathFS, loadPathFS)) {
                    newPartPath = oldPartPath;
                }
            }
        } else {
            newPartPath = oldPartPath;
        }
        List<Path> newFiles = null;
        PerfLogger perfLogger = SessionState.getPerfLogger();
        perfLogger.PerfLogBegin("MoveTask", "FileMoves");

        // If config is set, table is not temporary and partition being inserted exists, capture
        // the list of files added. For not yet existing partitions (insert overwrite to new partition
        // or dynamic partition inserts), the add partition event will capture the list of files added.
        if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && (null != oldPart)) {
            newFiles = Collections.synchronizedList(new ArrayList<Path>());
        }

        if (replace || (oldPart == null && !isAcid)) {
            boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
            replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(), isSrcLocal, isAutoPurge,
                    newFiles);
        } else {
            FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
            Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles);
        }
        perfLogger.PerfLogEnd("MoveTask", "FileMoves");
        Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
        alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs,
                newPartPath.toString());
        validatePartition(newTPart);

        // Generate an insert event only if inserting into an existing partition
        // When inserting into a new partition, the add partition event takes care of insert event
        if ((null != oldPart) && (null != newFiles)) {
            fireInsertEvent(tbl, partSpec, replace, newFiles);
        } else {
            LOG.debug("No new files were created, and is not a replace, or we're inserting into a "
                    + "partition that does not exist yet. Skipping generating INSERT event.");
        }

        // column stats will be inaccurate
        StatsSetupConst.clearColumnStatsState(newTPart.getParameters());

        // recreate the partition if it existed before
        if (isSkewedStoreAsSubdir) {
            org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition();
            SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo();
            /* Construct list bucketing location mappings from sub-directory name. */
            Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(
                    newPartPath, skewedInfo);
            /* Add list bucketing location mappings. */
            skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
            newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
        }
        if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
            StatsSetupConst.setBasicStatsState(newTPart.getParameters(), StatsSetupConst.FALSE);
        }
        if (oldPart == null) {
            newTPart.getTPartition().setParameters(new HashMap<String, String>());
            if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
                StatsSetupConst.setStatsStateForCreateTable(newTPart.getParameters(), null,
                        StatsSetupConst.TRUE);
            }
            MetaStoreUtils.populateQuickStats(
                    HiveStatsUtils.getFileStatusRecurse(newPartPath, -1, newPartPath.getFileSystem(conf)),
                    newTPart.getParameters());
            try {
                LOG.debug("Adding new partition " + newTPart.getSpec());
                getSychronizedMSC().add_partition(newTPart.getTPartition());
            } catch (AlreadyExistsException aee) {
                // Multiple users concurrently issuing insert statements on the same partition
                // has the side effect that some queries may not see the partition at the time
                // they are issued, but then find it already there when trying to add it to the
                // metastore, and get an AlreadyExistsException because an earlier query just created it (race condition).
                // For example, imagine such a table is created:
                //  create table T (name char(50)) partitioned by (ds string);
                // and the following two queries are launched at the same time, from different sessions:
                //  insert into table T partition (ds) values ('Bob', 'today'); -- creates the partition 'today'
                //  insert into table T partition (ds) values ('Joe', 'today'); -- will fail with AlreadyExistsException
                // In that case, we want to retry with alterPartition.
                LOG.debug("Caught AlreadyExistsException, trying to alter partition instead");
                setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart);
            } catch (Exception e) {
                try {
                    final FileSystem newPathFileSystem = newPartPath.getFileSystem(this.getConf());
                    boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
                    final FileStatus status = newPathFileSystem.getFileStatus(newPartPath);
                    Hive.trashFiles(newPathFileSystem, new FileStatus[] { status }, this.getConf(),
                            isAutoPurge);
                } catch (IOException io) {
                    LOG.error("Could not delete partition directory contents after failed partition creation: ",
                            io);
                }
                throw e;
            }
        } else {
            setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart);
        }
        return newTPart;
    } catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (MetaException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (InvalidOperationException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    } catch (TException e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new HiveException(e);
    }
}
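
The synchronized wrapper around newFiles matters here because replaceFiles and Hive.copyFiles may perform the underlying file moves on multiple threads, each appending the paths it moved; the wrapper makes those concurrent appends safe before the list is handed to fireInsertEvent.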

From source file:org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater.java

@Test
public void testConcurrentAccessToSystemCredentials() {
    final Map<ApplicationId, ByteBuffer> testCredentials = new HashMap<>();
    ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[300]);
    ApplicationId applicationId = ApplicationId.newInstance(123456, 120);
    testCredentials.put(applicationId, byteBuffer);

    final List<Throwable> exceptions = Collections.synchronizedList(new ArrayList<Throwable>());

    final int NUM_THREADS = 10;
    final CountDownLatch allDone = new CountDownLatch(NUM_THREADS);
    final ExecutorService threadPool = Executors.newFixedThreadPool(NUM_THREADS);

    final AtomicBoolean stop = new AtomicBoolean(false);

    try {
        for (int i = 0; i < NUM_THREADS; i++) {
            threadPool.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        for (int i = 0; i < 100 && !stop.get(); i++) {
                            NodeHeartbeatResponse nodeHeartBeatResponse = newNodeHeartbeatResponse(0,
                                    NodeAction.NORMAL, null, null, null, null, 0);
                            nodeHeartBeatResponse.setSystemCredentialsForApps(testCredentials);
                            NodeHeartbeatResponseProto proto = ((NodeHeartbeatResponsePBImpl) nodeHeartBeatResponse)
                                    .getProto();
                            Assert.assertNotNull(proto);
                        }
                    } catch (Throwable t) {
                        exceptions.add(t);
                        stop.set(true);
                    } finally {
                        allDone.countDown();
                    }
                }
            });
        }

        int testTimeout = 2;
        Assert.assertTrue("Timeout waiting for more than " + testTimeout + " " + "seconds",
                allDone.await(testTimeout, TimeUnit.SECONDS));
    } catch (InterruptedException ie) {
        exceptions.add(ie);
    } finally {
        threadPool.shutdownNow();
    }
    Assert.assertTrue("Test failed with exception(s)" + exceptions, exceptions.isEmpty());
}
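
Collecting Throwables in a synchronized list is the usual way to surface worker-thread failures in a JUnit test: an assertion error thrown inside a Runnable submitted to an ExecutorService is captured by its Future rather than failing the test, so each worker records the failure and the main thread asserts at the end that the list is empty.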

From source file:com.gargoylesoftware.htmlunit.WebClient.java

/**
 * When we deserialize, re-initialize transient fields.
 * @param in the object input stream
 * @throws IOException if an error occurs
 * @throws ClassNotFoundException if an error occurs
 */
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();

    webConnection_ = createWebConnection();
    scriptEngine_ = new JavaScriptEngine(this);
    jobManagers_ = Collections.synchronizedList(new ArrayList<WeakReference<JavaScriptJobManager>>());

    if (getBrowserVersion().hasFeature(JS_XML_SUPPORT_VIA_ACTIVEXOBJECT)) {
        initMSXMLActiveX();
    }
}