Example usage for java.lang.Object.wait(long)

List of usage examples for java.lang.Object.wait(long)

Introduction

On this page you can find example usages of java.lang.Object.wait(long).

Prototype

public final native void wait(long timeoutMillis) throws InterruptedException;

Document

Causes the current thread to wait until it is awakened, typically by being notified or interrupted, or until a certain amount of real time has elapsed.
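
The examples that follow all use the same timed wait/notify pattern, so here is a minimal, self-contained sketch of it first (class, field, and method names are illustrative only). Two points matter in practice: wait(long) must be called while holding the object's monitor, and it should sit inside a loop that re-checks the condition, because a thread can wake up spuriously or before the timeout has elapsed.

public class TimedWaitExample {
    private final Object lock = new Object();
    private boolean ready = false; // condition guarded by 'lock'

    /** Waits up to timeoutMillis for 'ready' to become true; returns true if it did. */
    public boolean awaitReady(long timeoutMillis) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + timeoutMillis;
        synchronized (lock) {
            // The loop guards against spurious wakeups and early notifications.
            while (!ready) {
                long remaining = deadline - System.currentTimeMillis();
                if (remaining <= 0) {
                    return false; // timed out
                }
                lock.wait(remaining); // releases the monitor while waiting
            }
            return true;
        }
    }

    /** Called by another thread to publish the condition and wake up any waiters. */
    public void markReady() {
        synchronized (lock) {
            ready = true;
            lock.notifyAll();
        }
    }
}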

Usage

From source file:it.geosolutions.tools.io.file.IOUtils.java

/**
 * This method is responsible for checking whether the input file is still
 * being written to or whether it is available for parsing.
 *
 * <p>
 * Specifically this method tries to open up a "rw" channel on the provided
 * input file. If the file is still being written this operation fails with
 * an exception, we therefore catch this exception and sleep for
 * {@value #ATOMIC_WAIT} milliseconds as defined in the constant
 * {@link #ATOMIC_WAIT}.
 * 
 * <p>
 * If after having waited for {@link #MAX_WAITING_TIME_FOR_LOCK} (which is
 * read from the configuration or set to the default value of
 * {@link #DEFAULT_WAITING_TIME}) we have not yet acquired the channel we
 * skip this file, but we signal this situation.
 * 
 * NOTE: To make use of mandatory locks, mandatory locking must be enabled
 * both on the file system that contains the file to be locked, and on the
 * file itself. Mandatory locking is enabled on a file system using the
 * "-o mand" option to mount(8), or the MS_MANDLOCK flag for mount(2).
 * Mandatory locking is enabled on a file by disabling group execute
 * permission on the file and enabling the set-group-ID permission bit (see
 * chmod(1) and chmod(2)).
 * 
 * @param caller the object whose monitor is used to wait between retries
 * @param inputFile the file on which to acquire the lock
 * @param maxwait maximum time to wait (in milliseconds) before giving up
 * @return <code>true</code> if the lock has been successfully acquired,
 *         <code>false</code> otherwise
 * @throws InterruptedException if the waiting thread is interrupted
 * @throws IOException in case of problems accessing the file
 */
public static boolean acquireLock(Object caller, File inputFile, final long maxwait)
        throws InterruptedException, IOException {

    if (!inputFile.exists())
        return false; // the file does not exist

    if (inputFile.isDirectory()) {
        // return inputFile.setReadOnly();
        return true; // cannot lock a directory
    }

    // //
    //
    // Acquire an exclusive lock to wait for long
    // writing processes before trying to check on them
    //
    // //
    double sumWait = 0;
    while (true) {
        FileOutputStream outStream = null;
        FileChannel channel = null;
        FileLock lock = null;
        try {
            outStream = new FileOutputStream(inputFile, true);

            // get a rw channel
            channel = outStream.getChannel();
            if (channel != null) {
                // tryLock() is non-blocking: it returns null if the lock cannot be acquired
                lock = channel.tryLock();
                if (lock != null) {
                    if (LOGGER.isTraceEnabled())
                        LOGGER.trace("File locked successfully");
                    return true;
                }
            }
        } catch (OverlappingFileLockException e) {
            // File is already locked in this thread or virtual machine
            if (LOGGER.isDebugEnabled())
                LOGGER.debug("File is already locked in this thread or virtual machine");
        } catch (Exception e) {
            if (LOGGER.isDebugEnabled())
                LOGGER.debug(e.getLocalizedMessage(), e);
        } finally {

            org.apache.commons.io.IOUtils.closeQuietly(outStream);

            // release the lock
            if (lock != null)
                try {
                    lock.release();
                } catch (Exception e) {
                    // eat me
                }

            if (channel != null)
                try {
                    channel.close();
                } catch (Exception e) {
                    // eat me
                }
        }

        // Wait for ATOMIC_WAIT milliseconds before retrying to acquire the lock
        synchronized (caller) {
            caller.wait(Conf.ATOMIC_WAIT);
        }

        sumWait += Conf.ATOMIC_WAIT;
        if (sumWait > maxwait) {
            if (LOGGER.isWarnEnabled())
                LOGGER.warn("Waiting time beyond maximum specified waiting time, exiting...");
            // Quitting the loop
            break;
        }
    }

    // A time greater than MAX_WAITING_TIME_FOR_LOCK has elapsed and no lock
    // has been acquired, so return false.
    return false;
}
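
A short, hypothetical caller for the method above; the wrapper class name, the file path, and the 30-second timeout are illustrative, only the acquireLock signature is taken from the source:

import it.geosolutions.tools.io.file.IOUtils;

import java.io.File;

public class AcquireLockUsage {
    public static void main(String[] args) throws Exception {
        // Illustrative path: a file that another process may still be writing.
        File incoming = new File("/tmp/incoming.tif");
        // Monitor used by acquireLock() to wait between retries.
        Object monitor = new Object();
        // Wait at most 30 seconds for the writer to finish before giving up.
        boolean locked = IOUtils.acquireLock(monitor, incoming, 30_000L);
        if (!locked) {
            System.out.println("Could not lock " + incoming + " within the timeout; skipping it.");
        }
    }
}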

From source file:org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java

/**
 * Waits for the ISPN lock on a resource to be released. A timeout (in milliseconds) can be specified so that the
 * waiting does not block forever. Note that this method only waits for other threads that hold the lock on the
 * path; if the current thread already holds it, the method returns immediately so that subsequent operations can
 * proceed (making the lock re-entrant).
 *
 * @param resource the resource that supplies the key of the ISPN lock
 * @param locked only when this parameter is true will this method wait
 * @param timeout a timeout (in milliseconds) so that the wait is not blocked forever
 * @throws IOException thrown when the timeout is exceeded
 */
private void waitForISPNLock(ConcreteResource resource, boolean locked, long timeout) throws IOException {
    final String path;
    try {
        path = getKeyForResource(resource);
    } catch (IOException e) {
        final String errorMsg = String.format(
                "[galley] When get NFS cache key for resource: %s, got I/O error.", resource.toString());
        logger.error(errorMsg, e);
        throw new IllegalStateException(errorMsg, e);
    }

    if (fileManager.isLockedByCurrentThread(new File(path))) {
        logger.trace("Processing in same thread, will not wait for ISPN lock to make it re-entrant");
        return;
    }

    final boolean needTimeout = timeout > 0;
    final long WAIT_INTERVAL = 1000;
    long timeDuration = 0;
    while (locked) {
        // Use the ISPN lock owner for the resource to wait until the lock is released. Note that if the lock
        // has no owner, it has already been released.
        final Object owner = nfsOwnerCache.getLockOwner(path);
        if (owner == null) {
            break;
        }

        logger.trace(
                "ISPN lock still not released. ISPN lock key:{}, locker: {}, operation path: {}. Waiting for 1 seconds",
                path, owner, resource);

        if (needTimeout && timeDuration > timeout) {
            throw new IOException(String.format(
                    "ISPN lock timeout after %d Milliseconds! The ISPN lock owner is %s, and lock key is %s",
                    timeout, owner, path));
        } else {
            try {
                synchronized (owner) {
                    owner.wait(WAIT_INTERVAL);
                    timeDuration += WAIT_INTERVAL;
                }
            } catch (final InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }
}

From source file:org.apache.hadoop.hive.ql.exec.tez.DagUtils.java

public boolean checkOrWaitForTheFile(FileSystem srcFs, Path src, Path dest, Configuration conf, Object notifier,
        int waitAttempts, long sleepInterval, boolean doLog) throws IOException {
    for (int i = 0; i < waitAttempts; i++) {
        if (checkPreExisting(srcFs, src, dest, conf))
            return true;
        if (doLog && i == 0) {
            LOG.info("Waiting for the file " + dest + " (" + waitAttempts + " attempts, with " + sleepInterval
                    + "ms interval)");
        }
        try {
            if (notifier != null) {
                // The writing thread has given us an object to wait on.
                synchronized (notifier) {
                    notifier.wait(sleepInterval);
                }
            } else {
                // Some other process is probably writing the file. Just sleep.
                Thread.sleep(sleepInterval);
            }
        } catch (InterruptedException interruptedException) {
            throw new IOException(interruptedException);
        }
    }
    return checkPreExisting(srcFs, src, dest, conf); // One last check.
}

From source file:com.safi.asterisk.handler.connection.AbstractConnectionManager.java

public Object getLoopbackCall(String uuid) {
    Object lock = null;
    synchronized (loopbackCallLock) {
        lock = loopbackCallLock.get(uuid);
        if (lock instanceof Object[]) {
            if (SafletEngine.debuggerLog.isDebugEnabled())
                SafletEngine.debuggerLog.error("Got my loopbacklock info for " + uuid);
            return (Object[]) loopbackCallLock.remove(uuid);
        }
    }
    if (lock == null) {
        if (SafletEngine.debuggerLog.isDebugEnabled())
            SafletEngine.debuggerLog.error("Loopbacklock info for " + uuid + " was null!");
        return null;
    }
    synchronized (lock) {
        try {
            if (SafletEngine.debuggerLog.isDebugEnabled())
                SafletEngine.debuggerLog.error("Loopback hasn't arrived for " + uuid + " so i'm waiting!");
            if (lock instanceof Long)
                lock.wait(((Long) lock).longValue());
            else
                lock.wait(LOOPBACK_TIMEOUT);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    if (SafletEngine.debuggerLog.isDebugEnabled())
        SafletEngine.debuggerLog.error("I've woken up for " + uuid + " so returning");
    return loopbackCallLock.remove(uuid);
}

From source file:com.bt.aloha.sipp.SippEngineTestHelper.java

private void setUpSipp(String scenarioName, File directory, boolean respondToOriginatingAddress)
        throws IOException {

    // Give some time to settle between sipp calls.
    try {
        Thread.sleep(TWO_THOUSAND);
    } catch (InterruptedException e) {
    }

    Properties props = new Properties();
    props.load(getClass().getResourceAsStream("/sipp.properties"));

    final String sippPath = props.getProperty("sipp.home") + "/sipp";
    String localIpAddress = setIpAddress(props.getProperty("sip.stack.ip.address.pattern"));
    port = Integer.parseInt(props.getProperty("sipp.local.port"));
    String localPortOption = props.getProperty("sipp.local.port") == null ? "" : String.format("-p %s", port);
    String remoteAddressOption = respondToOriginatingAddress ? ""
            : String.format("-rsa %s:%s", localIpAddress, props.getProperty("sip.stack.port"));
    String runTimesOption = "-m 1";
    String remoteAddressPort = respondToOriginatingAddress ? localIpAddress
            : String.format("%s:%s", localIpAddress, props.getProperty("sip.stack.port"));
    String cmdLine = String.format("%s %s %s %s %s %s", sippPath, remoteAddressOption, runTimesOption,
            scenarioName, remoteAddressPort, localPortOption);
    log.debug(cmdLine);

    System.out.println("COMMAND LINE:");
    System.out.println("cd " + directory.getAbsolutePath());
    System.out.println(cmdLine);
    process = Runtime.getRuntime().exec(cmdLine, null, directory);

    final BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()));
    final OutputStream out = process.getOutputStream();
    final BufferedReader err = new BufferedReader(new InputStreamReader(process.getErrorStream()));

    timer = new Timer(false);
    timer.schedule(new TimerTask() {

        @Override
        public void run() {
            process.destroy();
        }

    }, 30000);

    final Object event = new Object();
    new Thread() {
        public void run() {
            try {
                String line;
                while ((line = err.readLine()) != null) {
                    // while (err.ready() && (line = err.readLine()) !=
                    // null) {
                    errSB.append(line);
                }
                err.close();
            } catch (IOException e) {
                log.debug("Unable to read the error stream from sipp", e);
            }
        }
    }.start();

    new Thread() {
        public void run() {
            try {
                String line;
                while ((line = in.readLine()) != null) {
                    // while (in.ready() && (line = in.readLine()) != null)
                    // {
                    if (line.contains("Terminated")) {
                        break;
                    }

                    if (port == -1 && line.contains("Scenario Screen")) {
                        line = in.readLine();
                        String pattern;
                        int group;

                        if (line.contains("Transport")) {
                            pattern = "(\\d+)";
                            group = 1;
                        } else if (line.contains("Remote-host")) {
                            pattern = "(.*?\\ds.*?)(\\d+)";
                            group = 2;
                        } else
                            continue;

                        line = in.readLine();
                        final Pattern pat = Pattern.compile(pattern);
                        Matcher matcher = pat.matcher(line);
                        matcher.find();
                        port = Integer.parseInt(matcher.group(group));

                        synchronized (event) {
                            event.notify();
                        }
                    }
                }
                in.close();
                out.close();
            } catch (IOException e) {
                log.debug("Unable to read the input stream from sipp", e);
            }
        }
    }.start();

    synchronized (event) {
        try {
            event.wait(FIVE_THOUSAND);
        } catch (InterruptedException e) {
        }
    }

    if (port == -1)
        throw new IOException("Error reading sipp port");

    System.out.println("Running sipp at " + getSippAddress());
}

From source file:edu.cmu.graphchi.engine.HypergraphChiEngine.java

private void loadBeforeUpdates(int interval, final ChiVertex<VertexDataType, EdgeDataType>[] vertices,
        final MemoryShard<EdgeDataType> memShard, final int startVertex, final int endVertex)
        throws IOException {
    final Object terminationLock = new Object();
    final TimerContext _timer = loadTimer.time();
    // TODO: make easier to read
    synchronized (terminationLock) {

        final AtomicInteger countDown = new AtomicInteger(disableOutEdges ? 1 : nShards);

        if (!disableInEdges) {
            try {

                logger.info("Memshard: " + startVertex + " -- " + endVertex);
                memShard.loadVertices(startVertex, endVertex, vertices, disableOutEdges, parallelExecutor);
                logger.info("Loading memory-shard finished." + Thread.currentThread().getName());

                if (countDown.decrementAndGet() == 0) {
                    synchronized (terminationLock) {
                        terminationLock.notifyAll();
                    }
                }
            } catch (IOException ioe) {
                ioe.printStackTrace();
                throw new RuntimeException(ioe);
            } catch (Exception err) {
                err.printStackTrace();
            }
        }

        /* Load in parallel */
        if (!disableOutEdges) {
            for (int p = 0; p < nShards; p++) {
                if (p != interval || disableInEdges) {
                    final int _p = p;
                    final SlidingShard<EdgeDataType> shard = slidingShards.get(p);
                    loadingExecutor.submit(new Runnable() {

                        public void run() {
                            try {
                                shard.readNextVertices(vertices, startVertex, false);
                                if (countDown.decrementAndGet() == 0) {
                                    synchronized (terminationLock) {
                                        terminationLock.notifyAll();
                                    }
                                }

                            } catch (IOException ioe) {
                                ioe.printStackTrace();
                                throw new RuntimeException(ioe);
                            } catch (Exception err) {
                                err.printStackTrace();
                            }
                        }
                    });
                }
            }
        }

        // barrier
        try {
            while (countDown.get() > 0) {
                terminationLock.wait(5000);
                if (countDown.get() > 0) {
                    logger.info("Still waiting for loading, counter is: " + countDown.get());
                }
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    _timer.stop();
}

From source file:org.gridgain.grid.kernal.managers.eventstorage.GridEventStorageManager.java

/**
 * @param p Grid event predicate.
 * @param nodes Collection of nodes.
 * @param timeout Maximum time to wait for result, if {@code 0}, then wait until result is received.
 * @return Collection of events.
 * @throws GridException Thrown in case of any errors.
 */
@SuppressWarnings({ "SynchronizationOnLocalVariableOrMethodParameter", "deprecation" })
private List<GridEvent> query(GridPredicate<? super GridEvent> p, Collection<? extends GridNode> nodes,
        long timeout) throws GridException {
    assert p != null;
    assert nodes != null;

    if (nodes.isEmpty()) {
        U.warn(log, "Failed to query events for empty nodes collection.");

        return Collections.emptyList();
    }

    GridIoManager ioMgr = ctx.io();

    final List<GridEvent> evts = new ArrayList<GridEvent>();

    final AtomicReference<Throwable> err = new AtomicReference<Throwable>(null);

    final Set<UUID> uids = new HashSet<UUID>();

    final Object qryMux = new Object();

    for (GridNode node : nodes)
        uids.add(node.id());

    GridLocalEventListener evtLsnr = new GridLocalEventListener() {
        @Override
        public void onEvent(GridEvent evt) {
            assert evt instanceof GridDiscoveryEvent;

            synchronized (qryMux) {
                uids.remove(((GridDiscoveryEvent) evt).eventNodeId());

                if (uids.isEmpty()) {
                    qryMux.notifyAll();
                }
            }
        }
    };

    GridMessageListener resLsnr = new GridMessageListener() {
        @SuppressWarnings("deprecation")
        @Override
        public void onMessage(UUID nodeId, Object msg) {
            assert nodeId != null;
            assert msg != null;

            if (!(msg instanceof GridEventStorageMessage)) {
                U.error(log, "Received unknown message: " + msg);

                return;
            }

            GridEventStorageMessage res = (GridEventStorageMessage) msg;

            synchronized (qryMux) {
                if (uids.remove(nodeId)) {
                    if (res.events() != null)
                        evts.addAll(res.events());
                } else
                    U.warn(log,
                            "Received duplicate response (ignoring) [nodeId=" + nodeId + ", msg=" + res + ']');

                if (res.exception() != null)
                    err.set(res.exception());

                if (uids.isEmpty() || err.get() != null)
                    qryMux.notifyAll();
            }
        }
    };

    String resTopic = TOPIC_EVENT.name(UUID.randomUUID());

    try {
        addLocalEventListener(evtLsnr, new int[] { EVT_NODE_LEFT, EVT_NODE_FAILED });

        ioMgr.addMessageListener(resTopic, resLsnr);

        GridByteArrayList serFilter = U.marshal(ctx.config().getMarshaller(), p);

        GridDeployment dep = ctx.deploy().deploy(p.getClass(), U.detectClassLoader(p.getClass()));

        if (dep == null)
            throw new GridDeploymentException("Failed to deploy event filter: " + p);

        Serializable msg = new GridEventStorageMessage(resTopic, serFilter, p.getClass().getName(),
                dep.classLoaderId(), dep.deployMode(), dep.sequenceNumber(), dep.userVersion(),
                dep.participants());

        ioMgr.send(nodes, TOPIC_EVENT, msg, PUBLIC_POOL);

        if (timeout == 0)
            timeout = Long.MAX_VALUE;

        long now = System.currentTimeMillis();

        // Account for overflow of long value.
        long endTime = now + timeout <= 0 ? Long.MAX_VALUE : now + timeout;

        long delta = timeout;

        Collection<UUID> uidsCp = null;

        synchronized (qryMux) {
            try {
                while (!uids.isEmpty() && err.get() == null && delta > 0) {
                    qryMux.wait(delta);

                    delta = endTime - System.currentTimeMillis();
                }
            } catch (InterruptedException e) {
                throw new GridException("Got interrupted while waiting for event query responses.", e);
            }

            if (err.get() != null)
                throw new GridException("Failed to query events due to exception on remote node.", err.get());

            if (!uids.isEmpty())
                uidsCp = new LinkedList<UUID>(uids);
        }

        // Outside of synchronization.
        if (uidsCp != null) {
            for (Iterator<UUID> iter = uidsCp.iterator(); iter.hasNext();)
                // Ignore nodes that have left the grid.
                if (ctx.discovery().node(iter.next()) == null)
                    iter.remove();

            if (!uidsCp.isEmpty())
                throw new GridException(
                        "Failed to receive event query response from following nodes: " + uidsCp);
        }
    } finally {
        ioMgr.removeMessageListener(resTopic, resLsnr);

        removeLocalEventListener(evtLsnr);
    }

    return evts;
}

From source file:org.apache.hadoop.hdfs.TestAutoEditRollWhenAvatarFailover.java

/**
 * Test if we can get block locations after killing primary avatar,
 * failing over to standby avatar (making it the new primary),
 * restarting a new standby avatar, killing the new primary avatar and
 * failing over to the restarted standby.
 *
 * Write logs for a while to make sure automatic rolling are triggered.
 */
@Test
public void testDoubleFailOverWithAutomaticRoll() throws Exception {
    setUp(false, "testDoubleFailOverWithAutomaticRoll");

    // To make sure it's never the case that both primary and standby
    // issue rolling, we use an injection handler.
    final AtomicBoolean startKeepThread = new AtomicBoolean(true);
    final AtomicInteger countAutoRolled = new AtomicInteger(0);
    final AtomicBoolean needFail = new AtomicBoolean(false);
    final AtomicLong currentThreadId = new AtomicLong(-1);
    final Object waitFor10Rolls = new Object();
    InjectionHandler.set(new InjectionHandler() {
        @Override
        protected void _processEvent(InjectionEventI event, Object... args) {
            if (event == InjectionEvent.FSEDIT_AFTER_AUTOMATIC_ROLL) {
                countAutoRolled.incrementAndGet();
                if (countAutoRolled.get() >= 10) {
                    synchronized (waitFor10Rolls) {
                        waitFor10Rolls.notifyAll();
                    }
                }

                if (!startKeepThread.get()) {
                    currentThreadId.set(-1);
                } else if (currentThreadId.get() == -1) {
                    currentThreadId.set(Thread.currentThread().getId());
                } else if (currentThreadId.get() != Thread.currentThread().getId()) {
                    LOG.warn("[Thread " + Thread.currentThread().getId() + "] expected: " + currentThreadId);
                    needFail.set(true);
                }

                LOG.info("[Thread " + Thread.currentThread().getId() + "] finish automatic log rolling, count "
                        + countAutoRolled.get());

                // Increase the rolling time a little bit once after 7 auto rolls 
                if (countAutoRolled.get() % 7 == 3) {
                    DFSTestUtil.waitNMilliSecond(75);
                }
            }
        }
    });

    FileSystem fs = cluster.getFileSystem();

    // Add some transactions during a period of time before failing over.
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(100);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() >= 10) {
            LOG.info("Automatic rolled 10 times.");
            long duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs, which is too short",
                    duration > 4500);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    // Tune the rolling timeout temporarily to avoid race conditions
    // only triggered in tests
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);

    LOG.info("================== killing primary 1");

    cluster.killPrimary();

    // Fail over and make sure after fail over, automatic edits roll still
    // will happen.
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);
    LOG.info("================== failing over 1");
    cluster.failOver();
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== restarting standby");
    cluster.restartStandby();
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== Finish restarting standby");

    // Wait for automatic rolling happens if there is no new transaction.
    startKeepThread.set(true);

    startTime = System.currentTimeMillis();
    long waitDeadLine = startTime + 20000;
    synchronized (waitFor10Rolls) {
        while (System.currentTimeMillis() < waitDeadLine && countAutoRolled.get() < 10) {
            waitFor10Rolls.wait(waitDeadLine - System.currentTimeMillis());
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);
    long duration = System.currentTimeMillis() - startTime;
    TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs", duration > 9000);

    // failover back 
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);

    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);

    LOG.info("================== killing primary 2");
    cluster.killPrimary();
    LOG.info("================== failing over 2");
    cluster.failOver();

    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);

    // Make sure after failover back, automatic rolling can still happen.
    startKeepThread.set(true);

    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(200);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() > 10) {
            LOG.info("Automatic rolled 10 times.");
            duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs, which is too short",
                    duration > 9000);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    InjectionHandler.clear();

    if (needFail.get()) {
        TestCase.fail("Automatic rolling doesn't happen in the same thread when should.");
    }
}

From source file:edu.cmu.graphchi.engine.HypergraphChiEngine.java

private void execUpdates(final HypergraphChiProgram<VertexDataType, EdgeDataType> program,
        final ChiVertex<VertexDataType, EdgeDataType>[] vertices) {
    if (vertices == null || vertices.length == 0)
        return;
    TimerContext _timer = executionTimer.time();
    if (Runtime.getRuntime().availableProcessors() == 1) {
        /* Sequential updates */
        for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
            if (vertex != null) {
                nupdates++;
                hypergraphUpdate(program, vertex, chiContext);
                // program.update(vertex, chiContext);
            }
        }
    } else {
        final Object termlock = new Object();
        final int chunkSize = 1 + vertices.length / 64;

        final int nWorkers = vertices.length / chunkSize + 1;
        final AtomicInteger countDown = new AtomicInteger(1 + nWorkers);

        if (!enableDeterministicExecution) {
            for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
                if (vertex != null)
                    vertex.parallelSafe = true;
            }
        }

        /* Parallel updates. One thread for non-parallel safe updates, others
        updated in parallel. This guarantees deterministic execution. */

        /* Non-safe updates */
        parallelExecutor.submit(new Runnable() {
            public void run() {
                int thrupdates = 0;
                GraphChiContext threadContext = chiContext.clone(0);

                try {
                    for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
                        if (vertex != null && !vertex.parallelSafe) {
                            thrupdates++;
                            hypergraphUpdate(program, vertex, threadContext);
                            //program.update(vertex, threadContext);
                        }
                    }

                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    int pending = countDown.decrementAndGet();
                    synchronized (termlock) {
                        nupdates += thrupdates;
                        if (pending == 0) {
                            termlock.notifyAll();
                        }
                    }
                }
            }
        });

        /* Parallel updates */
        for (int thrId = 0; thrId < nWorkers; thrId++) {
            final int myId = thrId;
            final int chunkStart = myId * chunkSize;
            final int chunkEnd = chunkStart + chunkSize;

            parallelExecutor.submit(new Runnable() {

                public void run() {
                    int thrupdates = 0;
                    GraphChiContext threadContext = chiContext.clone(1 + myId);

                    try {
                        int end = chunkEnd;
                        if (end > vertices.length)
                            end = vertices.length;
                        for (int i = chunkStart; i < end; i++) {
                            ChiVertex<VertexDataType, EdgeDataType> vertex = vertices[i];
                            if (vertex != null && vertex.parallelSafe) {
                                thrupdates++;
                                hypergraphUpdate(program, vertex, threadContext);
                                //program.update(vertex, threadContext);
                            }
                        }

                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        int pending = countDown.decrementAndGet();
                        synchronized (termlock) {
                            nupdates += thrupdates;
                            if (pending == 0) {
                                termlock.notifyAll();
                            }
                        }
                    }
                }
            });
        }
        synchronized (termlock) {
            while (countDown.get() > 0) {
                try {
                    termlock.wait(1500);
                } catch (InterruptedException e) {
                    // What to do?
                    e.printStackTrace();
                }
                if (countDown.get() > 0)
                    logger.info("Waiting for execution to finish: countDown:" + countDown.get());
            }
        }

    }
    _timer.stop();
}
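
Both HypergraphChiEngine examples on this page hand-roll a countdown barrier out of an AtomicInteger plus Object.wait(long)/notifyAll(). For comparison, here is a minimal sketch of the same coordination using java.util.concurrent.CountDownLatch, an alternative rather than the author's code; the worker count and task body are illustrative:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CountDownBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        final int nWorkers = 4;
        final CountDownLatch done = new CountDownLatch(nWorkers);
        ExecutorService pool = Executors.newFixedThreadPool(nWorkers);

        for (int i = 0; i < nWorkers; i++) {
            pool.submit(() -> {
                try {
                    // ... per-worker load or update work goes here ...
                } finally {
                    done.countDown(); // plays the role of countDown.decrementAndGet() + notifyAll()
                }
            });
        }

        // Plays the role of the wait(5000) loop: wait with a timeout and log progress.
        while (!done.await(5, TimeUnit.SECONDS)) {
            System.out.println("Still waiting, remaining workers: " + done.getCount());
        }
        pool.shutdown();
    }
}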

From source file:com.gemstone.gemfire.internal.cache.OplogJUnitTest.java

/**
 * Tests reduction in size of disk stats 
 * when the oplog is rolled.
 */
@Test
public void testStatsSizeReductionOnRolling() throws Exception {
    final int MAX_OPLOG_SIZE = 500 * 2;
    diskProps.setMaxOplogSize(MAX_OPLOG_SIZE);
    diskProps.setPersistBackup(true);
    diskProps.setRolling(true);
    diskProps.setCompactionThreshold(100);
    diskProps.setSynchronous(true);
    diskProps.setOverflow(false);
    diskProps.setDiskDirsAndSizes(new File[] { dirs[0] }, new int[] { 4000 });
    final byte[] val = new byte[333];
    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
    final DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    final Object lock = new Object();
    final boolean[] exceptionOccured = new boolean[] { true };
    final boolean[] okToExit = new boolean[] { false };
    final boolean[] switchExpected = new boolean[] { false };

    // calculate sizes
    final int extra_byte_num_per_entry = InternalDataSerializer
            .calculateBytesForTSandDSID(getDSID((LocalRegion) region));
    final int key3_size = DiskOfflineCompactionJUnitTest.getSize4Create(extra_byte_num_per_entry, "key3", val);
    final int tombstone_key1 = DiskOfflineCompactionJUnitTest.getSize4TombstoneWithKey(extra_byte_num_per_entry,
            "key1");
    final int tombstone_key2 = DiskOfflineCompactionJUnitTest.getSize4TombstoneWithKey(extra_byte_num_per_entry,
            "key2");

    CacheObserver old = CacheObserverHolder.setInstance(new CacheObserverAdapter() {
        private long before = -1;
        private DirectoryHolder dh = null;
        private long oplogsSize = 0;

        @Override
        public void beforeSwitchingOplog() {
            cache.getLogger().info("beforeSwitchingOplog");
            if (!switchExpected[0]) {
                fail("unexpected oplog switch");
            }
            if (before == -1) {
                // only want to call this once; before the 1st oplog destroy
                this.dh = dr.getNextDir();
                this.before = this.dh.getDirStatsDiskSpaceUsage();
            }
        }

        @Override
        public void beforeDeletingCompactedOplog(Oplog oplog) {
            cache.getLogger().info("beforeDeletingCompactedOplog");
            oplogsSize += oplog.getOplogSize();
        }

        @Override
        public void afterHavingCompacted() {
            cache.getLogger().info("afterHavingCompacted");
            if (before > -1) {
                synchronized (lock) {
                    okToExit[0] = true;
                    long after = this.dh.getDirStatsDiskSpaceUsage();
                    // after compaction, in _2.crf, key3 is a create-entry,
                    // key1 and key2 are tombstones. 
                    // _2.drf contained a rvvgc with drMap.size()==1
                    int expected_drf_size = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_MAGIC_SEQ_REC_SIZE
                            + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
                            + DiskOfflineCompactionJUnitTest.getRVVSize(1, new int[] { 0 }, true);
                    int expected_crf_size = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_MAGIC_SEQ_REC_SIZE
                            + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
                            + DiskOfflineCompactionJUnitTest.getRVVSize(1, new int[] { 1 }, false)
                            + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE + key3_size + tombstone_key1 + tombstone_key2;
                    int oplog_2_size = expected_drf_size + expected_crf_size;
                    if (after != oplog_2_size) {
                        cache.getLogger().info("test failed before=" + before + " after=" + after
                                + " oplogsSize=" + oplogsSize);
                        exceptionOccured[0] = true;
                    } else {
                        exceptionOccured[0] = false;
                    }
                    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
                    lock.notify();
                }
            }
        }
    });
    try {

        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        cache.getLogger().info("putting key1");
        region.put("key1", val);
        // Disk space should have changed due to 1 put
        //assertTrue("stats did not increase after put 1 ", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        cache.getLogger().info("putting key2");
        region.put("key2", val);
        //assertTrue("stats did not increase after put 2", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();

        cache.getLogger().info("removing key1");
        region.remove("key1");
        cache.getLogger().info("removing key2");
        region.remove("key2");

        // This put will cause a switch as max-oplog size (900) will be exceeded (999)
        switchExpected[0] = true;
        cache.getLogger().info("putting key3");
        region.put("key3", val);
        cache.getLogger().info("waiting for compaction");
        synchronized (lock) {
            if (!okToExit[0]) {
                lock.wait(9000);
                assertTrue(okToExit[0]);
            }
            assertFalse(exceptionOccured[0]);
        }

        region.close();
    } finally {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(old);
    }
}