Example usage for java.lang InterruptedException getCause

List of usage examples for java.lang InterruptedException getCause

Introduction

On this page you can find example usage for java.lang.InterruptedException.getCause().

Prototype

public synchronized Throwable getCause() 

Document

Returns the cause of this throwable or null if the cause is nonexistent or unknown.
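
Most of the examples below follow the same pattern: a thread blocks on Future.get(), catches InterruptedException when the wait itself is interrupted (where getCause() is usually null), and catches ExecutionException to unwrap the task's real failure via getCause(). Below is a minimal, self-contained sketch of that pattern using only the JDK; the class name and message text are illustrative.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseExample {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit((Callable<String>) () -> {
            throw new IllegalStateException("simulated task failure");
        });
        try {
            System.out.println(future.get());
        } catch (InterruptedException e) {
            // The waiting thread itself was interrupted; an InterruptedException
            // rarely carries a cause, so getCause() is typically null here.
            Thread.currentThread().interrupt();
            System.err.println("interrupted, cause = " + e.getCause());
        } catch (ExecutionException e) {
            // The submitted task failed; the original exception is the cause.
            System.err.println("task failed, cause = " + e.getCause());
        } finally {
            executor.shutdown();
        }
    }
}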

Usage

From source file:com.delphix.session.test.ServiceTest.java

private void executeCommand(ServiceNexus nexus, long timeout, boolean hasData) {
    ServiceRequest request;

    if (hasData) {
        request = new HelloRequest(new byte[128]);
    } else {
        request = new HelloRequest();
    }

    ServiceFuture future = nexus.execute(request, null, timeout);

    try {
        future.get();
    } catch (InterruptedException e) {
        fail("command interrupted", e);
    } catch (ExecutionException e) {
        fail("command failed", e.getCause());
    } catch (CancellationException e) {
        // Do nothing
    }
}

From source file:com.delphix.session.test.ServiceTest.java

private void executeIdempotent(ServiceNexus nexus) {
    HelloRequest hello = new HelloRequest();
    hello.setIdempotent(true);

    ServiceFuture future = nexus.execute(hello);

    try {
        future.get();
        fail("idempotent command succeeded");
    } catch (InterruptedException e) {
        fail("idempotent command interrupted");
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();

        if (!(cause instanceof IdempotentRetryException)) {
            fail("idempotent command failed", e);
        }
    }

    future = nexus.execute(hello);

    try {
        future.get();
    } catch (InterruptedException e) {
        fail("idempotent command interrupted");
    } catch (ExecutionException e) {
        fail("idempotent command failed", e);
    }
}

From source file:org.jspresso.hrsample.backend.JspressoUnitOfWorkTest.java

/**
 * Tests in-TX collection element update with optimistic locking.
 */
@Test
public void testInTXCollectionElementUpdate() {
    final HibernateBackendController hbc = (HibernateBackendController) getBackendController();

    final AtomicInteger countDown = new AtomicInteger(10);
    ExecutorService es = Executors.newFixedThreadPool(countDown.get());
    List<Future<Set<String>>> futures = new ArrayList<Future<Set<String>>>();
    for (int t = countDown.intValue(); t > 0; t--) {
        futures.add(es.submit(new Callable<Set<String>>() {

            @Override
            public Set<String> call() throws Exception {
                final HibernateBackendController threadHbc = getApplicationContext()
                        .getBean("applicationBackController", HibernateBackendController.class);
                final TransactionTemplate threadTT = threadHbc.getTransactionTemplate();
                threadHbc.start(hbc.getLocale(), hbc.getClientTimeZone());
                threadHbc.setApplicationSession(hbc.getApplicationSession());
                BackendControllerHolder.setThreadBackendController(threadHbc);
                return threadTT.execute(new TransactionCallback<Set<String>>() {

                    /**
                     * {@inheritDoc}
                     */
                    @Override
                    public Set<String> doInTransaction(TransactionStatus status) {
                        DetachedCriteria compCrit = DetachedCriteria.forClass(Company.class);
                        Set<String> names = new HashSet<String>();
                        Company c = (Company) compCrit.getExecutableCriteria(threadHbc.getHibernateSession())
                                .list().iterator().next();

                        synchronized (countDown) {
                            countDown.decrementAndGet();
                            // wait for all threads to arrive here so that we are sure they
                            // have all read the same data.
                            try {
                                countDown.wait();
                            } catch (InterruptedException ex) {
                                throw new BackendException("Test has been interrupted");
                            }
                        }

                        if (c.getName().startsWith("TX_")) {
                            throw new BackendException("Wrong data read from DB");
                        }
                        c.setName("TX_" + Long.toHexString(System.currentTimeMillis()));
                        names.add(c.getName());
                        for (Department d : c.getDepartments()) {
                            d.setName(Long.toHexString(System.currentTimeMillis()));
                            names.add(d.getName());
                        }
                        return names;
                    }
                });
            }
        }));
    }
    while (countDown.get() > 0) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
            throw new BackendException("Test has been interrupted");
        }
    }
    synchronized (countDown) {
        countDown.notifyAll();
    }
    int successfulTxCount = 0;
    Set<String> names = new HashSet<String>();
    for (Future<Set<String>> f : futures) {
        try {
            names = f.get();
            successfulTxCount++;
        } catch (Exception ex) {
            if (ex.getCause() instanceof OptimisticLockingFailureException) {
                // safely ignore since this is what we are testing.
            } else {
                throw new BackendException(ex);
            }
        }
    }
    es.shutdown();
    assertTrue("Only 1 TX succeeded", successfullTxCount == 1);

    DetachedCriteria compCrit = DetachedCriteria.forClass(Company.class);
    Company c = hbc.findFirstByCriteria(compCrit, EMergeMode.MERGE_LAZY, Company.class);
    assertTrue("the company name is the one of the successfull TX", names.contains(c.getName()));
    for (Department d : c.getDepartments()) {
        assertTrue("the department name is the one of the successfull TX", names.contains(d.getName()));
    }
}

From source file:com.mirth.connect.donkey.server.channel.Channel.java

/**
 * Start the channel and all of the channel's connectors.
 */
public synchronized void start(Set<Integer> connectorsToStart) throws StartException {
    if (currentState == DeployedState.DEPLOYING || currentState == DeployedState.STOPPED) {
        List<Integer> startedMetaDataIds = new ArrayList<Integer>();

        try {
            ThreadUtils.checkInterruptedStatus();

            updateCurrentState(DeployedState.STARTING);

            /*
             * We can't guarantee the state of the process lock when the channel was stopped or
             * halted, so we just reset it.
             */
            processLock.reset();
            removeContentLock = new ReentrantLock(true);
            dispatchThreads.clear();
            shuttingDown = false;
            stopSourceQueue = false;

            // Remove any items in the queue's buffer because they may be outdated and refresh the queue size.
            sourceQueue.invalidate(true, true);

            channelExecutor = Executors.newCachedThreadPool();

            // start the destination connectors but not the destination queues
            for (DestinationChainProvider chainProvider : destinationChainProviders) {
                for (Integer metaDataId : chainProvider.getMetaDataIds()) {
                    DestinationConnector destinationConnector = chainProvider.getDestinationConnectors()
                            .get(metaDataId);

                    if (destinationConnector.getCurrentState() == DeployedState.STOPPED
                            && (connectorsToStart == null || connectorsToStart.contains(metaDataId))) {
                        startedMetaDataIds.add(metaDataId);
                        destinationConnector.start();
                    }
                }
            }

            ThreadUtils.checkInterruptedStatus();
            try {
                processUnfinishedMessages();
            } catch (InterruptedException e) {
                logger.error("Startup recovery interrupted for channel " + name + " (" + channelId + ")", e);
                throw e;
            } catch (Exception e) {
                Throwable cause;
                if (e instanceof ExecutionException) {
                    cause = e.getCause();
                } else {
                    cause = e;
                }

                logger.error("Startup recovery failed for channel " + name + " (" + channelId + "): "
                        + cause.getMessage(), cause);
            }

            ThreadUtils.checkInterruptedStatus();

            // start the destination queues
            for (Integer metaDataId : startedMetaDataIds) {
                getDestinationConnector(metaDataId).startQueue();
            }

            // Remove any items in the queue's buffer because they may be outdated and refresh the queue size.
            sourceQueue.invalidate(true, true);

            // start up the worker thread that will process queued messages
            if (!sourceConnector.isRespondAfterProcessing()) {
                int processingThreads = ((SourceConnectorPropertiesInterface) sourceConnector
                        .getConnectorProperties()).getSourceConnectorProperties().getProcessingThreads();
                queueThreads.clear();
                for (int i = 1; i <= processingThreads; i++) {
                    Thread queueThread = new Thread(Channel.this);
                    queueThread.setName("Source Queue Thread " + i + " on " + name + " (" + channelId + ")");
                    queueThread.start();
                    queueThreads.put(queueThread.getId(), queueThread);
                }
            }

            if (connectorsToStart == null || connectorsToStart.contains(0)) {
                ThreadUtils.checkInterruptedStatus();
                // start up the source connector
                if (sourceConnector.getCurrentState() == DeployedState.STOPPED) {
                    startedMetaDataIds.add(0);
                    sourceConnector.start();
                }

                updateCurrentState(DeployedState.STARTED);
            } else {
                updateCurrentState(DeployedState.PAUSED);
            }
        } catch (Throwable t) {
            if (t instanceof InterruptedException) {
                throw new StartException("Start channel task for " + name + " (" + channelId
                        + ") terminated by halt notification.", t);
            }
            // If an exception occurred, then attempt to rollback by stopping all the connectors that were started
            try {
                updateCurrentState(DeployedState.STOPPING);
                stop(startedMetaDataIds);
                updateCurrentState(DeployedState.STOPPED);
            } catch (Throwable t2) {
                if (t2 instanceof InterruptedException) {
                    throw new StartException("Start channel task for " + name + " (" + channelId
                            + ") terminated by halt notification.", t);
                }

                updateCurrentState(DeployedState.STOPPED);
            }

            throw new StartException("Failed to start channel " + name + " (" + channelId + ").", t);
        }
    } else {
        logger.warn(
                "Failed to start channel " + name + " (" + channelId + "): The channel is already running.");
    }
}

From source file:com.joyent.manta.client.MantaClient.java

/**
 * <p>Finds all directories and files recursively under a given path. Since
 * this method returns a {@link Stream}, consumers can add their own
 * additional filtering based on path, object type or other criteria.</p>
 *
 * <p>This method will make each request to each subdirectory in parallel.
 * Parallelism settings are set by JDK system property:
 * <code>java.util.concurrent.ForkJoinPool.common.parallelism</code></p>
 *
 * <p>When using a filter with this method, if the filter matches a directory,
 * then all subdirectory results for that directory will be excluded. If you
 * want to perform a match against all results, then use {@link #find(String)}
 * and then filter on the stream returned.</p>
 *
 * <p><strong>WARNING:</strong> this method is not atomic and thereby not
 * safe if other operations are performed on the directory structure while
 * it is running.</p>
 *
 * @param path directory path
 * @param filter predicate class used to filter all results returned
 * @return A recursive unsorted {@link Stream} of {@link MantaObject}
 *         instances representing the contents of all subdirectories.
 */
public Stream<MantaObject> find(final String path, final Predicate<? super MantaObject> filter) {
    /* We read directly from the iterator here to reduce the total stack
     * frames and to reduce the amount of abstraction to a minimum.
     *
     * Within this loop, we store all of the objects found in memory so
     * that we can later query find() methods for the directory objects
     * in parallel. */
    final Stream.Builder<MantaObject> objectBuilder = Stream.builder();
    final Stream.Builder<MantaObject> dirBuilder = Stream.builder();

    try (MantaDirectoryListingIterator itr = streamingIterator(path)) {
        while (itr.hasNext()) {
            final Map<String, Object> item = itr.next();
            final MantaObject obj = MantaObjectConversionFunction.INSTANCE.apply(item);

            /* We take a predicate as a method parameter because it allows
             * us to filter at the highest level within this iterator. If
             * we just passed the stream as is back to the user, then
             * they would have to filter the results *after* all of the
             * HTTP requests were made. This way the filter can help limit
             * the total number of HTTP requests made to Manta. */
            if (filter == null || filter.test(obj)) {
                objectBuilder.accept(obj);

                if (obj.isDirectory()) {
                    dirBuilder.accept(obj);
                }
            }
        }
    }

    /* All objects within this directory should be included in the results,
     * so we have a stream stored here that will later be concatenated. */
    final Stream<MantaObject> objectStream = objectBuilder.build();

    /* Directories are processed in parallel because it is the only unit
     * within our abstractions that can be properly done in parallel.
     * MantaDirectoryListingIterator forces all paging of directory
     * listings to be sequential requests. However, it works fine to
     * run multiple MantaDirectoryListingIterator instances per request.
     * That is exactly what we are doing here using streams which is
     * allowing us to do the recursive calls in a lazy fashion.
     *
     * From an HTTP request perspective, this means that only the listing for
     * the current directory is performed and no other listing
     * will be performed until the stream is read.
     */
    try {
        final Stream<MantaObject> dirStream = findForkJoinPool
                .submit(() -> dirBuilder.build().parallel().flatMap(obj -> find(obj.getPath(), filter))).get();

        /* Due to the way we concatenate, the results will be quite out of order;
         * if a consumer needs sorted results, that is their responsibility. */
        final Stream<MantaObject> stream = Stream.concat(objectStream, dirStream);

        danglingStreams.add(stream);

        return stream;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return Stream.empty();
    } catch (ExecutionException e) {
        throw new MantaException(e.getCause());
    }
}
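
A short usage sketch for the method above: assuming an already-configured MantaClient and a path that exists in your Manta account (both are placeholders, not part of the quoted source), a consumer might list every directory found recursively under that path. The returned Stream is closed with try-with-resources.

import java.util.stream.Stream;

import com.joyent.manta.client.MantaClient;
import com.joyent.manta.client.MantaObject;

public class FindDirectoriesExample {
    // Hypothetical helper; the MantaClient instance and the path are assumed to be
    // configured elsewhere and are used here for illustration only.
    static void printSubdirectories(MantaClient client, String path) {
        // Only directories pass the filter, so plain objects are skipped and
        // matching directories are recursed into in parallel.
        try (Stream<MantaObject> dirs = client.find(path, MantaObject::isDirectory)) {
            dirs.map(MantaObject::getPath).forEach(System.out::println);
        }
    }
}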

From source file:tauargus.gui.PanelTable.java

private void buttonSuppressActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_buttonSuppressActionPerformed
    JFrame parentFrame = getParentFrame();
    SuppressionMethod Soort = SuppressionMethod.GHMITER;
    if (radioButtonHyperCube.isSelected())
        Soort = SuppressionMethod.GHMITER;
    if (radioButtonModular.isSelected())
        Soort = SuppressionMethod.HITAS;
    if (radioButtonOptimal.isSelected())
        Soort = SuppressionMethod.OPTIMAL;
    if (radioButtonMarginal.isSelected())
        Soort = SuppressionMethod.MARGINAL;
    if (radioButtonNetwork.isSelected())
        Soort = SuppressionMethod.NETWORK;
    if (radioButtonUwe.isSelected())
        Soort = SuppressionMethod.UWE;
    if (radioButtonCta.isSelected())
        Soort = SuppressionMethod.CTA;
    if (radioButtonRounding.isSelected())
        Soort = SuppressionMethod.ROUNDING;

    if (Soort.isAdditivityDesirable() && !tableSet.isAdditive) {
        if (JOptionPane.NO_OPTION == JOptionPane.showConfirmDialog(this,
                "Table is not additive. Optimisation routines might be tricky\nDo you want to proceed?",
                "Question", JOptionPane.YES_NO_OPTION, JOptionPane.QUESTION_MESSAGE)) {
            return;
        }
    }

    if (Soort.isUsingSuppliedCostFunction()) {
        if (tableSet.costFunc == TableSet.COST_DIST && !Soort.canUseDistanceFunction()) {
            JOptionPane.showMessageDialog(null, "Distance function is not available for this solution");
            return;
        }
    }

    CellStatusStatistics statistics = tableSet.getCellStatusStatistics();
    if (statistics == null) {
        return; // TODO Show message
    }
    int totalUnsafeCells = statistics.totalPrimaryUnsafe();

    if (!Soort.isCosmetic()) {
        if (totalUnsafeCells == 0) {
            JOptionPane.showMessageDialog(this, "No unsafe cells found\nNo protection required");
            return;
        }
    }

    if (Soort == SuppressionMethod.OPTIMAL && totalUnsafeCells > 50) {
        if (JOptionPane.NO_OPTION == JOptionPane.showConfirmDialog(this,
                "This tabel contains " + totalUnsafeCells
                        + " unsafe cells\nthis might take a long time; do you want to proceed?",
                "Question", JOptionPane.YES_NO_OPTION)) {
            return;
        }
    }

    if (Soort.isMinMaxTableValueNeeded()) {
        if (!VraagMinTabVal(Soort, tableSet)) {
            return;
        }
    }

    switch (Soort) {
    case ROUNDING:
        if (Application.solverSelected == Application.SOLVER_CPLEX) {
            JOptionPane.showMessageDialog(null,
                    "Whether controlled rounding can be used when Cplex is selected as solver, depends on your specific license",
                    "", JOptionPane.ERROR_MESSAGE);
        }
        //else
        DialogRoundingParameters paramsR = new DialogRoundingParameters(parentFrame, true);
        if (paramsR.showDialog(tableSet) == DialogRoundingParameters.APPROVE_OPTION) {
            final SwingWorker<Integer, Void> worker = new ProgressSwingWorker<Integer, Void>(
                    ProgressSwingWorker.ROUNDER, "Rounding") {
                @Override
                protected Integer doInBackground() throws ArgusException, Exception {
                    super.doInBackground();
                    OptiSuppress.runRounder(tableSet, getPropertyChangeListener());
                    return null;
                }

                @Override
                protected void done() {
                    super.done();
                    try {
                        get();
                        JOptionPane.showMessageDialog(null,
                                "The table has been rounded\n" + "Number of steps: " + tableSet.roundMaxStep
                                        + "\n" + "Max step: "
                                        + StrUtils.formatDouble(tableSet.roundMaxJump,
                                                tableSet.respVar.nDecimals)
                                        + "\n" + "Processing time: "
                                        + StrUtils.timeToString(tableSet.processingTime));
                        ((AbstractTableModel) table.getModel()).fireTableDataChanged();
                        adjustColumnWidths();
                        updateSuppressButtons();
                    } catch (InterruptedException ex) {
                        logger.log(Level.SEVERE, null, ex);
                    } catch (ExecutionException ex) {
                        JOptionPane.showMessageDialog(null, ex.getCause().getMessage());
                    }
                }
            };

            worker.execute();
            /*                try{
            OptiSuppress.runRounder(tableSet);
            JOptionPane.showMessageDialog(null, "The table has been rounded\n" +
                        "Number of steps: " + tableSet.roundMaxStep+"\n"+
                        "Max step: " + 
                        StrUtils.formatDouble(tableSet.roundMaxJump, tableSet.respVar.nDecimals)  +"\n"+
                        "Processing time: " + StrUtils.timeToString(tableSet.processingTime));
            ((AbstractTableModel)table.getModel()).fireTableDataChanged();
            adjustColumnWidths();
            updateSuppressButtons();
                            }
            // Anco 1.6                
            //                catch (ArgusException | IOException ex) {
                            catch (ArgusException ex) {
            JOptionPane.showMessageDialog(this, ex.getMessage());}
                            catch (IOException ex) {
            JOptionPane.showMessageDialog(this, ex.getMessage());
                            }*/
        }
        break;
    case CTA: //do CTA
        final int i = JOptionPane.showConfirmDialog(parentFrame, "Do you prefer to use the expert version?",
                "Select CTA version", JOptionPane.YES_NO_CANCEL_OPTION);
        if ((i == JOptionPane.YES_OPTION) || (i == JOptionPane.NO_OPTION)) {
            new Thread() {
                @Override
                public void run() {
                    try {
                        OptiSuppress.RunCTA(tableSet, (i == JOptionPane.YES_OPTION));
                        JOptionPane.showMessageDialog(null,
                                "The CTA procedure has been completed\n" + tableSet.nSecond
                                        + " cells have been modified\n"
                                        + StrUtils.timeToString(tableSet.processingTime) + " needed");
                        ((AbstractTableModel) table.getModel()).fireTableDataChanged();
                        adjustColumnWidths();
                        updateSuppressButtons();
                    } catch (ArgusException ex) {
                        JOptionPane.showMessageDialog(null, ex.getMessage());
                    } catch (FileNotFoundException ex) {
                        JOptionPane.showMessageDialog(null, ex.getMessage());
                    } catch (IOException ex) {
                        JOptionPane.showMessageDialog(null, ex.getMessage());
                    }
                }
            }.start();
        }
        break;
    case UWE:
        DialogModularParameters uweParams = new DialogModularParameters(parentFrame, tableSet, false, true);
        if (uweParams.showDialog() == DialogModularParameters.APPROVE_OPTION) {
            new Thread() {
                @Override
                public void run() {
                    try {
                        OptiSuppress.runUWE(tableSet);
                        JOptionPane.showMessageDialog(null,
                                "The UWE procedure has finished the protection\n" + tableSet.nSecond
                                        + " cells have been suppressed\n"
                                        + StrUtils.timeToString(tableSet.processingTime) + " needed");
                        ((AbstractTableModel) table.getModel()).fireTableDataChanged();
                        adjustColumnWidths();
                        updateSuppressButtons();
                    } catch (ArgusException ex) {
                        JOptionPane.showMessageDialog(null, ex.getMessage());
                    } catch (IOException ex) {
                        JOptionPane.showMessageDialog(null, ex.getMessage());
                    }
                }
            }.start();
        }
        break;
    case GHMITER:
        DialogHypercubeParameters paramsG = new DialogHypercubeParameters(parentFrame, true);
        if (paramsG.showDialog(tableSet) == DialogHypercubeParameters.APPROVE_OPTION) {
            new Thread() {
                @Override
                public void run() {
                    try {
                        GHMiter.RunGHMiter(tableSet);
                        JOptionPane.showMessageDialog(null,
                                "The Hypercube has finished the protection\n" + tableSet.nSecond
                                        + " cells have been suppressed\n" + tableSet.ghMiterMessage
                                        + StrUtils.timeToString(tableSet.processingTime) + " needed");
                        //                              tableSet.suppressed = TableSet.SUP_GHMITER;
                        if (argus.utils.TauArgusUtils.ExistFile(Application.getTempFile("frozen.txt"))) {
                            DialogInfo Info = new DialogInfo(getParentFrame(), true);
                            Info.addLabel("Overview of the frozen cells");
                            try {
                                Info.addTextFile(Application.getTempFile("frozen.txt"));
                            } catch (ArgusException ex1) {
                            }
                            ;
                            Info.setVisible(true);
                        }
                        ((AbstractTableModel) table.getModel()).fireTableDataChanged();
                        adjustColumnWidths();
                        updateSuppressButtons();
                    } catch (ArgusException ex) {

                        JOptionPane.showMessageDialog(null, ex.getMessage());
                        if (GHMiter.ShowProto002) {
                            DialogInfo Info = new DialogInfo(getParentFrame(), true);
                            Info.addLabel("Overview of the file PROTO002");
                            Info.setSize(1000, 500);
                            Info.setLocationRelativeTo(null);
                            try {
                                Info.addTextFile(Application.getTempFile("PROTO002"));
                            } catch (ArgusException ex1) {
                            }
                            ;
                            Info.setVisible(true);
                        }
                    }
                }
            }.start();
        }
        // run hypercube method
        break;
    /*            case HITAS:
    DialogModularParameters params = new DialogModularParameters(parentFrame, tableSet, false, true);
    params.showDialog();
    try {
        boolean oke = OptiSuppress.runModular(tableSet);
        JOptionPane.showMessageDialog(this, "Modular has finished the protection\n"
            + tableSet.nSecond + " cells have been suppressed\n"
            + StrUtils.timeToString(tableSet.processingTime) + " needed");
    } //|FileNotFoundException
    catch (ArgusException | IOException ex) {
        JOptionPane.showMessageDialog(this, ex.getMessage());
    }
    break;
    */
    case HITAS:
        DialogModularParameters params = new DialogModularParameters(parentFrame, tableSet, false, true);
        if (params.showDialog() == DialogModularParameters.APPROVE_OPTION) {
            final SwingWorker<Integer, Void> worker = new ProgressSwingWorker<Integer, Void>(
                    ProgressSwingWorker.DOUBLE, "Modular approach") {
                @Override
                protected Integer doInBackground() throws ArgusException, Exception {
                    super.doInBackground();
                    OptiSuppress.runModular(tableSet, getPropertyChangeListener());
                    return null;
                }

                @Override
                protected void done() {
                    super.done();
                    try {
                        get();
                        JOptionPane.showMessageDialog(null,
                                "Modular has finished the protection\n" + tableSet.nSecond
                                        + " cells have been suppressed\n"
                                        + StrUtils.timeToString(tableSet.processingTime) + " needed");
                        tableSet.undoAudit();
                        ((AbstractTableModel) table.getModel()).fireTableDataChanged();
                        adjustColumnWidths();
                        updateSuppressButtons();
                    } catch (InterruptedException ex) {
                        logger.log(Level.SEVERE, null, ex);
                    } catch (ExecutionException ex) {
                        JOptionPane.showMessageDialog(null, ex.getCause().getMessage());
                    }
                }
            };
            worker.execute();
        }
        break;
    case OPTIMAL:
        /*               params = new DialogModularParameters(parentFrame, tableSet, true, true);
        params.showDialog();
        try{
        OptiSuppress.runOptimal(tableSet);
        JOptionPane.showMessageDialog(null, "Optimal has finished the protection\n"
            + tableSet.nSecond + " cells have been suppressed\n"
                        + StrUtils.timeToString(tableSet.processingTime) + " needed");
        tableSet.hasBeenAudited = false;
        } catch (ArgusException| IOException  ex)
        {JOptionPane.showMessageDialog(this, ex.getMessage());
        }
        // run optimal
        */
        params = new DialogModularParameters(parentFrame, tableSet, true, true);
        if (params.showDialog() == DialogModularParameters.APPROVE_OPTION) {
            final SwingWorker<Void, Void> worker = new ProgressSwingWorker<Void, Void>(
                    ProgressSwingWorker.VALUES, "Optimal approach") {

                // called in a separate thread...
                @Override
                protected Void doInBackground() throws ArgusException, Exception {
                    super.doInBackground();
                    OptiSuppress.runOptimal(tableSet, getPropertyChangeListener(),
                            checkBoxInverseWeight.isSelected(), false, 1);
                    return null;
                }

                // called on the GUI thread
                @Override
                protected void done() {
                    super.done();
                    try {
                        get();
                        JOptionPane.showMessageDialog(null,
                                "Optimal has finished the protection\n" + tableSet.nSecond
                                        + " cells have been suppressed\n"
                                        + StrUtils.timeToString(tableSet.processingTime) + " needed");
                        tableSet.undoAudit();
                        ((AbstractTableModel) table.getModel()).fireTableDataChanged();
                        adjustColumnWidths();
                        updateSuppressButtons();
                    } catch (InterruptedException ex) {
                        logger.log(Level.SEVERE, null, ex);
                    } catch (ExecutionException ex) {
                        JOptionPane.showMessageDialog(null, ex.getCause().getMessage());
                    }
                }
            };
            worker.execute();
        }
        break;
    case NETWORK:
        try {
            OptiSuppress.TestNetwork(tableSet);
        } catch (ArgusException ex) {
            JOptionPane.showMessageDialog(null, ex.getMessage());
            break;
        }
        DialogNetwork paramsN = new DialogNetwork(parentFrame, true, tableSet);
        if (paramsN.showDialog() == DialogRoundingParameters.APPROVE_OPTION) {
            new Thread() {
                @Override
                public void run() {
                    try {
                        OptiSuppress.RunNetwork(tableSet);
                        JOptionPane.showMessageDialog(null,
                                "The network has finished the protection\n" + tableSet.nSecond
                                        + " cells have been suppressed\n"
                                        + StrUtils.timeToString(tableSet.processingTime) + " needed");
                        ((AbstractTableModel) table.getModel()).fireTableDataChanged();
                        adjustColumnWidths();
                        updateSuppressButtons();
                    } catch (ArgusException ex) {
                        JOptionPane.showMessageDialog(null, ex.getMessage());
                    }
                }
            }.start();
        }
        break;
    case MARGINAL: {
        JOptionPane.showMessageDialog(null, "The marginal method still has to be implemented");
    }
    }
    //updateSuppressButtons(); // Needs to be in each "done" try{} because it will not wait for the ProgressSwingWorker to finish
    //        Suppress(tableIndex, false, Soort);
    tableSet.undoAudit();

    // TODO Optimisation: Only do this if a suppression method has run
    //((AbstractTableModel)table.getModel()).fireTableDataChanged();
    //adjustColumnWidths();
}
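
The Rounding, Modular and Optimal branches above share the same SwingWorker idiom: the long-running suppression runs in doInBackground(), and done() calls get() so that any failure surfaces as an ExecutionException whose getCause() is shown to the user. Below is a stripped-down, self-contained sketch of that idiom; the class name, result value and messages are illustrative and not part of the quoted source.

import java.util.concurrent.ExecutionException;
import javax.swing.SwingWorker;

public class ReportingWorker extends SwingWorker<Integer, Void> {

    @Override
    protected Integer doInBackground() throws Exception {
        // Long-running work goes here; any exception thrown is re-delivered
        // to done() wrapped in an ExecutionException.
        return 42;
    }

    @Override
    protected void done() {
        // Runs on the Event Dispatch Thread once doInBackground() completes.
        try {
            Integer result = get();
            System.out.println("Finished: " + result);
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            // The original failure thrown by doInBackground() is the cause.
            System.err.println("Failed: " + ex.getCause().getMessage());
        }
    }
}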

From source file:org.eclipse.che.api.builder.internal.SourcesManagerImpl.java

@Override
public void getSources(BuildLogger logger, String workspace, String project, final String sourcesUrl,
        java.io.File workDir) throws IOException {
    // Directory for sources. Keep sources to avoid downloading the whole project before each build.
    // This directory is not permanent and may be removed at any time.
    final java.io.File srcDir = new java.io.File(directory, workspace + java.io.File.separatorChar + project);
    // Temporary directory where we copy sources before build.
    final String key = workspace + project;
    try {
        synchronized (this) {
            while (key.equals(projectKeyHolder.get())) {
                wait();
            }
        }
    } catch (InterruptedException e) {
        LOG.error(e.getMessage(), e);
        Thread.currentThread().interrupt();
    }
    // Avoid multiple threads downloading the sources of the same project.
    Future<Void> future = tasks.get(key);
    final ValueHolder<IOException> errorHolder = new ValueHolder<>();
    if (future == null) {
        final FutureTask<Void> newFuture = new FutureTask<>(new Runnable() {
            @Override
            public void run() {
                try {
                    download(sourcesUrl, srcDir);
                } catch (IOException e) {
                    LOG.error(e.getMessage(), e);
                    errorHolder.set(e);
                }
            }
        }, null);
        future = tasks.putIfAbsent(key, newFuture);
        if (future == null) {
            future = newFuture;
            try {
                // Need a bit of time before publishing the sources download start message via websocket,
                // as the client may not yet have subscribed to the channel so early in the build task execution
                Thread.sleep(300);
            } catch (InterruptedException e) {
                LOG.error(e.getMessage(), e);
            }
            logger.writeLine("[INFO] Injecting source code into builder...");
            newFuture.run();
            logger.writeLine("[INFO] Source code injection finished"
                    + "\n[INFO] ------------------------------------------------------------------------");
        }
    }
    try {
        future.get(); // Block thread until download is completed.
        final IOException ioError = errorHolder.get();
        if (ioError != null) {
            throw ioError;
        }
        IoUtil.copy(srcDir, workDir, IoUtil.ANY_FILTER);
        for (SourceManagerListener listener : listeners) {
            listener.afterDownload(new SourceManagerEvent(workspace, project, sourcesUrl, workDir));
        }
        if (!srcDir.setLastModified(System.currentTimeMillis())) {
            LOG.error("Unable update modification date of {} ", srcDir);
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        // Runnable does not throw checked exceptions.
        final Throwable cause = e.getCause();
        if (cause instanceof Error) {
            throw (Error) cause;
        } else {
            throw (RuntimeException) cause;
        }
    } finally {
        tasks.remove(key);
    }
}

From source file:de.fiz.ddb.aas.auxiliaryoperations.ThreadOrganisationCreate.java

private void createOrg() throws ExecutionException, IllegalArgumentException, AASUnauthorizedException {
    InitialLdapContext vCtx = null;

    Attributes vOrgAttributes = new BasicAttributes(true);

    BasicAttribute objectClass = new BasicAttribute("objectclass", "top");
    objectClass.add(Constants.ldap_ddbOrg_ObjectClass);
    objectClass.add("organization");

    vOrgAttributes.put(objectClass);

    // --- All of this happens only if this is not a copy in the export directory
    if (!this.isAddToLicensedOrgs()) {

        // -- When creating, the status is always set to Pending:
        if (!this.isIngestingOperation()) {
            this._orgObj.setStatus(ConstEnumOrgStatus.pending);
            long vTimeStamp = new Date().getTime();
            this._orgObj.setModified(vTimeStamp);
            this._orgObj.setCreated(vTimeStamp);
        }

        if (this._performer != null) {
            this._orgObj.setModifiedBy(this._performer.getUid());
            this._orgObj.setCreatedBy(this._performer.getUid());
        }

        // -- _submit is null if isIngestingOperation or isAddToLicensedOrgs applies,
        //    and therefore does not need an additional check
        if (_submit != null) {
            GeoAdresse vGeoAdresse;
            try {
                vGeoAdresse = _submit.get(50, TimeUnit.SECONDS);
                if (vGeoAdresse.getRequestStatus() == GeoRequestStatus.OK) {
                    this._orgObj.getAddress().setLatitude(vGeoAdresse.getLatitude());
                    this._orgObj.getAddress().setLongitude(vGeoAdresse.getLongitude());
                    this._orgObj.getAddress().setLocationDisplayName(vGeoAdresse.getLocationDisplayName());
                } else {
                    LOG.log(Level.WARNING, "GeoRequestStatus: {0}, (organization id: {1})",
                            new Object[] { vGeoAdresse.getRequestStatus(), this._orgObj.getOIDs() });
                }
            } catch (InterruptedException ex) {
                LOG.log(Level.WARNING,
                        "Geocoding request exeption for organization id: " + this._orgObj.getOIDs(), ex);
            } catch (TimeoutException ex) {
                LOG.log(Level.WARNING,
                        "Geocoding request exeption for organization id: " + this._orgObj.getOIDs(), ex);
            }
        }
    }

    // -- Conversion of parameters to LDAP attributes:
    this.convertOrganizationToLdapOrgAttrsForCreate(this._orgObj, vOrgAttributes, getPerformer());

    StringBuilder vEntryDN = (this.isAddToLicensedOrgs() ? this.getLicensedOrgsDN(this._orgObj.getOIDs())
            : this.getOrgDN(this._orgObj.getOIDs()));

    try {
        // Put arbitrary (Org) properties as a JSON string into LDAP.
        if (this._orgObj.getProperties() != null && !this._orgObj.getProperties().isEmpty()) {
            vOrgAttributes.put(new BasicAttribute(Constants.ldap_ddbOrg_Properties,
                    serializer.serialize(this._orgObj.getProperties())));
        }

        // finally bind the entry
        vCtx = LDAPConnector.getSingletonInstance().takeCtx();
        ((InitialDirContext) vCtx).bind(vEntryDN.toString(), vCtx, vOrgAttributes);

        // -- Add default privilege(s) so we can assign performer
        //    but only if that is not a copy in the export directory
        if (!this.isAddToLicensedOrgs()) {
            this._orgObj.getPrivilegesSet().add(PrivilegeEnum.ADMIN_ORG);

            // create org-privileges
            for (PrivilegeEnum p : this._orgObj.getPrivilegesSet()) {
                ThreadSinglePrivilegeCreate threadSinglePrivilegeCreate = new ThreadSinglePrivilegeCreate(p,
                        this._orgObj, this._performer);
                threadSinglePrivilegeCreate.call();
            }
            // -- Logging:
            LOG.log(Level.INFO, "One organization with DN: ''{0}'' was created.", new Object[] { vEntryDN });
        } else {
            // -- Logging:
            LOG.log(Level.INFO, "One organization with DN: ''{0}'' was copied to the export directory.",
                    new Object[] { vEntryDN });
        }
    } catch (AssertionError ex) {
        LOG.log(Level.SEVERE, null, ex);
        throw new IllegalArgumentException(ex.getMessage(), ex.getCause());
    } catch (IllegalAccessException ex) {
        LOG.log(Level.SEVERE, null, ex);
        throw new ExecutionException(ex.getMessage(), ex.getCause());
    } catch (NamingException ex) {
        // LDAP: error code 68 - ENTRY_ALREADY_EXISTS: failed for Add
        // Request
        try {
            if (vCtx != null) {
                vCtx.close();
                vCtx = null;
            }
        } catch (NamingException ex1) {
            LOG.log(Level.SEVERE, null, ex1);
        }
        try {
            vCtx = LDAPConnector.getSingletonInstance().getDirContext();
        } catch (NamingException ex1) {
            LOG.log(Level.SEVERE, null, ex1);
        } catch (IllegalAccessException ex1) {
            LOG.log(Level.SEVERE, null, ex1);
        }
        throw new IllegalArgumentException(ex.getMessage());
    } finally {
        if (vCtx != null) {
            try {
                LDAPConnector.getSingletonInstance().putCtx(vCtx);
            } catch (Exception ex) {
                LOG.log(Level.SEVERE, "Exception", ex);
            }
        }
    }

}

From source file:voldemort.store.routed.ThreadPoolRoutedStore.java

@Override
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
        Map<ByteArray, byte[]> transforms) throws VoldemortException {
    StoreUtils.assertValidKeys(keys);

    Map<ByteArray, List<Versioned<byte[]>>> result = StoreUtils.newEmptyHashMap(keys);

    // Keys for each node needed to satisfy storeDef.getPreferredReads() if
    // no failures.
    Map<Node, List<ByteArray>> nodeToKeysMap = Maps.newHashMap();

    // Keep track of nodes per key that might be needed if there are
    // failures during getAll
    Map<ByteArray, List<Node>> keyToExtraNodesMap = Maps.newHashMap();

    for (ByteArray key : keys) {
        List<Node> availableNodes = availableNodes(routingStrategy.routeRequest(key.get()));

        // quickly fail if there aren't enough nodes to meet the requirement
        checkRequiredReads(availableNodes);
        int preferredReads = storeDef.getPreferredReads();
        List<Node> preferredNodes = Lists.newArrayListWithCapacity(preferredReads);
        List<Node> extraNodes = Lists.newArrayListWithCapacity(3);

        for (Node node : availableNodes) {
            if (preferredNodes.size() < preferredReads)
                preferredNodes.add(node);
            else
                extraNodes.add(node);
        }

        for (Node node : preferredNodes) {
            List<ByteArray> nodeKeys = nodeToKeysMap.get(node);
            if (nodeKeys == null) {
                nodeKeys = Lists.newArrayList();
                nodeToKeysMap.put(node, nodeKeys);
            }
            nodeKeys.add(key);
        }
        if (!extraNodes.isEmpty()) {
            List<Node> nodes = keyToExtraNodesMap.get(key);
            if (nodes == null)
                keyToExtraNodesMap.put(key, extraNodes);
            else
                nodes.addAll(extraNodes);
        }
    }

    List<Callable<GetAllResult>> callables = Lists.newArrayList();
    for (Map.Entry<Node, List<ByteArray>> entry : nodeToKeysMap.entrySet()) {
        final Node node = entry.getKey();
        final Collection<ByteArray> nodeKeys = entry.getValue();
        if (failureDetector.isAvailable(node))
            callables.add(new GetAllCallable(node, nodeKeys, transforms));
    }

    // A list of thrown exceptions, indicating the number of failures
    List<Throwable> failures = Lists.newArrayList();
    List<NodeValue<ByteArray, byte[]>> nodeValues = Lists.newArrayList();

    Map<ByteArray, MutableInt> keyToSuccessCount = Maps.newHashMap();
    for (ByteArray key : keys)
        keyToSuccessCount.put(key, new MutableInt(0));

    List<Future<GetAllResult>> futures;
    long timeoutMs = timeoutConfig.getOperationTimeout(VoldemortOpCode.GET_ALL_OP_CODE);
    try {
        // TODO What to do about timeouts? They should be longer as getAll
        // is likely to take longer. At the moment, it's just timeoutMs * 3,
        // but should this be based on the number of the keys?
        futures = executor.invokeAll(callables, timeoutMs * 3, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        throw new InsufficientOperationalNodesException("getAll operation interrupted.", e);
    }
    for (Future<GetAllResult> f : futures) {
        if (f.isCancelled()) {
            logger.warn("Get operation timed out after " + timeoutMs + " ms.");
            continue;
        }
        try {
            GetAllResult getResult = f.get();
            if (getResult.exception != null) {
                if (getResult.exception instanceof VoldemortApplicationException) {
                    throw (VoldemortException) getResult.exception;
                }
                failures.add(getResult.exception);
                continue;
            }
            for (ByteArray key : getResult.callable.nodeKeys) {
                List<Versioned<byte[]>> retrieved = getResult.retrieved.get(key);
                MutableInt successCount = keyToSuccessCount.get(key);
                successCount.increment();

                /*
                 * retrieved can be null if there are no values for the key
                 * provided
                 */
                if (retrieved != null) {
                    List<Versioned<byte[]>> existing = result.get(key);
                    if (existing == null)
                        result.put(key, Lists.newArrayList(retrieved));
                    else
                        existing.addAll(retrieved);
                }
            }
            nodeValues.addAll(getResult.nodeValues);

        } catch (InterruptedException e) {
            throw new InsufficientOperationalNodesException("getAll operation interrupted.", e);
        } catch (ExecutionException e) {
            // We catch all Throwables apart from Error in the callable,
            // so the else part should never happen
            if (e.getCause() instanceof Error)
                throw (Error) e.getCause();
            else
                logger.error(e.getMessage(), e);
        }
    }

    for (ByteArray key : keys) {
        MutableInt successCountWrapper = keyToSuccessCount.get(key);
        int successCount = successCountWrapper.intValue();
        if (successCount < storeDef.getPreferredReads()) {
            List<Node> extraNodes = keyToExtraNodesMap.get(key);
            if (extraNodes != null) {
                for (Node node : extraNodes) {
                    long startNs = System.nanoTime();
                    try {
                        List<Versioned<byte[]>> values = innerStores.get(node.getId()).get(key,
                                transforms == null ? null : transforms.get(key));
                        fillRepairReadsValues(nodeValues, key, node, values);
                        List<Versioned<byte[]>> versioneds = result.get(key);
                        if (versioneds == null)
                            result.put(key, Lists.newArrayList(values));
                        else
                            versioneds.addAll(values);
                        recordSuccess(node, startNs);
                        if (++successCount >= storeDef.getPreferredReads())
                            break;

                    } catch (UnreachableStoreException e) {
                        failures.add(e);
                        recordException(node, startNs, e);
                    } catch (VoldemortApplicationException e) {
                        throw e;
                    } catch (Exception e) {
                        logger.warn("Error in GET_ALL on node " + node.getId() + "(" + node.getHost() + ")", e);
                        failures.add(e);
                    }
                }
            }
        }
        successCountWrapper.setValue(successCount);
    }

    repairReads(nodeValues, repairReads && (transforms == null || transforms.size() == 0));

    for (Map.Entry<ByteArray, MutableInt> mapEntry : keyToSuccessCount.entrySet()) {
        int successCount = mapEntry.getValue().intValue();
        if (successCount < storeDef.getRequiredReads())
            throw new InsufficientOperationalNodesException(
                    this.storeDef.getRequiredReads() + " reads required, but " + successCount + " succeeded.",
                    failures);
    }

    return result;
}

From source file:voldemort.store.routed.RoutedStore.java

public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys) throws VoldemortException {
    StoreUtils.assertValidKeys(keys);

    Map<ByteArray, List<Versioned<byte[]>>> result = StoreUtils.newEmptyHashMap(keys);

    // Keys for each node needed to satisfy storeDef.getPreferredReads() if
    // no failures.
    Map<Node, List<ByteArray>> nodeToKeysMap = Maps.newHashMap();

    // Keep track of nodes per key that might be needed if there are
    // failures during getAll
    Map<ByteArray, List<Node>> keyToExtraNodesMap = Maps.newHashMap();

    for (ByteArray key : keys) {
        List<Node> availableNodes = availableNodes(routingStrategy.routeRequest(key.get()));

        // quickly fail if there aren't enough nodes to meet the requirement
        checkRequiredReads(availableNodes);
        int preferredReads = storeDef.getPreferredReads();
        List<Node> preferredNodes = Lists.newArrayListWithCapacity(preferredReads);
        List<Node> extraNodes = Lists.newArrayListWithCapacity(3);

        for (Node node : availableNodes) {
            if (preferredNodes.size() < preferredReads)
                preferredNodes.add(node);
            else
                extraNodes.add(node);
        }

        for (Node node : preferredNodes) {
            List<ByteArray> nodeKeys = nodeToKeysMap.get(node);
            if (nodeKeys == null) {
                nodeKeys = Lists.newArrayList();
                nodeToKeysMap.put(node, nodeKeys);
            }
            nodeKeys.add(key);
        }
        if (!extraNodes.isEmpty()) {
            List<Node> nodes = keyToExtraNodesMap.get(key);
            if (nodes == null)
                keyToExtraNodesMap.put(key, extraNodes);
            else
                nodes.addAll(extraNodes);
        }
    }

    List<Callable<GetAllResult>> callables = Lists.newArrayList();
    for (Map.Entry<Node, List<ByteArray>> entry : nodeToKeysMap.entrySet()) {
        final Node node = entry.getKey();
        final Collection<ByteArray> nodeKeys = entry.getValue();
        if (failureDetector.isAvailable(node))
            callables.add(new GetAllCallable(node, nodeKeys));
    }

    // A list of thrown exceptions, indicating the number of failures
    List<Throwable> failures = Lists.newArrayList();
    List<NodeValue<ByteArray, byte[]>> nodeValues = Lists.newArrayList();

    Map<ByteArray, MutableInt> keyToSuccessCount = Maps.newHashMap();
    for (ByteArray key : keys)
        keyToSuccessCount.put(key, new MutableInt(0));

    List<Future<GetAllResult>> futures;
    try {
        // TODO What to do about timeouts? They should be longer as getAll
        // is likely to take longer. At the moment, it's just timeoutMs * 3,
        // but should this be based on the number of the keys?
        futures = executor.invokeAll(callables, timeoutMs * 3, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        throw new InsufficientOperationalNodesException("getAll operation interrupted.", e);
    }
    for (Future<GetAllResult> f : futures) {
        if (f.isCancelled()) {
            logger.warn("Get operation timed out after " + timeoutMs + " ms.");
            continue;
        }
        try {
            GetAllResult getResult = f.get();
            if (getResult.exception != null) {
                if (getResult.exception instanceof VoldemortApplicationException) {
                    throw (VoldemortException) getResult.exception;
                }
                failures.add(getResult.exception);
                continue;
            }
            for (ByteArray key : getResult.callable.nodeKeys) {
                List<Versioned<byte[]>> retrieved = getResult.retrieved.get(key);
                MutableInt successCount = keyToSuccessCount.get(key);
                successCount.increment();

                /*
                 * retrieved can be null if there are no values for the key
                 * provided
                 */
                if (retrieved != null) {
                    List<Versioned<byte[]>> existing = result.get(key);
                    if (existing == null)
                        result.put(key, Lists.newArrayList(retrieved));
                    else
                        existing.addAll(retrieved);
                }
            }
            nodeValues.addAll(getResult.nodeValues);

        } catch (InterruptedException e) {
            throw new InsufficientOperationalNodesException("getAll operation interrupted.", e);
        } catch (ExecutionException e) {
            // We catch all Throwables apart from Error in the callable,
            // so the else part should never happen
            if (e.getCause() instanceof Error)
                throw (Error) e.getCause();
            else
                logger.error(e.getMessage(), e);
        }
    }

    for (ByteArray key : keys) {
        MutableInt successCountWrapper = keyToSuccessCount.get(key);
        int successCount = successCountWrapper.intValue();
        if (successCount < storeDef.getPreferredReads()) {
            List<Node> extraNodes = keyToExtraNodesMap.get(key);
            if (extraNodes != null) {
                for (Node node : extraNodes) {
                    long startNs = System.nanoTime();
                    try {
                        List<Versioned<byte[]>> values = innerStores.get(node.getId()).get(key);
                        fillRepairReadsValues(nodeValues, key, node, values);
                        List<Versioned<byte[]>> versioneds = result.get(key);
                        if (versioneds == null)
                            result.put(key, Lists.newArrayList(values));
                        else
                            versioneds.addAll(values);
                        recordSuccess(node, startNs);
                        if (++successCount >= storeDef.getPreferredReads())
                            break;

                    } catch (UnreachableStoreException e) {
                        failures.add(e);
                        recordException(node, startNs, e);
                    } catch (VoldemortApplicationException e) {
                        throw e;
                    } catch (Exception e) {
                        logger.warn("Error in GET_ALL on node " + node.getId() + "(" + node.getHost() + ")", e);
                        failures.add(e);
                    }
                }
            }
        }
        successCountWrapper.setValue(successCount);
    }

    repairReads(nodeValues);

    for (Map.Entry<ByteArray, MutableInt> mapEntry : keyToSuccessCount.entrySet()) {
        int successCount = mapEntry.getValue().intValue();
        if (successCount < storeDef.getRequiredReads())
            throw new InsufficientOperationalNodesException(
                    this.storeDef.getRequiredReads() + " reads required, but " + successCount + " succeeded.",
                    failures);
    }

    return result;
}