Example usage for java.lang.InterruptedException InterruptedException(String)

List of usage examples for the java.lang.InterruptedException constructor InterruptedException(String)

Introduction

On this page you can find example usage for the java.lang.InterruptedException constructor InterruptedException(String).

Prototype

public InterruptedException(String s) 

Document

Constructs an InterruptedException with the specified detail message.
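
Before the project examples below, here is a minimal, self-contained sketch of the constructor in use: a worker checks its interrupt status and throws an InterruptedException carrying a descriptive message, which the caller reads back with getMessage(). The class and method names are illustrative only and do not come from the projects listed on this page.

public class InterruptedExceptionDemo {

    // Throws an InterruptedException with a detail message once the current
    // thread has been interrupted (for example by another thread calling interrupt()).
    static void doWork() throws InterruptedException {
        long steps = 0;
        while (true) {
            if (Thread.currentThread().isInterrupted()) {
                throw new InterruptedException("work cancelled after " + steps + " steps");
            }
            steps++; // ... one unit of work ...
        }
    }

    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            try {
                doWork();
            } catch (InterruptedException e) {
                // getMessage() returns the detail message passed to the constructor
                System.out.println("Interrupted: " + e.getMessage());
            }
        });
        worker.start();
        worker.interrupt(); // request cancellation
        worker.join();
    }
}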

Usage

From source file:org.gw4e.eclipse.facade.GraphWalkerFacade.java

public static List<IFile> generateOffLineFromFile1(IWorkbenchWindow ww, TestResourceGeneration dcp,
        BuildPolicy[] generators, int timeout, IProgressMonitor monitor)
        throws IOException, CoreException, InterruptedException {
    IFile graphModel = dcp.getGraphIFile();

    List<IFile> ret = new ArrayList<IFile>();

    List<Context> executionContexts = new ArrayList<Context>();
    for (BuildPolicy policy : generators) {
        String startElement = getStartElement(dcp.getGraphFile());
        Path path = dcp.getInputPath();
        String generator = policy.getPathGenerator();
        String msg = "Offline arguments: " + path + " " + generator + " " + startElement;
        ResourceManager.logInfo(graphModel.getProject().getName(), msg);
        addContexts(executionContexts, path, generator, startElement);
    }

    int index = 0;
    for (Context context : executionContexts) {
        OfflineContext oc = new OfflineContext(generators[index]);
        index++;
        dcp.addOfflineContext(oc);
        TestExecutor executor = new TestExecutor(context);
        executor.getMachine().addObserver(new Observer() {
            @Override
            public void update(Machine machine, Element element, EventType type) {
                if (EventType.BEFORE_ELEMENT.equals(type)) {
                    oc.addMethodName(element.getName());
                    if (monitor.isCanceled()) {
                        throw new RuntimeException(
                                new InterruptedException(MessageUtil.getString("timeoutofflineorcancelled")));
                    }
                }
            }
        });

        Timer canceller = new Timer();
        canceller.schedule(new TimerTask() {
            @Override
            public void run() {
                try {
                    monitor.setCanceled(true);
                } catch (Throwable e) {
                }
            }

        }, timeout * 1000);

        try {
            Result result = executor.execute();
            canceller.cancel();
        } catch (TestExecutionException e) {

            String reason = e.getResult().getResultsAsString();
            canceller.cancel();
            ResourceManager.logException(e, reason);

            if (!ErrorDialog.AUTOMATED_MODE) { // Avoid displaying a window while running automated mode
                DialogManager.asyncDisplayErrorMessage(MessageUtil.getString("error"), reason, e);
            }
        } catch (Throwable e) {
            canceller.cancel();
            ResourceManager.logException(e);
            if (!ErrorDialog.AUTOMATED_MODE) { // Avoid displaying a window while running automated mode
                DialogManager.asyncDisplayErrorMessage(MessageUtil.getString("error"),
                        MessageUtil.getString("an_error_occured_while_running_offline_tool"), e);
            }
            return ret;
        }
    }
    dcp.updateWithOfflines();

    generateFromFile(ww, dcp, monitor);
    return ret;
}
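
In the GW4E example above, the observer callback cannot declare a checked exception, so the InterruptedException is wrapped in a RuntimeException and has to be unwrapped by whoever drives the machine. The sketch below shows that unwrapping; runMachine() is a hypothetical stand-in for the GraphWalker execution and is not part of the project's API.

public class UnwrapInterruptDemo {

    // Hypothetical stand-in for the machine execution above: a callback that
    // cannot throw checked exceptions wraps the InterruptedException instead.
    static void runMachine() {
        throw new RuntimeException(new InterruptedException("timeout or cancelled"));
    }

    public static void main(String[] args) {
        try {
            runMachine();
        } catch (RuntimeException e) {
            if (e.getCause() instanceof InterruptedException) {
                InterruptedException ie = (InterruptedException) e.getCause();
                System.out.println("Execution cancelled: " + ie.getMessage());
                Thread.currentThread().interrupt(); // restore the interrupt status
            } else {
                throw e; // unrelated failure, rethrow
            }
        }
    }
}

Calling Thread.currentThread().interrupt() after catching keeps the interrupt status visible to callers further up the stack.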

From source file:com.aol.advertising.qiao.injector.PatternMatchFileInjector.java

private void tryAcquireLock(long checksum) throws IOException, InterruptedException {
    if (logger.isDebugEnabled())
        logger.debug("try acquiring access lock for file with checksum=" + checksum);

    while (fileLockManager.containsFileLock(checksum) && running.get())
        CommonUtils.sleepButInterruptable(100);

    if (!running.get())
        throw new InterruptedException("interrupted");

    if (logger.isDebugEnabled())
        logger.debug("access lock acquired");
}
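
The injector above polls a lock table and turns a dropped running flag into an InterruptedException with a short message. A stripped-down version of that wait-or-give-up loop, using only JDK types and hypothetical names instead of the qiao utility classes, might look like this:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;

public class LockWaitDemo {

    private final AtomicBoolean running = new AtomicBoolean(true);

    // Waits until the lock condition clears; gives up with a descriptive
    // InterruptedException if the component is shut down in the meantime.
    void awaitLockRelease(BooleanSupplier lockHeld) throws InterruptedException {
        while (lockHeld.getAsBoolean() && running.get()) {
            Thread.sleep(100); // poll interval, as in the example above
        }
        if (!running.get()) {
            throw new InterruptedException("interrupted: component is shutting down");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        LockWaitDemo demo = new LockWaitDemo();
        demo.awaitLockRelease(() -> false); // lock not held: returns immediately
    }
}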

From source file:io.pyd.synchro.SyncJob.java

public void run() {

    try {
        monitor = CoreManager.getInstance().getProgressMonitor();

        AjxpHttpClient.clearCookiesStatic();
        // instantiate the daos
        ConnectionSource connectionSource = getCoreManager().getConnection();

        nodeDao = DaoManager.createDao(connectionSource, Node.class);
        syncChangeDao = DaoManager.createDao(connectionSource, SyncChange.class);
        syncLogDao = DaoManager.createDao(connectionSource, SyncLog.class);
        propertyDao = DaoManager.createDao(connectionSource, Property.class);

        syncLogDetailsDao = DaoManager.createDao(connectionSource, SyncLogDetails.class);

        currentRepository = getCoreManager().getSynchroNode(currentJobNodeID, nodeDao);
        if (currentRepository == null) {
            throw new Exception("The database returned an empty node.");
        }

        // check if core folder exists - if NOT, quit and do nothing
        if (!testRootNodeExists()) {
            return;
        }

        getCoreManager().updateSynchroState(currentRepository, (localWatchOnly ? false : true));

        currentRepository.setStatus(Node.NODE_STATUS_LOADING);
        try {
            updateRunningStatus(RUNNING_STATUS_INITIALIZING, (localWatchOnly ? false : true));
        } catch (SQLException sE) {
            Thread.sleep(100);
            updateRunningStatus(RUNNING_STATUS_INITIALIZING, (localWatchOnly ? false : true));
        }
        nodeDao.update(currentRepository);
        Server s = new Server(currentRepository.getParent());
        RestStateHolder restStateHolder = RestStateHolder.getInstance();
        restStateHolder.setServer(s);
        restStateHolder.setRepository(currentRepository);
        // set upload chunk size for 16K
        restStateHolder.setFileUploadChunkSize(RestStateHolder.FILE_UPLOAD_CHUNK_16K);
        // 5M for big files
        restStateHolder.setFileUploadChunkSizeBigFile(RestStateHolder.FILE_UPLOAD_CHUNK_5M);

        AjxpAPI.getInstance().setServer(s);
        currentLocalFolder = new File(currentRepository.getPropertyValue("target_folder"));
        direction = currentRepository.getPropertyValue("synchro_direction");

        // if(!localWatchOnly) {
        // getCoreManager().notifyUser(getMessage("job_running"),
        // "Synchronizing " + s.getUrl());
        // }
        updateRunningStatus(RUNNING_STATUS_PREVIOUS_CHANGES, (localWatchOnly ? false : true));
        final List<SyncChange> previouslyRemaining = syncChangeDao.queryForEq("jobId", currentJobNodeID);
        // Map<String, Object[]> previousChanges = new TreeMap<String,
        // Object[]>();
        Map<String, Object[]> previousChanges = createMapDBFile("previous");
        // by default - do nothing
        int action = SyncJob.TASK_DO_NOTHING;
        // check if user want to keep remote automatically
        if (autoKeepRemoteFile) {
            action = SyncJob.TASK_SOLVE_KEEP_THEIR;
        } else if (autoKeepLocalFile) {
            action = SyncJob.TASK_SOLVE_KEEP_MINE;
        }

        boolean unsolvedConflicts = SyncChange.syncChangesToTreeMap(previouslyRemaining, previousChanges,
                action);
        Map<String, Object[]> again = null;
        if (!localWatchOnly && unsolvedConflicts) {
            this.exitWithStatusAndNotify(Node.NODE_STATUS_ERROR, "job_blocking_conflicts_title",
                    "job_blocking_conflicts");
            return;
        }

        updateRunningStatus(RUNNING_STATUS_LOCAL_CHANGES, (localWatchOnly ? false : true));
        if (clearSnapshots) {
            this.clearSnapshot("local_snapshot");
            this.clearSnapshot("remote_snapshot");
        }
        List<Node> localSnapshot = EhcacheListFactory.getInstance().getList(LOCAL_SNAPSHOT_LIST);
        List<Node> remoteSnapshot = EhcacheListFactory.getInstance().getList(REMOTE_SNAPSHOT_LIST);
        Node localRootNode = loadRootAndSnapshot("local_snapshot", localSnapshot, currentLocalFolder);
        Map<String, Object[]> localDiff = loadLocalChanges(localSnapshot);

        if (unsolvedConflicts) {
            this.exitWithStatusAndNotify(Node.NODE_STATUS_ERROR, "job_blocking_conflicts_title",
                    "job_blocking_conflicts");
            return;
        }
        if (localWatchOnly && localDiff.size() == 0 && previousChanges.size() == 0) {
            this.exitWithStatus(Node.NODE_STATUS_LOADED);
            return;
        }

        // If we are here, then we must have detected some changes
        updateRunningStatus(RUNNING_STATUS_TESTING_CONNEXION);
        if (!testConnexion()) {
            return;
        }

        updateRunningStatus(RUNNING_STATUS_REMOTE_CHANGES);
        Node remoteRootNode = loadRootAndSnapshot("remote_snapshot", remoteSnapshot, null);
        Map<String, Object[]> remoteDiff = null;
        try {
            remoteDiff = loadRemoteChanges(remoteSnapshot);
        } catch (SynchroOperationException e) {
            // there was a problem with the server response - cannot go further!
            this.exitWithStatusAndNotify(Node.NODE_STATUS_ERROR, "job_server_didnt_responsed_title",
                    "job_server_didnt_responsed");
            return;
        }

        Logger.getRootLogger().info("LOCAL DIFFS: " + localDiff.size());
        Logger.getRootLogger().info("REMOTE DIFFS: " + remoteDiff.size());

        if (previousChanges.size() > 0) {
            updateRunningStatus(RUNNING_STATUS_PREVIOUS_CHANGES);
            Logger.getRootLogger().debug("Getting previous tasks");
            again = applyChanges(previousChanges, monitor, MonitorTaskType.APPLY_PREVIOUS_CHANGES);

            if (previouslyRemaining.size() > 999) {
                syncChangeDao.callBatchTasks(new Callable<Void>() {
                    public Void call() throws Exception {
                        for (int i = 0; i < previouslyRemaining.size(); i++) {
                            syncChangeDao.delete(previouslyRemaining.get(i));
                        }
                        return null;
                    }
                });

            } else {
                syncChangeDao.delete(previouslyRemaining);
            }
            this.clearSnapshot("remaining_nodes");
        }
        if (this.interruptRequired) {
            throw new InterruptedException("Interrupt required");
        }
        updateRunningStatus(RUNNING_STATUS_COMPARING_CHANGES);
        Map<String, Object[]> changes = mergeChanges(remoteDiff, localDiff);
        updateRunningStatus(RUNNING_STATUS_APPLY_CHANGES);
        Map<String, Object[]> remainingChanges = applyChanges(changes, monitor, MonitorTaskType.APPLY_CHANGES);
        if (again != null && again.size() > 0) {
            remainingChanges.putAll(again);
        }
        if (remainingChanges.size() > 0) {
            List<SyncChange> c = SyncChange.MapToSyncChanges(remainingChanges, currentJobNodeID);
            Node remainingRoot = loadRootAndSnapshot("remaining_nodes", null, null);
            for (int i = 0; i < c.size(); i++) {
                SyncChangeValue cv = c.get(i).getChangeValue();
                Node changeNode = cv.n;
                changeNode.setParent(remainingRoot);
                if (changeNode.id == 0 || !nodeDao.idExists(changeNode.id + "")) { // Not yet created!
                    nodeDao.create(changeNode);
                    Map<String, String> pValues = new HashMap<String, String>();
                    for (Property p : changeNode.properties) {
                        pValues.put(p.getName(), p.getValue());
                    }
                    propertyDao.delete(changeNode.properties);
                    Iterator<Map.Entry<String, String>> it = pValues.entrySet().iterator();
                    while (it.hasNext()) {
                        Map.Entry<String, String> ent = it.next();
                        changeNode.addProperty(ent.getKey(), ent.getValue());
                    }
                    c.get(i).setChangeValue(cv);
                } else {
                    nodeDao.update(changeNode);
                }
                syncChangeDao.create(c.get(i));
            }
        }
        updateRunningStatus(RUNNING_STATUS_CLEANING);
        takeLocalSnapshot(localRootNode, null, true, localSnapshot);
        clearSnapshot("local_tmp");

        try {
            remoteSnapshot = EhcacheListFactory.getInstance().getList(REMOTE_SNAPSHOT_LIST);
            takeRemoteSnapshot(remoteRootNode, remoteSnapshot, true);
        } catch (SynchroOperationException e) {
            // there was a problem with the server response - cannot go further!
            this.exitWithStatusAndNotify(Node.NODE_STATUS_ERROR, "job_server_didnt_responsed_title",
                    "job_server_didnt_responsed");
            return;
        }
        clearSnapshot("remote_tmp");

        cleanDB();

        // INDICATES THAT THE JOB WAS CORRECTLY SHUTDOWN
        currentRepository.setStatus(Node.NODE_STATUS_LOADED);
        currentRepository.setLastModified(new Date());
        nodeDao.update(currentRepository);

        SyncLog sl = new SyncLog();
        String status;
        String summary = "";
        if (countConflictsDetected > 0) {
            status = SyncLog.LOG_STATUS_CONFLICTS;
            summary = getMessage("job_status_conflicts").replace("%d", countConflictsDetected + "");
        } else if (countResourcesErrors > 0) {
            status = SyncLog.LOG_STATUS_ERRORS;
            summary = getMessage("job_status_errors").replace("%d", countResourcesErrors + "");
        } else {
            if (countResourcesInterrupted > 0)
                status = SyncLog.LOG_STATUS_INTERRUPT;
            else
                status = SyncLog.LOG_STATUS_SUCCESS;
            if (countFilesDownloaded > 0) {
                summary = getMessage("job_status_downloads").replace("%d", countFilesDownloaded + "");
            }
            if (countFilesUploaded > 0) {
                summary += getMessage("job_status_uploads").replace("%d", countFilesUploaded + "");
            }
            if (countResourcesSynchronized > 0) {
                summary += getMessage("job_status_resources").replace("%d", countResourcesSynchronized + "");
            }
            if (summary.equals("")) {
                summary = getMessage("job_status_nothing");
            }
        }
        sl.jobDate = (new Date()).getTime();
        sl.jobStatus = status;
        sl.jobSummary = summary;
        sl.synchroNode = currentRepository;
        syncLogDao.create(sl);

        // if there are any errors we save them, linked to the SyncLog just created
        Iterator<Entry<String, String>> errorMessagesIterator = errorMessages.entrySet().iterator();
        while (errorMessagesIterator.hasNext()) {
            Entry<String, String> entry = errorMessagesIterator.next();
            SyncLogDetails details = new SyncLogDetails();
            details.setFileName(entry.getKey());
            details.setMessage(entry.getValue());
            details.setParentLog(sl);

            syncLogDetailsDao.create(details);
        }
        // clear error messages for new synchronisation
        errorMessages.clear();

        getCoreManager().updateSynchroState(currentRepository, false);
        getCoreManager().releaseConnection();
        DaoManager.clearCache();

    } catch (InterruptedException ie) {

        getCoreManager().notifyUser("Stopping", "Last synchro was interrupted on user demand",
                this.currentJobNodeID);
        try {
            this.exitWithStatus(Node.NODE_STATUS_FRESH);
        } catch (SQLException e) {
        }

    } catch (Exception e) {

        e.printStackTrace();
        String message = e.getMessage();
        if (message == null && e.getCause() != null)
            message = e.getCause().getMessage();
        getCoreManager().notifyUser("Error", CoreManager.getMessage("err_generic") + ": " + message,
                this.currentJobNodeID, true);
        if (currentRepository != null) {
            currentRepository.setStatus(Node.NODE_STATUS_ERROR);
            try {
                updateRunningStatus(RUNNING_STATUS_CLEANING, false);
                nodeDao.update(currentRepository);
                clearTmpSnapshots();
            } catch (SQLException e1) {
                e1.printStackTrace();
            }
            getCoreManager().updateSynchroState(currentRepository, false);
        }
        getCoreManager().releaseConnection();
        DaoManager.clearCache();

    } finally {
        // synchronisation is finished (or not) - we need to close all mapDBs
        for (DB db : mapDBs) {
            db.close();
        }

    }
}

From source file:org.alfresco.extension.bulkimport.impl.BatchImporterImpl.java

private final void importVersionMetadata(final NodeRef nodeRef, final BulkImportItemVersion version,
        final boolean dryRun) throws InterruptedException {
    String type = version.getType();
    Set<String> aspects = version.getAspects();
    Map<String, Serializable> metadata = version.getMetadata();

    if (type != null) {
        if (dryRun) {
            if (info(log))
                info(log, "[DRY RUN] Would have set type of '" + String.valueOf(nodeRef) + "' to '"
                        + String.valueOf(type) + "'.");
        } else {
            if (trace(log))
                trace(log,
                        "Setting type of '" + String.valueOf(nodeRef) + "' to '" + String.valueOf(type) + "'.");
            nodeService.setType(nodeRef, createQName(serviceRegistry, type));
        }
    }

    if (aspects != null) {
        for (final String aspect : aspects) {
            if (importStatus.isStopping() || Thread.currentThread().isInterrupted())
                throw new InterruptedException(
                        Thread.currentThread().getName() + " was interrupted. Terminating early.");

            if (dryRun) {
                if (info(log))
                    info(log, "[DRY RUN] Would have added aspect '" + aspect + "' to '"
                            + String.valueOf(nodeRef) + "'.");
            } else {
                if (trace(log))
                    trace(log, "Adding aspect '" + aspect + "' to '" + String.valueOf(nodeRef) + "'.");
                nodeService.addAspect(nodeRef, createQName(serviceRegistry, aspect), null);
            }
        }
    }

    if (version.hasMetadata()) {
        if (metadata == null)
            throw new IllegalStateException(
                    "The import source has logic errors - it says it has metadata, but the metadata is null.");

        // QName all the keys.  It's baffling that NodeService doesn't have a method that accepts a Map<String, Serializable>, when things like VersionService do...
        Map<QName, Serializable> qNamedMetadata = new HashMap<>(metadata.size());

        for (final String key : metadata.keySet()) {
            if (importStatus.isStopping() || Thread.currentThread().isInterrupted())
                throw new InterruptedException(
                        Thread.currentThread().getName() + " was interrupted. Terminating early.");

            QName keyQName = createQName(serviceRegistry, key);
            Serializable value = metadata.get(key);

            qNamedMetadata.put(keyQName, value);
        }

        if (dryRun) {
            if (info(log))
                info(log, "[DRY RUN] Would have added the following properties to '" + String.valueOf(nodeRef)
                        + "':\n" + Arrays.toString(qNamedMetadata.entrySet().toArray()));
        } else {
            try {
                if (trace(log))
                    trace(log, "Adding the following properties to '" + String.valueOf(nodeRef) + "':\n"
                            + Arrays.toString(qNamedMetadata.entrySet().toArray()));
                nodeService.addProperties(nodeRef, qNamedMetadata);
            } catch (final InvalidNodeRefException inre) {
                if (!nodeRef.equals(inre.getNodeRef())) {
                    // Caused by an invalid NodeRef in the metadata (e.g. in an association)
                    throw new IllegalStateException("Invalid nodeRef found in metadata file '"
                            + version.getMetadataSource() + "'.  "
                            + "Probable cause: an association is being populated via metadata, but the "
                            + "NodeRef for the target of that association ('" + inre.getNodeRef()
                            + "') is invalid.  " + "Please double check your metadata file and try again.",
                            inre);
                } else {
                    // Logic bug in the BFSIT.  :-(
                    throw inre;
                }
            }
        }
    }
}
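
The same stop-or-interrupt check appears twice in the method above, once per loop. A small helper that centralizes it could look like the sketch below; importStatus and its isStopping() flag belong to the surrounding Alfresco class and are replaced here by a plain AtomicBoolean.

import java.util.concurrent.atomic.AtomicBoolean;

public class InterruptCheckDemo {

    // Stand-in for the bulk importer's importStatus.isStopping() flag.
    private final AtomicBoolean stopping = new AtomicBoolean(false);

    // Throws a descriptive InterruptedException when a stop was requested
    // or the current thread has been interrupted, as the loops above do inline.
    void checkInterruption() throws InterruptedException {
        if (stopping.get() || Thread.currentThread().isInterrupted()) {
            throw new InterruptedException(
                    Thread.currentThread().getName() + " was interrupted. Terminating early.");
        }
    }

    void importAspects(Iterable<String> aspects) throws InterruptedException {
        for (String aspect : aspects) {
            checkInterruption(); // one call per iteration instead of the inline check
            // ... add the aspect to the node ...
            System.out.println("adding aspect " + aspect);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        new InterruptCheckDemo().importAspects(java.util.List.of("cm:titled", "cm:auditable"));
    }
}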

From source file:com.nttec.everychan.chans.makaba.MakabaModule.java

@Override
public String sendPost(SendPostModel model, ProgressListener listener, CancellableTask task) throws Exception {
    String url = domainUrl + "makaba/posting.fcgi?json=1";
    ExtendedMultipartBuilder postEntityBuilder = ExtendedMultipartBuilder.create().setDelegates(listener, task)
            .addString("task", "post").addString("board", model.boardName)
            .addString("thread", model.threadNumber == null ? "0" : model.threadNumber);

    postEntityBuilder.addString("comment", model.comment);

    if (captchaId != null) {
        postEntityBuilder.addString("captcha_type", "2chaptcha").addString("2chaptcha_id", captchaId)
                .addString("2chaptcha_value", model.captchaAnswer);
    }
    if (task != null && task.isCancelled())
        throw new InterruptedException("interrupted");

    if (model.subject != null)
        postEntityBuilder.addString("subject", model.subject);
    if (model.name != null)
        postEntityBuilder.addString("name", model.name);
    if (model.sage)
        postEntityBuilder.addString("email", "sage");
    else if (model.email != null)
        postEntityBuilder.addString("email", model.email);

    if (model.attachments != null) {
        String[] images = new String[] { "image1", "image2", "image3", "image4" };
        for (int i = 0; i < model.attachments.length; ++i) {
            postEntityBuilder.addFile(images[i], model.attachments[i], model.randomHash);
        }
    }

    if (model.icon != -1)
        postEntityBuilder.addString("icon", Integer.toString(model.icon));

    //if (model.watermark) postEntityBuilder.addString("water_mark", "on");
    if (model.custommark)
        postEntityBuilder.addString("op_mark", "1");

    HttpRequestModel request = HttpRequestModel.builder().setPOST(postEntityBuilder.build()).build();
    String response = null;
    try {
        response = HttpStreamer.getInstance().getStringFromUrl(url, request, httpClient, null, task, true);
    } catch (HttpWrongStatusCodeException e) {
        checkCloudflareError(e, url);
        throw e;
    }
    saveUsercodeCookie();
    JSONObject makabaResult = new JSONObject(response);
    try {
        String statusResult = makabaResult.getString("Status");
        if (statusResult.equals("OK")) {
            try {
                if (model.threadNumber != null) {
                    UrlPageModel redirect = new UrlPageModel();
                    redirect.type = UrlPageModel.TYPE_THREADPAGE;
                    redirect.chanName = CHAN_NAME;
                    redirect.boardName = model.boardName;
                    redirect.threadNumber = model.threadNumber;
                    redirect.postNumber = Long.toString(makabaResult.getLong("Num"));
                    return buildUrl(redirect);
                }
            } catch (Exception e) {
                Logger.e(TAG, e);
            }
            return null;
        } else if (statusResult.equals("Redirect")) {
            UrlPageModel redirect = new UrlPageModel();
            redirect.type = UrlPageModel.TYPE_THREADPAGE;
            redirect.chanName = CHAN_NAME;
            redirect.boardName = model.boardName;
            redirect.threadNumber = Long.toString(makabaResult.getLong("Target"));
            return buildUrl(redirect);
        }
    } catch (Exception e) {
    }
    throw new Exception(makabaResult.getString("Reason"));
}

From source file:io.druid.indexing.jdbc.JDBCIndexTask.java

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema,
            new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler()
            .addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics),
                    ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(ioConfig.getUser());
    dataSource.setPassword(ioConfig.getPassword());
    dataSource.setUrl(ioConfig.getConnectURI());
    dataSource.setDriverClassLoader(getClass().getClassLoader());

    final String table = ioConfig.getTableName();

    if (!StringUtils.isEmpty(ioConfig.getDriverClass())) {
        dataSource.setDriverClassName(ioConfig.getDriverClass());
    }

    final Handle handle = new DBI(dataSource).open();
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
            final AppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics)) {
        toolbox.getDataSegmentServerAnnouncer().announce();
        appenderator = appenderator0;

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getJdbcOffsets().getOffsetMaps());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final JDBCOffsets restoredNextPartitions = toolbox.getObjectMapper()
                    .convertValue(restoredMetadataMap.get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            nextOffsets.putAll(restoredNextPartitions.getOffsetMaps());

            // Sanity checks.
            if (!restoredNextPartitions.getTable().equals(ioConfig.getTableName())) {
                throw new ISE("WTF?! Restored table[%s] but expected table[%s]",
                        restoredNextPartitions.getTable(), ioConfig.getTableName());
            }

            if (!nextOffsets.equals(ioConfig.getJdbcOffsets().getOffsetMaps())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets,
                        ioConfig.getJdbcOffsets().getOffsetMaps());
            }
        }

        // Set up sequenceNames.

        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum,
                    String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {
            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);

                return new Committer() {
                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_OFFSETS,
                                new JDBCOffsets(ioConfig.getJdbcOffsets().getTable(), snapshot));

                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };

        //      Set<Integer> assignment = assignPartitionsAndSeekToNext(handle);
        //      boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            //        while (stillReading) {
            //          if (possiblyPause(assignment)) {
            //             The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
            //             partitions upon resuming. This is safe even if the end offsets have not been modified.
            //            assignment = assignPartitionsAndSeekToNext(handle);
            //            if (assignment.isEmpty()) {
            //              log.info("All partitions have been fully read");
            //              publishOnStop = true;
            //              stopRequested = true;
            //            }
            //          }
            //          if (stopRequested) {
            //            break;
            //          }

            final String query = (ioConfig.getQuery() != null) ? ioConfig.getQuery()
                    : makeQuery(ioConfig.getColumns(), ioConfig.getJdbcOffsets());
            org.skife.jdbi.v2.Query<Map<String, Object>> dbiQuery = handle.createQuery(query);

            final ResultIterator<InputRow> rowIterator = dbiQuery.map(new ResultSetMapper<InputRow>() {
                List<String> queryColumns = (ioConfig.getColumns() == null) ? Lists.<String>newArrayList()
                        : ioConfig.getColumns();
                List<Boolean> columnIsNumeric = Lists.newArrayList();

                @Override
                public InputRow map(final int index, final ResultSet r, final StatementContext ctx)
                        throws SQLException {
                    try {
                        if (queryColumns.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                queryColumns.add(metadata.getColumnName(idx));
                            }
                            Preconditions.checkArgument(queryColumns.size() > 0,
                                    String.format("No column in table [%s]", table));
                            verifyParserSpec(parser.getParseSpec(), queryColumns);
                        }
                        if (columnIsNumeric.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            Preconditions.checkArgument(metadata.getColumnCount() >= queryColumns.size(),
                                    String.format(
                                            "number of column names [%d] exceeds the actual number of returning column values [%d]",
                                            queryColumns.size(), metadata.getColumnCount()));
                            columnIsNumeric.add(false); // dummy to make start index to 1
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                boolean isNumeric = false;
                                int type = metadata.getColumnType(idx);
                                switch (type) {
                                case BIGINT:
                                case DECIMAL:
                                case DOUBLE:
                                case FLOAT:
                                case INTEGER:
                                case NUMERIC:
                                case SMALLINT:
                                case TINYINT:
                                    isNumeric = true;
                                    break;
                                }
                                columnIsNumeric.add(isNumeric);
                            }
                        }
                        final Map<String, Object> columnMap = Maps.newHashMap();
                        int columnIdx = 1;
                        for (String column : queryColumns) {
                            Object objToPut = null;
                            if (table != null) {
                                objToPut = r.getObject(column);
                            } else {
                                objToPut = r.getObject(columnIdx);
                            }
                            columnMap.put(column, objToPut == null ? columnIsNumeric.get(columnIdx) : objToPut);

                            columnIdx++;
                        }
                        return parser.parse(columnMap);

                    } catch (IllegalArgumentException e) {
                        throw new SQLException(e);
                    }
                }
            }).iterator();

            org.skife.jdbi.v2.Query<Map<String, Object>> maxItemQuery = handle
                    .createQuery(makeMaxQuery(ioConfig.getJdbcOffsets()));
            long currOffset = maxItemQuery != null ? (long) maxItemQuery.list(1).get(0).get("MAX") : 0;

            while (rowIterator.hasNext()) {
                InputRow row = rowIterator.next();
                try {
                    if (!ioConfig.getMinimumMessageTime().isPresent()
                            || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {

                        final String sequenceName = sequenceNames.get(nextOffsets.keySet().toArray()[0]); //TODO::: check data
                        final AppenderatorDriverAddResult addResult = driver.add(row, sequenceName,
                                committerSupplier);

                        if (addResult.isOk()) {
                            // If the number of rows in the segment exceeds the threshold after adding a row,
                            // move the segment out from the active segments of AppenderatorDriver to make a new segment.
                            if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) {
                                driver.moveSegmentOut(sequenceName,
                                        ImmutableList.of(addResult.getSegmentIdentifier()));
                            }
                        } else {
                            // Failure to allocate segment puts determinism at risk, bail out to be safe.
                            // May want configurable behavior here at some point.
                            // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                            throw new ISE("Could not allocate segment for row with timestamp[%s]",
                                    row.getTimestamp());
                        }

                        fireDepartmentMetrics.incrementProcessed();
                    } else {
                        fireDepartmentMetrics.incrementThrownAway();
                    }
                } catch (ParseException e) {
                    if (tuningConfig.isReportParseExceptions()) {
                        throw e;
                    } else {
                        log.debug(e, "Dropping unparseable row[%s].", row);

                        fireDepartmentMetrics.incrementUnparseable();
                    }
                }
            }
            nextOffsets.put((int) ioConfig.getJdbcOffsets().getOffsetMaps().keySet().toArray()[0], currOffset);
            //          if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
            //              && assignment.remove(record.partition())) {
            //            log.info("Finished reading table[%s], partition[%,d].", record.topic(), record.partition());
            //            stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
            //          }
            //        }
        } finally {
            driver.persist(committerSupplier.get()); // persist pending data
        }
        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }

            status = Status.PUBLISHING;
        }

        final TransactionalSegmentPublisher publisher = (segments, commitMetadata) -> {

            final JDBCOffsets finalOffsets = toolbox.getObjectMapper()
                    .convertValue(((Map) commitMetadata).get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            // Sanity check, we should only be publishing things that match our desired end state. //TODO::: Sanity check!
            //        if (!endOffsets.equals(finalOffsets.getOffsetMaps())) {
            //          throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
            //        }

            final SegmentTransactionalInsertAction action;

            if (ioConfig.isUseTransaction()) {
                action = new SegmentTransactionalInsertAction(segments,
                        new JDBCDataSourceMetadata(ioConfig.getJdbcOffsets()),
                        new JDBCDataSourceMetadata(finalOffsets) //TODO::: Check Values
                );
            } else {
                action = new SegmentTransactionalInsertAction(segments, null, null);
            }

            log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());

            return toolbox.getTaskActionClient().submit(action).isSuccess();
        };

        // Supervised kafka tasks are killed by JDBCSupervisor if they are stuck during publishing segments or waiting
        // for hand off. See JDBCSupervisorIOConfig.completionTimeout.
        final SegmentsAndMetadata published = driver
                .publish(publisher, committerSupplier.get(), sequenceNames.values()).get();

        final SegmentsAndMetadata handedOff;
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOff = driver.registerHandoff(published).get();
        } else {
            handedOff = driver.registerHandoff(published).get(tuningConfig.getHandoffConditionTimeout(),
                    TimeUnit.MILLISECONDS);
        }

        if (handedOff == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].", Joiner.on(", ")
                    .join(Iterables.transform(handedOff.getSegments(), new Function<DataSegment, String>() {
                        @Override
                        public String apply(DataSegment input) {
                            return input.getIdentifier();
                        }
                    })), handedOff.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
                && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
        handle.close();
    }

    toolbox.getDataSegmentServerAnnouncer().unannounce();

    //TODO::implement
    return success();

}

From source file:org.kawanfw.sql.jdbc.ConnectionHttp.java

/**
 * Test if the user has hit cancel; if yes, throw a SQLException wrapping an
 * InterruptedException.
 * 
 * @throws SQLException
 *             the wrapped InterruptedException if user has hit cancel
 */
public void testIfUploadInterrupted() throws SQLException {
    if (cancelled.get()) {
        throw new SQLException(new InterruptedException(Tag.PRODUCT + " File upload interrupted by user."));
    }
}
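
Because JDBC-facing methods can only declare SQLException, the cancellation above travels as the cause of a SQLException. On the calling side it can be told apart from genuine SQL failures by inspecting getCause(); the sketch below illustrates that and is not part of the kawanfw API.

import java.sql.SQLException;

public class UploadCancelDemo {

    // Simplified stand-in for ConnectionHttp.testIfUploadInterrupted() above.
    static void testIfUploadInterrupted(boolean cancelled) throws SQLException {
        if (cancelled) {
            throw new SQLException(new InterruptedException("File upload interrupted by user."));
        }
    }

    public static void main(String[] args) {
        try {
            testIfUploadInterrupted(true);
        } catch (SQLException e) {
            if (e.getCause() instanceof InterruptedException) {
                System.out.println("Upload cancelled: " + e.getCause().getMessage());
            } else {
                e.printStackTrace(); // genuine SQL problem
            }
        }
    }
}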

From source file:net.sf.firemox.tools.MToolKit.java

/**
 * Return the loaded picture from a local place.
 * 
 * @param localFile
 *          the local file name.
 * @return the local picture.
 * @throws InterruptedException
 *           if the picture cannot be found or is malformed, or if loading is interrupted
 */
public static Image getLocalPicture(String localFile) throws InterruptedException {
    final Image result = Toolkit.getDefaultToolkit().getImage(getFile(localFile, true).getAbsolutePath());
    if (result == null) {
        throw new InterruptedException("Picture " + localFile + " has not been found");
    }
    final MediaTracker tracker = new MediaTracker(MagicUIComponents.magicForm);
    tracker.addImage(result, 0);
    tracker.waitForAll();
    if (tracker.isErrorAny()) {
        tracker.removeImage(result, 0);
        tracker.waitForAll();
        result.flush();
        throw new InterruptedException("Malformed picture " + localFile);
    }
    return result;
}

From source file:org.apache.kylin.storage.hbase.util.CubeMigrationCLI.java

private static void renameHDFSPath(String srcPath, String dstPath) throws IOException, InterruptedException {
    int nRetry = 0;
    int sleepTime = 5000;
    while (!hdfsFS.rename(new Path(srcPath), new Path(dstPath))) {
        ++nRetry;
        if (nRetry > 3) {
            throw new InterruptedException("Cannot rename folder " + srcPath + " to folder " + dstPath);
        } else {
            Thread.sleep(sleepTime * nRetry * nRetry);
        }
    }
}
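
The rename loop above reuses InterruptedException as a "gave up after retries" signal, sleeping 5, 20, then 45 seconds between attempts. A generic version of that retry-with-quadratic-backoff pattern, with the HDFS call replaced by a caller-supplied BooleanSupplier, might look like this:

import java.util.function.BooleanSupplier;

public class RetryWithBackoffDemo {

    // Retries the action with a quadratically growing sleep and signals
    // failure with an InterruptedException, mirroring renameHDFSPath() above.
    // The action, retry limit, base sleep and message are all caller-supplied.
    static void retryOrGiveUp(BooleanSupplier action, int maxRetries, long baseSleepMs,
            String failureMessage) throws InterruptedException {
        int nRetry = 0;
        while (!action.getAsBoolean()) {
            ++nRetry;
            if (nRetry > maxRetries) {
                throw new InterruptedException(failureMessage);
            }
            Thread.sleep(baseSleepMs * nRetry * nRetry);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        retryOrGiveUp(() -> true, 3, 5000, "Cannot rename folder"); // succeeds on first try
    }
}

Reusing InterruptedException as a generic "retries exhausted" signal is debatable design; a dedicated exception type is usually clearer, but the sketch keeps the original behaviour.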