Example usage for java.lang InterruptedException InterruptedException

Introduction

This page collects example usages of the java.lang.InterruptedException no-argument constructor, InterruptedException().

Prototype

public InterruptedException() 

Document

Constructs an InterruptedException with no detail message.
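
Before the harvested examples below, here is a minimal sketch of the conventional pattern around this constructor: code that detects cancellation, or catches an interrupt it cannot rethrow directly, re-asserts the thread's interrupt status and then throws a fresh InterruptedException(). All names in this sketch are illustrative and not taken from the examples on this page.

public final class CancellableStep {
    public void runOnce(boolean cancelRequested) throws InterruptedException {
        if (cancelRequested || Thread.currentThread().isInterrupted()) {
            // Re-assert the flag so callers further up the stack can still
            // observe it, then signal cancellation via the no-arg constructor.
            Thread.currentThread().interrupt();
            throw new InterruptedException();
        }
        // ... perform one unit of interruptible work ...
    }
}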

Usage

From source file:com.orange.cepheus.broker.controller.NgsiControllerTest.java

@Test
public void postQueryContextWithInterruptedException() throws Exception {

    // the localRegistrations mock always returns a providingApplication
    when(providingApplication.hasNext()).thenReturn(true);
    when(providingApplication.next()).thenReturn(new URI("http://iotagent:1234"));
    when(localRegistrations.findProvidingApplication(any(), any())).thenReturn(providingApplication);

    when(queryContextResponseListenableFuture.get()).thenThrow(new InterruptedException());

    // the ngsiClient mock always returns queryContextResponseListenableFuture when queryContext is called
    when(ngsiClient.queryContext(any(), any(), any())).thenReturn(queryContextResponseListenableFuture);

    mockMvc.perform(post("/v1/queryContext").content(json(mapper, createQueryContextTemperature()))
            .contentType(MediaType.APPLICATION_JSON)).andExpect(status().isOk())
            .andExpect(MockMvcResultMatchers.jsonPath("$.errorCode.code").value("500"))
            .andExpect(
                    MockMvcResultMatchers.jsonPath("$.errorCode.reasonPhrase").value("Receiver internal error"))
            .andExpect(MockMvcResultMatchers.jsonPath("$.errorCode.details")
                    .value("An unknown error at the receiver has occured"));
}
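
The detail worth isolating above is the Mockito stub: thenThrow(new InterruptedException()) is legal only because Future.get() declares that checked exception. A distilled sketch with hypothetical names:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.Future;

class InterruptedFutureStubSketch {
    @SuppressWarnings("unchecked")
    static Future<String> interruptedFuture() throws Exception {
        Future<String> future = (Future<String>) mock(Future.class);
        // Legal only because Future.get() declares InterruptedException.
        when(future.get()).thenThrow(new InterruptedException());
        return future;
    }
}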

From source file:org.eclipse.mylyn.tasks.ui.wizards.AbstractRepositorySettingsPage.java

/**
 * Validate settings provided by the {@link #getValidator(TaskRepository) validator}, typically the server settings.
 *
 * @since 2.0
 */
protected void validateSettings() {
    TaskRepository newTaskRepository = createTaskRepository();
    final Validator validator = getValidator(newTaskRepository);
    if (validator == null) {
        return;
    }

    try {
        getWizard().getContainer().run(true, true, new IRunnableWithProgress() {
            public void run(IProgressMonitor monitor) throws InvocationTargetException, InterruptedException {
                monitor.beginTask(Messages.AbstractRepositorySettingsPage_Validating_server_settings,
                        IProgressMonitor.UNKNOWN);
                try {
                    validator.run(monitor);
                    if (validator.getStatus() == null) {
                        validator.setStatus(Status.OK_STATUS);
                    }
                } catch (CoreException e) {
                    validator.setStatus(e.getStatus());
                } catch (OperationCanceledException e) {
                    validator.setStatus(Status.CANCEL_STATUS);
                    throw new InterruptedException();
                } catch (Exception e) {
                    throw new InvocationTargetException(e);
                } finally {
                    monitor.done();
                }
            }
        });
    } catch (InvocationTargetException e) {
        StatusManager.getManager()
                .handle(new Status(IStatus.ERROR, TasksUiPlugin.ID_PLUGIN,
                        Messages.AbstractRepositorySettingsPage_Internal_error_validating_repository, e),
                        StatusManager.SHOW | StatusManager.LOG);
        return;
    } catch (InterruptedException e) {
        // canceled
        return;
    }

    getWizard().getContainer().updateButtons();
    applyValidatorResult(validator);
    if (isValid) {
        saveToValidatedProperties(newTaskRepository);
    }
}
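
The pattern here is the Eclipse convention: an IRunnableWithProgress signals user cancellation by throwing the no-argument InterruptedException, which the wizard container propagates so the outer catch can treat it as "canceled" and return. A minimal sketch of that contract:

import org.eclipse.jface.operation.IRunnableWithProgress;

class CancellationContractSketch {
    static final IRunnableWithProgress RUNNABLE = monitor -> {
        // ... long-running work, polling the monitor for cancellation ...
        if (monitor.isCanceled()) {
            throw new InterruptedException();
        }
    };
}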

From source file:com.mirth.connect.donkey.server.channel.Channel.java

/**
 * Process a source message and return the final processed composite message once all
 * destinations have completed and the post-processor has executed.
 *
 * @param sourceMessage
 *            A source connector message
 * @param markAsProcessed
 *            Whether to mark the final message as processed once it has finished
 * @return The final processed composite message containing the source message and all
 *         destination messages
 * @throws InterruptedException
 *             if the processing thread is interrupted
 */
protected Message process(ConnectorMessage sourceMessage, boolean markAsProcessed) throws InterruptedException {
    ThreadUtils.checkInterruptedStatus();
    long messageId = sourceMessage.getMessageId();

    if (sourceMessage.getMetaDataId() != 0 || sourceMessage.getStatus() != Status.RECEIVED) {
        throw new RuntimeException("Received a source message with an invalid state");
    }

    // create a final merged message that will contain the merged maps from each destination chain's processed message
    Message finalMessage = new Message();
    finalMessage.setMessageId(messageId);
    finalMessage.setServerId(serverId);
    finalMessage.setChannelId(channelId);
    finalMessage.setReceivedDate(sourceMessage.getReceivedDate());
    finalMessage.getConnectorMessages().put(0, sourceMessage);

    // run the raw message through the pre-processor script
    String processedRawContent = null;

    ThreadUtils.checkInterruptedStatus();

    try {
        processedRawContent = preProcessor.doPreProcess(sourceMessage);
    } catch (DonkeyException e) {
        sourceMessage.setStatus(Status.ERROR);
        sourceMessage.setProcessingError(e.getFormattedError());
    }

    /*
     * TRANSACTION: Process Source - store processed raw content - update the source status -
     * store transformed content - store encoded content - update source maps - create connector
     * messages for each destination chain with RECEIVED status and maps
     */
    ThreadUtils.checkInterruptedStatus();
    DonkeyDao dao = daoFactory.getDao();

    try {
        if (sourceMessage.getStatus() == Status.ERROR) {
            dao.updateStatus(sourceMessage, Status.RECEIVED);

            if (StringUtils.isNotBlank(sourceMessage.getProcessingError())) {
                dao.updateErrors(sourceMessage);
            }

            ThreadUtils.checkInterruptedStatus();
            dao.commit(storageSettings.isDurable());
            dao.close();
            finishMessage(finalMessage, markAsProcessed);
            return finalMessage;
        }

        if (processedRawContent != null) {
            // store the processed raw content
            sourceMessage.setProcessedRaw(new MessageContent(channelId, messageId, 0, ContentType.PROCESSED_RAW,
                    processedRawContent, sourceConnector.getInboundDataType().getType(), false));
        }

        // send the message to the source filter/transformer and then update its status
        try {
            sourceConnector.getFilterTransformerExecutor().processConnectorMessage(sourceMessage);
        } catch (DonkeyException e) {
            if (e instanceof MessageSerializerException) {
                eventDispatcher.dispatchEvent(new ErrorEvent(channelId, 0, messageId, ErrorEventType.SERIALIZER,
                        sourceConnector.getSourceName(), null, e.getMessage(), e));
            }

            sourceMessage.setStatus(Status.ERROR);
            sourceMessage.setProcessingError(e.getFormattedError());
        }

        dao.updateStatus(sourceMessage, Status.RECEIVED);

        // Set the source connector's custom column map
        sourceConnector.getMetaDataReplacer().setMetaDataMap(sourceMessage, metaDataColumns);

        // Store the custom columns
        if (!sourceMessage.getMetaDataMap().isEmpty() && storageSettings.isStoreCustomMetaData()) {
            ThreadUtils.checkInterruptedStatus();
            dao.insertMetaData(sourceMessage, metaDataColumns);
        }

        if (storageSettings.isStoreMaps()) {
            ThreadUtils.checkInterruptedStatus();

            // update the message maps generated by the filter/transformer
            dao.updateMaps(sourceMessage);

            // update the source map. The keys cannot change but the state of the values can.
            dao.updateSourceMap(sourceMessage);
        }

        // if the message was filtered or an error occurred, then finish
        if (sourceMessage.getStatus() != Status.TRANSFORMED) {
            if (storageSettings.isStoreProcessedRaw() && sourceMessage.getProcessedRaw() != null) {
                ThreadUtils.checkInterruptedStatus();
                dao.insertMessageContent(sourceMessage.getProcessedRaw());
            }

            if (storageSettings.isStoreTransformed() && sourceMessage.getTransformed() != null) {
                dao.insertMessageContent(sourceMessage.getTransformed());
            }

            if (StringUtils.isNotBlank(sourceMessage.getProcessingError())) {
                dao.updateErrors(sourceMessage);
            }

            ThreadUtils.checkInterruptedStatus();
            dao.commit();
            dao.close();

            finishMessage(finalMessage, markAsProcessed);
            return finalMessage;
        }

        // store the raw, transformed and encoded content
        boolean insertedContent = false;
        ThreadUtils.checkInterruptedStatus();

        if (storageSettings.isStoreProcessedRaw() && sourceMessage.getProcessedRaw() != null) {
            dao.batchInsertMessageContent(sourceMessage.getProcessedRaw());
            insertedContent = true;
        }

        if (storageSettings.isStoreTransformed() && sourceMessage.getTransformed() != null) {
            dao.batchInsertMessageContent(sourceMessage.getTransformed());
            insertedContent = true;
        }

        if (storageSettings.isStoreSourceEncoded() && sourceMessage.getEncoded() != null) {
            dao.batchInsertMessageContent(sourceMessage.getEncoded());
            insertedContent = true;
        }

        if (insertedContent) {
            dao.executeBatchInsertMessageContent(channelId);
        }

        // create a message for each destination chain
        Map<Integer, ConnectorMessage> destinationMessages = new HashMap<Integer, ConnectorMessage>();
        MessageContent sourceEncoded = sourceMessage.getEncoded();

        // get the list of destination meta data ids to send to
        Collection<Integer> metaDataIds = null;

        if (sourceMessage.getSourceMap().containsKey(Constants.DESTINATION_SET_KEY)) {
            metaDataIds = (Collection<Integer>) sourceMessage.getSourceMap().get(Constants.DESTINATION_SET_KEY);
        }

        List<DestinationChain> destinationChains = new ArrayList<DestinationChain>();

        for (DestinationChainProvider chainProvider : destinationChainProviders) {
            DestinationChain chain = chainProvider.getChain();

            // The order of the enabledMetaDataId list needs to be based on the chain order.
            // We do not use ListUtils here because there is no official guarantee of order.
            if (metaDataIds != null) {
                List<Integer> enabledMetaDataIds = new ArrayList<Integer>();
                for (Integer id : chainProvider.getMetaDataIds()) {
                    if (metaDataIds.contains(id)) {
                        enabledMetaDataIds.add(id);
                    }
                }
                chain.setEnabledMetaDataIds(enabledMetaDataIds);
            }

            // if any destinations in this chain are enabled, create messages for them
            if (!chain.getEnabledMetaDataIds().isEmpty()) {
                ThreadUtils.checkInterruptedStatus();
                Integer metaDataId = chain.getEnabledMetaDataIds().get(0);

                DestinationConnector destinationConnector = chainProvider.getDestinationConnectors()
                        .get(metaDataId);

                // create the raw content from the source's encoded content
                MessageContent raw = new MessageContent(channelId, messageId, metaDataId, ContentType.RAW,
                        sourceEncoded.getContent(), destinationConnector.getInboundDataType().getType(),
                        sourceEncoded.isEncrypted());

                // create the message and set the raw content
                ConnectorMessage message = new ConnectorMessage(channelId, name, messageId, metaDataId,
                        sourceMessage.getServerId(), Calendar.getInstance(), Status.RECEIVED);
                message.setConnectorName(destinationConnector.getDestinationName());
                message.setChainId(chainProvider.getChainId());
                message.setOrderId(destinationConnector.getOrderId());

                // We don't create a new map here because the source map is read-only and thus won't ever be changed
                message.setSourceMap(sourceMessage.getSourceMap());
                message.setChannelMap(new HashMap<String, Object>(sourceMessage.getChannelMap()));
                message.setResponseMap(new HashMap<String, Object>(sourceMessage.getResponseMap()));
                message.setRaw(raw);

                // store the new message, but we don't need to store the content because we will reference the source's encoded content
                dao.insertConnectorMessage(message, storageSettings.isStoreMaps(), true);

                destinationMessages.put(metaDataId, message);
            }

            destinationChains.add(chain);
        }

        ThreadUtils.checkInterruptedStatus();
        dao.commit();
        dao.close();

        /*
         * Construct a list of only the enabled destination chains. This is done because we
         * don't know beforehand which destination chain will be the "last" one.
         */
        List<DestinationChain> enabledChains = new ArrayList<DestinationChain>();
        for (DestinationChain chain : destinationChains) {
            if (!chain.getEnabledMetaDataIds().isEmpty()) {
                chain.setMessage(destinationMessages.get(chain.getEnabledMetaDataIds().get(0)));
                enabledChains.add(chain);
            }
        }

        if (!enabledChains.isEmpty()) {
            // Execute each destination chain (but the last one) and store the tasks in a list
            List<Future<List<ConnectorMessage>>> destinationChainTasks = new ArrayList<Future<List<ConnectorMessage>>>();

            for (int i = 0; i <= enabledChains.size() - 2; i++) {
                try {
                    DestinationChain chain = enabledChains.get(i);
                    chain.setName(
                            "Destination Chain Thread " + (i + 1) + " on " + name + " (" + channelId + ")");
                    destinationChainTasks.add(channelExecutor.submit(chain));
                } catch (RejectedExecutionException e) {
                    Thread.currentThread().interrupt();
                    throw new InterruptedException();
                }
            }

            List<ConnectorMessage> connectorMessages = null;

            // Always call the last chain directly rather than submitting it as a Future
            try {
                DestinationChain chain = enabledChains.get(enabledChains.size() - 1);
                chain.setName("Destination Chain Thread " + enabledChains.size() + " on " + name + " ("
                        + channelId + ")");
                connectorMessages = chain.call();
            } catch (Throwable t) {
                handleDestinationChainThrowable(t);
            }

            addConnectorMessages(finalMessage, sourceMessage, connectorMessages);

            // Get the result message from each destination chain's task and merge the map data into the final merged message. If an exception occurs, return immediately without sending the message to the post-processor.
            for (Future<List<ConnectorMessage>> task : destinationChainTasks) {
                connectorMessages = null;

                try {
                    connectorMessages = task.get();
                } catch (Exception e) {
                    handleDestinationChainThrowable(e);
                }

                addConnectorMessages(finalMessage, sourceMessage, connectorMessages);
            }
        }

        finishMessage(finalMessage, markAsProcessed);
        return finalMessage;
    } finally {
        if (!dao.isClosed()) {
            dao.close();
        }
    }
}
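
The repeated ThreadUtils.checkInterruptedStatus() calls mark the points at which a halted channel may safely abandon the transaction. Mirth's actual helper is not shown on this page, but a plausible sketch of such a check (an assumption, not the verified implementation) is:

final class ThreadUtilsSketch {
    // Assumed behavior: convert a set interrupt flag into the checked exception.
    static void checkInterruptedStatus() throws InterruptedException {
        if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedException();
        }
    }
}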

From source file:com.mirth.connect.donkey.server.channel.Channel.java

private void handleDestinationChainThrowable(Throwable t) throws OutOfMemoryError, InterruptedException {
    Throwable cause;
    if (t instanceof ExecutionException) {
        cause = t.getCause();
    } else {
        cause = t;
    }

    // TODO: make sure we are catching out of memory errors correctly here
    if (cause.getMessage() != null && cause.getMessage().contains("Java heap space")) {
        logger.error(cause.getMessage(), cause);
        throw new OutOfMemoryError();
    }

    if (cause instanceof CancellationException) {
        Thread.currentThread().interrupt();
        throw new InterruptedException();
    } else if (cause instanceof InterruptedException) {
        Thread.currentThread().interrupt();
        throw (InterruptedException) cause;
    }

    throw new RuntimeException(cause);
}
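
Two details worth noting in this handler: a CancellationException is translated into a fresh InterruptedException() after re-asserting the interrupt flag, and an InterruptedException unwrapped from an ExecutionException is rethrown only after Thread.currentThread().interrupt() is called, since the interruption originally happened on the worker thread and this thread's own flag was never set.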

From source file:io.pyd.synchro.SyncJob.java

/**
 * Rewrites a local file with remote content. Many things can go wrong, so
 * different exception types are thrown. On success, <code>targetFile</code>
 * contains the <code>uploadFile</code> content.
 *
 * @param uri
 * @param targetFile
 * @param uploadFile
 * @throws SynchroOperationException
 * @throws SynchroFileOperationException
 * @throws InterruptedException
 */
protected void uriContentToFile(URI uri, File targetFile, File uploadFile)
        throws SynchroOperationException, SynchroFileOperationException, InterruptedException {

    RestRequest rest = this.getRequest();
    int postedProgress = 0;
    int buffersize = 16384;
    int count = 0;
    HttpEntity entity;
    try {
        entity = rest.getNotConsumedResponseEntity(uri, null, uploadFile, true);
    } catch (Exception e) {
        throw new SynchroOperationException("Error during response entity: " + e.getMessage(), e);
    }
    long fullLength = entity.getContentLength();
    if (fullLength <= 0) {
        fullLength = 1;
    }

    Logger.getRootLogger().info("Downloading " + fullLength + " bytes");

    InputStream input = null;
    try {
        input = entity.getContent();
    } catch (IllegalStateException e) {
        throw new SynchroOperationException("Error during getting entity content: " + e.getMessage(), e);
    } catch (IOException e) {
        throw new SynchroOperationException("Error during getting entity content: " + e.getMessage(), e);
    }
    BufferedInputStream in = new BufferedInputStream(input, buffersize);

    FileOutputStream output;
    try {
        String dir = targetFile.getPath();
        if (dir != null) {
            dir = dir.substring(0, dir.lastIndexOf(File.separator));
            File dirExist = new File(dir);
            if (!dirExist.exists()) {
                Logger.getRootLogger().info("Need to create directory: " + dir);
                dirExist.mkdirs();
            }
        }

        output = new FileOutputStream(targetFile.getPath());
    } catch (FileNotFoundException e) {
        throw new SynchroFileOperationException("Error during file accessing: " + e.getMessage(), e);
    }
    BufferedOutputStream out = new BufferedOutputStream(output);

    byte[] data = new byte[buffersize];
    int total = 0;

    long startTime = System.nanoTime();
    long lastTime = startTime;
    int lastTimeTotal = 0;

    long secondLength = 1000000000;
    long interval = (long) 2 * secondLength;

    try {
        while ((count = in.read(data)) != -1) {
            long duration = System.nanoTime() - lastTime;

            int tmpTotal = total + count;
            // publishing the progress....
            int tmpProgress = (int) (tmpTotal * 100 / fullLength);
            if (tmpProgress - postedProgress > 0 || duration > secondLength) {
                if (duration > interval) {
                    lastTime = System.nanoTime();
                    long lastTimeBytes = (long) ((tmpTotal - lastTimeTotal) * secondLength / 1024 / 1000);
                    long speed = (lastTimeBytes / (duration));
                    double bytesleft = (double) (((double) fullLength - (double) tmpTotal) / 1024);
                    @SuppressWarnings("unused")
                    double ETC = bytesleft / (speed * 10);
                }
                if (tmpProgress != postedProgress) {
                    logChange(getMessage("job_log_downloading"),
                            targetFile.getName() + " - " + tmpProgress + "%");
                }
                postedProgress = tmpProgress;
            }
            out.write(data, 0, count);
            total = tmpTotal;
            if (this.interruptRequired) {
                break;
            }
        }
        out.flush();
        out.close();
        in.close();
    } catch (IOException e) {
        throw new SynchroFileOperationException("Error during file content rewriting: " + e.getMessage(), e);
    }
    if (this.interruptRequired) {
        rest.release();
        throw new InterruptedException();
    }
    rest.release();
}
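
Unlike the other examples, this method polls its own interruptRequired field rather than the thread's interrupt status, releases the REST connection, and only then throws InterruptedException(). A distilled sketch of that flag-based cancellation (volatile is added here as an assumption, for cross-thread visibility):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

class FlagCancelledCopySketch {
    private volatile boolean interruptRequired; // assumption: set from another thread

    void copy(InputStream in, OutputStream out) throws IOException, InterruptedException {
        byte[] buffer = new byte[16384];
        int count;
        while ((count = in.read(buffer)) != -1) {
            out.write(buffer, 0, count);
            if (interruptRequired) {
                // Mirror the example: stop copying, let the caller release
                // resources, then report cancellation to callers.
                throw new InterruptedException();
            }
        }
    }

    void requestInterrupt() {
        interruptRequired = true;
    }
}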

From source file:org.apache.hadoop.mapred.JobTracker.java

JobTracker(final JobConf conf, String identifier, Clock clock, QueueManager qm)
        throws IOException, InterruptedException {
    this.queueManager = qm;
    this.clock = clock;
    // Set ports, start RPC servers, setup security policy etc.
    InetSocketAddress addr = getAddress(conf);
    this.localMachine = addr.getHostName();
    this.port = addr.getPort();
    // find the owner of the process
    // get the desired principal to load
    UserGroupInformation.setConfiguration(conf);
    SecurityUtil.login(conf, JT_KEYTAB_FILE, JT_USER_NAME, localMachine);

    long secretKeyInterval = conf.getLong(DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    secretManager = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
            DELEGATION_TOKEN_GC_INTERVAL);
    secretManager.startThreads();

    MAX_JOBCONF_SIZE = conf.getLong(MAX_USER_JOBCONF_SIZE_KEY, MAX_JOBCONF_SIZE);
    //
    // Grab some static constants
    //
    TASKTRACKER_EXPIRY_INTERVAL = conf.getLong("mapred.tasktracker.expiry.interval", 10 * 60 * 1000);
    RETIRE_JOB_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.interval", 24 * 60 * 60 * 1000);
    RETIRE_JOB_CHECK_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.check", 60 * 1000);
    retiredJobsCacheSize = conf.getInt("mapred.job.tracker.retiredjobs.cache.size", 1000);
    MAX_COMPLETE_USER_JOBS_IN_MEMORY = conf.getInt("mapred.jobtracker.completeuserjobs.maximum", 100);

    // values related to heuristic graylisting (a "fault" is a per-job
    // blacklisting; too many faults => node is graylisted across all jobs):
    TRACKER_FAULT_TIMEOUT_WINDOW = // 3 hours
            conf.getInt("mapred.jobtracker.blacklist.fault-timeout-window", 3 * 60);
    TRACKER_FAULT_BUCKET_WIDTH = // 15 minutes
            conf.getInt("mapred.jobtracker.blacklist.fault-bucket-width", 15);
    TRACKER_FAULT_THRESHOLD = conf.getInt("mapred.max.tracker.blacklists", 4);
    // future:  rename to "mapred.jobtracker.blacklist.fault-threshold" for
    // namespace consistency

    if (TRACKER_FAULT_BUCKET_WIDTH > TRACKER_FAULT_TIMEOUT_WINDOW) {
        TRACKER_FAULT_BUCKET_WIDTH = TRACKER_FAULT_TIMEOUT_WINDOW;
    }
    TRACKER_FAULT_BUCKET_WIDTH_MSECS = (long) TRACKER_FAULT_BUCKET_WIDTH * 60 * 1000;

    // ideally, TRACKER_FAULT_TIMEOUT_WINDOW should be an integral multiple of
    // TRACKER_FAULT_BUCKET_WIDTH, but round up just in case:
    NUM_FAULT_BUCKETS = (TRACKER_FAULT_TIMEOUT_WINDOW + TRACKER_FAULT_BUCKET_WIDTH - 1)
            / TRACKER_FAULT_BUCKET_WIDTH;

    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND);
    if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) {
        NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND;
    }

    HEARTBEATS_SCALING_FACTOR = conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, DEFAULT_HEARTBEATS_SCALING_FACTOR);
    if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) {
        HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR;
    }

    // This configuration is there solely for tuning purposes and
    // once this feature has been tested in real clusters and an appropriate
    // value for the threshold has been found, this config might be taken out.
    AVERAGE_BLACKLIST_THRESHOLD = conf.getFloat("mapred.cluster.average.blacklist.threshold", 0.5f);

    // This is a directory of temporary submission files.  We delete it
    // on startup, and can delete any files that we're done with
    this.conf = conf;
    JobConf jobConf = new JobConf(conf);

    initializeTaskMemoryRelatedConfig();

    // Read the hosts/exclude files to restrict access to the jobtracker.
    this.hostsReader = new HostsFileReader(conf.get("mapred.hosts", ""), conf.get("mapred.hosts.exclude", ""));
    aclsManager = new ACLsManager(conf, new JobACLsManager(conf), queueManager);

    LOG.info("Starting jobtracker with owner as " + getMROwner().getShortUserName());

    // Create the scheduler
    Class<? extends TaskScheduler> schedulerClass = conf.getClass("mapred.jobtracker.taskScheduler",
            JobQueueTaskScheduler.class, TaskScheduler.class);
    taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf);

    // Set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
    }

    int handlerCount = conf.getInt("mapred.job.tracker.handler.count", 10);
    this.interTrackerServer = RPC.getServer(this, addr.getHostName(), addr.getPort(), handlerCount, false, conf,
            secretManager);
    if (LOG.isDebugEnabled()) {
        Properties p = System.getProperties();
        for (Iterator it = p.keySet().iterator(); it.hasNext();) {
            String key = (String) it.next();
            String val = p.getProperty(key);
            LOG.debug("Property '" + key + "' is " + val);
        }
    }

    String infoAddr = NetUtils.getServerAddress(conf, "mapred.job.tracker.info.bindAddress",
            "mapred.job.tracker.info.port", "mapred.job.tracker.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.startTime = clock.getTime();
    infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf,
            aclsManager.getAdminsAcl());
    infoServer.setAttribute("job.tracker", this);
    // initialize history parameters.
    final JobTracker jtFinal = this;
    getMROwner().doAs(new PrivilegedExceptionAction<Boolean>() {
        @Override
        public Boolean run() throws Exception {
            JobHistory.init(jtFinal, conf, jtFinal.localMachine, jtFinal.startTime);
            return true;
        }
    });

    infoServer.addServlet("reducegraph", "/taskgraph", TaskGraphServlet.class);
    infoServer.start();

    this.trackerIdentifier = identifier;

    createInstrumentation();

    // The rpc/web-server ports can be ephemeral ports... 
    // ... ensure we have the correct info
    this.port = interTrackerServer.getListenerAddress().getPort();
    this.conf.set("mapred.job.tracker", (this.localMachine + ":" + this.port));
    this.localFs = FileSystem.getLocal(conf);
    LOG.info("JobTracker up at: " + this.port);
    this.infoPort = this.infoServer.getPort();
    this.conf.set("mapred.job.tracker.http.address", infoBindAddress + ":" + this.infoPort);
    LOG.info("JobTracker webserver: " + this.infoServer.getPort());

    // start the recovery manager
    recoveryManager = new RecoveryManager();

    while (!Thread.currentThread().isInterrupted()) {
        try {
            // if we haven't contacted the namenode go ahead and do it
            if (fs == null) {
                fs = getMROwner().doAs(new PrivilegedExceptionAction<FileSystem>() {
                    public FileSystem run() throws IOException {
                        return FileSystem.get(conf);
                    }
                });
            }
            // clean up the system dir, which will only work if hdfs is out of 
            // safe mode
            if (systemDir == null) {
                systemDir = new Path(getSystemDir());
            }
            try {
                FileStatus systemDirStatus = fs.getFileStatus(systemDir);
                if (!systemDirStatus.getOwner().equals(getMROwner().getShortUserName())) {
                    throw new AccessControlException("The systemdir " + systemDir + " is not owned by "
                            + getMROwner().getShortUserName());
                }
                if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
                    LOG.warn("Incorrect permissions on " + systemDir + ". Setting it to "
                            + SYSTEM_DIR_PERMISSION);
                    fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
                }
            } catch (FileNotFoundException fnf) {
            } //ignore
            // Make sure that the backup data is preserved
            FileStatus[] systemDirData = fs.listStatus(this.systemDir);
            // Check if history is enabled, as we can't have persistence with
            // history disabled
            if (conf.getBoolean("mapred.jobtracker.restart.recover", false) && systemDirData != null) {
                for (FileStatus status : systemDirData) {
                    try {
                        recoveryManager.checkAndAddJob(status);
                    } catch (Throwable t) {
                        LOG.warn("Failed to add the job " + status.getPath().getName(), t);
                    }
                }

                // Check if there are jobs to be recovered
                hasRestarted = recoveryManager.shouldRecover();
                if (hasRestarted) {
                    break; // if there is something to recover else clean the sys dir
                }
            }
            LOG.info("Cleaning up the system directory");
            fs.delete(systemDir, true);
            if (FileSystem.mkdirs(fs, systemDir, new FsPermission(SYSTEM_DIR_PERMISSION))) {
                break;
            }
            LOG.error("Mkdirs failed to create " + systemDir);
        } catch (AccessControlException ace) {
            LOG.warn("Failed to operate on mapred.system.dir (" + systemDir + ") because of permissions.");
            LOG.warn(
                    "Manually delete the mapred.system.dir (" + systemDir + ") and then start the JobTracker.");
            LOG.warn("Bailing out ... ", ace);
            throw ace;
        } catch (IOException ie) {
            LOG.info("problem cleaning system directory: " + systemDir, ie);
        }
        Thread.sleep(FS_ACCESS_RETRY_PERIOD);
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    // Same with 'localDir' except it's always on the local disk.
    if (!hasRestarted) {
        jobConf.deleteLocalFiles(SUBDIR);
    }

    // Initialize history DONE folder
    FileSystem historyFS = getMROwner().doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException {
            JobHistory.initDone(conf, fs);
            final String historyLogDir = JobHistory.getCompletedJobHistoryLocation().toString();
            infoServer.setAttribute("historyLogDir", historyLogDir);

            infoServer.setAttribute("serialNumberDirectoryDigits",
                    Integer.valueOf(JobHistory.serialNumberDirectoryDigits()));

            infoServer.setAttribute("serialNumberTotalDigits",
                    Integer.valueOf(JobHistory.serialNumberTotalDigits()));

            return new Path(historyLogDir).getFileSystem(conf);
        }
    });
    infoServer.setAttribute("fileSys", historyFS);
    infoServer.setAttribute("jobConf", conf);
    infoServer.setAttribute("aclManager", aclsManager);

    if (JobHistoryServer.isEmbedded(conf)) {
        LOG.info("History server being initialized in embedded mode");
        jobHistoryServer = new JobHistoryServer(conf, aclsManager, infoServer);
        jobHistoryServer.start();
        LOG.info("Job History Server web address: " + JobHistoryServer.getAddress(conf));
    }

    this.dnsToSwitchMapping = ReflectionUtils.newInstance(conf.getClass("topology.node.switch.mapping.impl",
            ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
    this.numTaskCacheLevels = conf.getInt("mapred.task.cache.levels", NetworkTopology.DEFAULT_HOST_LEVEL);

    //initializes the job status store
    completedJobStatusStore = new CompletedJobStatusStore(conf, aclsManager);
}
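
The startup loop above retries filesystem setup while polling Thread.currentThread().isInterrupted(), then converts a detected interrupt into the checked exception the constructor declares (Thread.sleep can also throw it directly between attempts). The skeleton of that pattern, with illustrative names:

import java.util.function.BooleanSupplier;

class RetryUntilReadySketch {
    static void waitUntilReady(BooleanSupplier attempt, long retryPeriodMs) throws InterruptedException {
        while (!Thread.currentThread().isInterrupted()) {
            if (attempt.getAsBoolean()) {
                return;
            }
            Thread.sleep(retryPeriodMs); // may itself throw InterruptedException
        }
        // Interrupt observed between attempts: surface it as the checked exception.
        throw new InterruptedException();
    }
}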

From source file:osu.beatmapdownloader.JFrame.java

public boolean downloadSong(String idMap, int prio, String Directory) {
       String fileName = null;
       try {
           String url = "";
           if (model.get(prio).toString().contains("Blood")) {
               url = "http://bloodcat.com/osu/d/" + idMap;
               L_server.setText("Bloodcat Server");
           } else {
               if (C_OsuServer.isSelected() && model.get(0).toString().contains("Osu")) {
                   if (C_noVideo.isSelected())
                       url = "http://osu.ppy.sh/d/" + idMap + "n";
                   else
                       url = "http://osu.ppy.sh/d/" + idMap;
                   L_server.setText("Osu! Server");
               }
           }
           long start = System.nanoTime();
           long totalRead = 0;
           final double NANOS_PER_SECOND = 1000000000.0;
           final double BYTES_PER_MIB = 1024 * 1024;
           URLConnection request = null;
           request = new URL(url).openConnection();
           request.setRequestProperty("Cookie", url.contains("ppy") ? Cookie : "");
           request.setRequestProperty("User-Agent",
                   "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.4; en-US; rv:1.9.2.2) Gecko/20100316 Firefox/3.6.2");
           InputStream in = request.getInputStream();
           String raw = request.getHeaderField("Content-Disposition");
           fileName = raw.split("=")[1].replaceAll("\"", "").replaceAll("; filename*", "");
           request.getContentLength();
           double size = request.getContentLength();
           File aux = File.createTempFile(fileName.replaceAll("\\*", "").replaceAll(";", ""), ".osz");
           L_FileName.setText(fileName.replaceAll("\\*", "").replaceAll(";", ""));
           FileOutputStream out = new FileOutputStream(aux);
           byte[] buffer = new byte[1024];
           int len = in.read(buffer);
           L_totalSize.setText((String) new DecimalFormat("#.##").format(size * 0.000001) + " Mb");
           int bytes = 0;
           Pro_ProgressBar.setMinimum(0);
           Pro_ProgressBar.setMaximum((int) (size / 1000));
           long acu = 0;
           while (len != -1) {
               bytes++;
               out.write(buffer, 0, len);
               len = in.read(buffer);
               if (len == 1024) {
                   acu += len;
                   BigDecimal a = new BigDecimal(acu * 0.000001);
                   BigDecimal roundOff = a.setScale(2, BigDecimal.ROUND_HALF_EVEN);
                   L_fileValue.setText(roundOff + "");
               }
               Pro_ProgressBar.setValue(bytes);
               totalRead += len;
               BigDecimal a = new BigDecimal(
                       ((NANOS_PER_SECOND / BYTES_PER_MIB * totalRead / (System.nanoTime() - start + 1)) * 1000));
               BigDecimal speed = a.setScale(2, BigDecimal.ROUND_HALF_EVEN);
               //String speed = new DecimalFormat("#.##").format(((NANOS_PER_SECOND / BYTES_PER_MIB * totalRead / (System.nanoTime() - start + 1)) * 1000));
               L_Second.setText(speed + "");
               BigDecimal b = new BigDecimal((((size * 0.000001) - (acu * 0.000001)) * 0.1)
                       / (((NANOS_PER_SECOND / BYTES_PER_MIB * totalRead / (System.nanoTime() - start + 1))
                               * 1000))
                       * 10000);
               BigDecimal speed_total = b.setScale(2, BigDecimal.ROUND_HALF_EVEN);
               L_seconds.setText(speed_total + "");
               if (Thread.interrupted()) {
                   in.close();
                   out.close();
                   aux.deleteOnExit();
                   throw new InterruptedException();
               }
           }
           in.close();
           out.close();
           FileUtils.copyFile(aux,
                   new File(Directory + File.separator + fileName.replaceAll("\\*", "").replaceAll(";", "")));
           aux.deleteOnExit();
           return true;
       } catch (Exception e) {
           errorFatal("DOWNLOADING");
           errorFatal(e.toString());
           errorFatal("--------------------------------------");
           prio++;
           errorConection++;
           L_Trying.setText(errorConection + "");
           int limitTry = 3;
           if (errorConection >= limitTry) {
               Errors++;
               L_Errors.setText(Errors + "");
               errorDownload("-The connection to this Beatmap was failed, '" + errorConection
                       + "' times, it was skipped.");
               errorDownload("--The filename is '" + fileName + "', with the id '" + idMap + "'. ");
               errorDownload("----------------------");
           } else {
               if (model.getSize() == prio)
                   prio = 0;
               downloadSong(idMap, prio, Directory);
           }
           return false;
       }
   }
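
Note the use of Thread.interrupted() in the download loop: unlike isInterrupted(), it clears the interrupt flag, which is reasonable immediately before throwing InterruptedException since the exception itself now carries the cancellation signal. Also note that the catch (Exception e) block swallows that InterruptedException and may retry the download, so a caller cannot rely on the interrupt actually stopping this method.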