Example usage for the java.util.concurrent.Semaphore(int) constructor

Introduction

This page collects usage examples for the java.util.concurrent.Semaphore(int) constructor.

Prototype

public Semaphore(int permits) 

Document

Creates a Semaphore with the given number of permits and nonfair fairness setting.
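
Since the single-argument constructor always produces a nonfair semaphore, a minimal sketch of creating one and pairing each acquire() with a release() looks like the following (class name and printed output are illustrative only):

import java.util.concurrent.Semaphore;

public class SemaphoreBasics {
    public static void main(String[] args) throws InterruptedException {
        // Three permits, nonfair ordering: waiting threads are not guaranteed FIFO access.
        Semaphore permits = new Semaphore(3);

        permits.acquire();                 // take one permit, blocking if none are free
        try {
            System.out.println("available permits: " + permits.availablePermits()); // prints 2
        } finally {
            permits.release();             // always hand the permit back
        }
    }
}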

Usage

From source file:com.impetus.ankush2.ganglia.GangliaDeployer.java

@Override
public boolean removeNode(final ClusterConfig conf, Collection<String> nodes) {
    try {
        if (newClusterConf == null) {
            // setting clusterconf, componentconf and logger
            if (!initializeDataMembers(conf)) {
                return false;
            }
        }
        final Semaphore semaphore = new Semaphore(nodes.size());
        // undeploying package from each node
        for (final String host : nodes) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    // setting nodestatus default value to false
                    boolean nodestatus = false;
                    // if service stopped successfully, then removing
                    // component from node
                    if (stopNode(host)) {
                        nodestatus = removeNode(host);
                    }
                    conf.getNodes().get(host).setStatus(nodestatus);
                    if (semaphore != null) {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodes.size());
    } catch (Exception e) {
        addClusterError("Could not remove " + getComponentName(), e);
        return false;
    }
    return AnkushUtils.getStatus(conf.getNodes());
}
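
The example above sizes the semaphore to the number of nodes, takes one permit for each task it submits, and then blocks on acquire(nodes.size()) until every worker has released its permit. A stripped-down sketch of that wait-for-all pattern follows (the host list and pool size are hypothetical); note that the sketch releases inside a finally block so a failing task cannot leave the caller blocked:

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class WaitForAllSketch {
    public static void main(String[] args) throws InterruptedException {
        List<String> hosts = List.of("node1", "node2", "node3"); // hypothetical work items
        ExecutorService executor = Executors.newFixedThreadPool(2);
        Semaphore semaphore = new Semaphore(hosts.size());

        for (String host : hosts) {
            semaphore.acquire();               // one permit per submitted task
            executor.execute(() -> {
                try {
                    System.out.println("processing " + host);
                } finally {
                    semaphore.release();       // hand the permit back when done
                }
            });
        }

        semaphore.acquire(hosts.size());       // blocks until every task has released
        executor.shutdown();
    }
}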

From source file:hudson.plugins.android_emulator.SdkInstaller.java

/**
 * Acquires an exclusive lock for the machine we're executing on.
 * <p>
 * The lock only has one permit, meaning that other executors on the same node which want to
 * install SDK components will block here until the lock is released by another executor.
 *
 * @return The semaphore for the current machine, which must be released once finished with.
 */
private static Semaphore acquireLock() throws InterruptedException {
    // Retrieve the lock for this node
    Semaphore semaphore;
    final Node node = Computer.currentComputer().getNode();
    synchronized (node) {
        semaphore = mutexByNode.get(node);
        if (semaphore == null) {
            semaphore = new Semaphore(1);
            mutexByNode.put(node, semaphore);
        }
    }

    // Block until the lock is available
    semaphore.acquire();
    return semaphore;
}
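
Here new Semaphore(1) acts as a mutex that is created lazily for each node and cached in a map. A minimal sketch of the same per-key locking idea, using ConcurrentHashMap.computeIfAbsent in place of the explicit synchronized block (the key and class names are hypothetical):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;

public class PerKeyLockSketch {
    // One binary semaphore per key, created lazily and atomically.
    private static final Map<String, Semaphore> locksByKey = new ConcurrentHashMap<>();

    static Semaphore acquireLock(String key) throws InterruptedException {
        Semaphore lock = locksByKey.computeIfAbsent(key, k -> new Semaphore(1));
        lock.acquire();                        // blocks while another caller holds this key's permit
        return lock;                           // caller must release() when finished
    }

    public static void main(String[] args) throws InterruptedException {
        Semaphore lock = acquireLock("build-machine-1"); // hypothetical key
        try {
            System.out.println("doing exclusive work for build-machine-1");
        } finally {
            lock.release();
        }
    }
}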

From source file:io.kodokojo.bdd.stage.cluster.ClusterApplicationGiven.java

private void startKodokojo() {
    String keystorePath = System.getProperty("javax.net.ssl.keyStore", null);
    if (StringUtils.isBlank(keystorePath)) {
        String keystorePathDefined = new File("").getAbsolutePath()
                + "/src/test/resources/keystore/mykeystore.jks";
        System.out.println(keystorePathDefined);

        System.setProperty("javax.net.ssl.keyStore", keystorePathDefined);
    }
    BrickUrlFactory brickUrlFactory = new MarathonBrickUrlFactory(marathonUrl);
    System.setProperty("javax.net.ssl.keyStorePassword", "password");
    System.setProperty("security.ssl.rootCa.ks.alias", "rootcafake");
    System.setProperty("security.ssl.rootCa.ks.password", "password");
    System.setProperty("application.dns.domain", "kodokojo.io");
    System.setProperty("redis.host", redisService.getHost());
    System.setProperty("redis.port", "" + redisService.getPort());

    System.setProperty("marathon.url", "http://" + dockerTestSupport.getServerIp() + ":8080");
    System.setProperty("lb.defaultIp", dockerTestSupport.getServerIp());
    System.setProperty("application.dns.domain", "kodokojo.dev");
    LOGGER.debug("redis.port: {}", System.getProperty("redis.port"));

    injector = Guice.createInjector(new PropertyModule(new String[] {}), new RedisModule(),
            new SecurityModule(), new ServiceModule(), new ActorModule(), new AwsModule(),
            new EmailSenderModule(), new UserEndpointModule(), new ProjectEndpointModule(),
            new AbstractModule() {
                @Override
                protected void configure() {

                }

                @Provides
                @Singleton
                ServiceLocator provideServiceLocator(MarathonConfig marathonConfig) {
                    return new MarathonServiceLocator(marathonConfig.url());
                }

                @Provides
                @Singleton
                ConfigurationStore provideConfigurationStore(MarathonConfig marathonConfig) {
                    return new MarathonConfigurationStore(marathonConfig.url());
                }

                @Provides
                @Singleton
                BrickManager provideBrickManager(MarathonConfig marathonConfig,
                        BrickConfigurerProvider brickConfigurerProvider, ProjectStore projectStore,
                        ApplicationConfig applicationConfig, BrickUrlFactory brickUrlFactory) {
                    MarathonServiceLocator marathonServiceLocator = new MarathonServiceLocator(
                            marathonConfig.url());
                    return new MarathonBrickManager(marathonConfig.url(), marathonServiceLocator,
                            brickConfigurerProvider, projectStore, false, applicationConfig.domain(),
                            brickUrlFactory);
                }
            });
    Launcher.INJECTOR = injector;
    userStore = injector.getInstance(UserStore.class);
    projectStore = injector.getInstance(ProjectStore.class);
    entityStore = injector.getInstance(EntityStore.class);
    //BrickFactory brickFactory = injector.getInstance(BrickFactory.class);
    restEntryPointHost = "localhost";
    restEntryPointPort = TestUtils.getEphemeralPort();
    projectManager = new DefaultProjectManager(domain, injector.getInstance(ConfigurationStore.class),
            projectStore, injector.getInstance(BootstrapConfigurationProvider.class), new NoOpDnsManager(),
            new DefaultBrickConfigurerProvider(brickUrlFactory),
            injector.getInstance(BrickConfigurationStarter.class), brickUrlFactory);
    httpUserSupport = new HttpUserSupport(new OkHttpClient(), restEntryPointHost + ":" + restEntryPointPort);
    Set<SparkEndpoint> sparkEndpoints = new HashSet<>(
            injector.getInstance(Key.get(new TypeLiteral<Set<SparkEndpoint>>() {
            })));
    Key<UserAuthenticator<SimpleCredential>> authenticatorKey = Key
            .get(new TypeLiteral<UserAuthenticator<SimpleCredential>>() {
            });
    UserAuthenticator<SimpleCredential> userAuthenticator = injector.getInstance(authenticatorKey);
    sparkEndpoints.add(new ProjectSparkEndpoint(userAuthenticator, userStore, projectStore, projectManager,
            injector.getInstance(BrickFactory.class)));
    httpEndpoint = new HttpEndpoint(restEntryPointPort, new SimpleUserAuthenticator(userStore), sparkEndpoints);
    Semaphore semaphore = new Semaphore(1);
    try {
        semaphore.acquire();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }

    Thread t = new Thread(() -> {
        httpEndpoint.start();
        semaphore.release();
    });
    t.start();
    try {
        semaphore.acquire();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
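
In this example the single permit is taken before the start-up thread is spawned, so the second acquire() blocks until that thread calls release() once httpEndpoint.start() has returned. A stripped-down sketch of the same handoff follows (startService() is a hypothetical placeholder); starting from new Semaphore(0) would give the same effect without the initial acquire:

import java.util.concurrent.Semaphore;

public class StartupHandoffSketch {
    public static void main(String[] args) throws InterruptedException {
        Semaphore started = new Semaphore(1);
        started.acquire();                     // take the only permit up front

        Thread starter = new Thread(() -> {
            startService();                    // hypothetical blocking start-up call
            started.release();                 // signal that start-up has finished
        });
        starter.start();

        started.acquire();                     // blocks until the starter thread releases
        System.out.println("service is up");
    }

    private static void startService() {
        // placeholder for the real start-up work
    }
}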

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testCustomUnit() throws InterruptedException {
    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        assertSample(r.getTimersList(), "timer", 8d);
        Assert.assertEquals(0, r.getCountersCount());
        Assert.assertEquals(0, r.getGaugesCount());
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final Semaphore semaphore = new Semaphore(0);
    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setEventHandler(new CompletionHandler(semaphore)).build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(),
            createQuantityMap("timer", TsdQuantity.newInstance(8d, () -> "Foo")),
            TEST_EMPTY_SERIALIZATION_COUNTERS, TEST_EMPTY_SERIALIZATION_GAUGES);

    sink.record(event);
    semaphore.acquire();

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}
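
This test, like several others on this page, constructs new Semaphore(0) so that the first acquire() blocks until the completion handler calls release(), turning the semaphore into a one-shot completion signal. A minimal sketch of that idea with a hypothetical asynchronous task; tryAcquire with a timeout makes a missed callback fail fast instead of hanging the test:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class CompletionSignalSketch {
    public static void main(String[] args) throws InterruptedException {
        Semaphore done = new Semaphore(0);     // no permits: acquire() blocks until a release()

        // Hypothetical asynchronous operation that signals completion via the semaphore.
        CompletableFuture.runAsync(() -> {
            System.out.println("async work finished");
            done.release();
        });

        // Wait for the signal, but give up after 10 seconds instead of hanging.
        if (!done.tryAcquire(10, TimeUnit.SECONDS)) {
            throw new IllegalStateException("async operation did not complete in time");
        }
    }
}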

From source file:com.thoughtworks.go.server.service.BuildAssignmentServiceIntegrationTest.java

@Test
public void shouldNotReloadScheduledJobPlansWhenAgentWorkAssignmentIsInProgress() throws Exception {
    fixture.createPipelineWithFirstStageScheduled();
    Pipeline pipeline = pipelineDao.mostRecentPipeline(fixture.pipelineName);
    JobInstance job = pipeline.getFirstStage().getJobInstances().first();

    final JobInstanceService mockJobInstanceService = mock(JobInstanceService.class);

    final Pipeline pipeline1 = pipeline;
    final Semaphore sem = new Semaphore(1);
    sem.acquire();
    when(mockJobInstanceService.orderedScheduledBuilds())
            .thenReturn(jobInstanceService.orderedScheduledBuilds());
    when(mockJobInstanceService.buildByIdWithTransitions(job.getId()))
            .thenReturn(jobInstanceService.buildByIdWithTransitions(job.getId()));

    ScheduledPipelineLoader scheduledPipelineLoader = new ScheduledPipelineLoader(null, null, null, null, null,
            null, null, null) {
        @Override
        public Pipeline pipelineWithPasswordAwareBuildCauseByBuildId(long buildId) {
            sem.release();
            sleepQuietly(1000);
            verify(mockJobInstanceService, times(1)).orderedScheduledBuilds();
            return pipeline1;
        }
    };

    final BuildAssignmentService buildAssignmentServiceUnderTest = new BuildAssignmentService(goConfigService,
            mockJobInstanceService, scheduleService, agentService, environmentConfigService,
            transactionTemplate, scheduledPipelineLoader, pipelineService, builderFactory,
            maintenanceModeService, elasticAgentPluginService, systemEnvironment, secretParamResolver,
            jobStatusTopic, consoleService);

    final Throwable[] fromThread = new Throwable[1];
    buildAssignmentServiceUnderTest.onTimer();

    Thread assigner = new Thread(() -> {
        try {
            final AgentConfig agentConfig = AgentMother.localAgentWithResources("some-other-resource");

            buildAssignmentServiceUnderTest.assignWorkToAgent(agent(agentConfig));
        } catch (Throwable e) {
            e.printStackTrace();
            fromThread[0] = e;
        } finally {

        }
    }, "assignmentThread");
    assigner.start();

    sem.acquire();
    buildAssignmentServiceUnderTest.onTimer();

    assigner.join();
    assertThat(fromThread[0], is(nullValue()));
}

From source file:com.parse.ParsePushTest.java

@Test
public void testSendInBackgroundWithCallbackSuccess() throws Exception {
    // Mock controller
    ParsePushController controller = mock(ParsePushController.class);
    when(controller.sendInBackground(any(ParsePush.State.class), anyString()))
            .thenReturn(Task.<Void>forResult(null));
    ParseCorePlugins.getInstance().registerPushController(controller);

    // Make sample ParsePush data and call method
    ParsePush push = new ParsePush();
    JSONObject data = new JSONObject();
    data.put("key", "value");
    List<String> channels = new ArrayList<>();
    channels.add("test");
    channels.add("testAgain");
    push.builder.expirationTime((long) 1000).data(data).pushToIOS(true).channelSet(channels);
    final Semaphore done = new Semaphore(0);
    final Capture<Exception> exceptionCapture = new Capture<>();
    push.sendInBackground(new SendCallback() {
        @Override
        public void done(ParseException e) {
            exceptionCapture.set(e);
            done.release();
        }
    });

    // Make sure controller is executed and state parameter is correct
    assertNull(exceptionCapture.get());
    assertTrue(done.tryAcquire(1, 10, TimeUnit.SECONDS));
    ArgumentCaptor<ParsePush.State> stateCaptor = ArgumentCaptor.forClass(ParsePush.State.class);
    verify(controller, times(1)).sendInBackground(stateCaptor.capture(), anyString());
    ParsePush.State state = stateCaptor.getValue();
    assertTrue(state.pushToIOS());
    assertEquals(data, state.data(), JSONCompareMode.NON_EXTENSIBLE);
    assertEquals(2, state.channelSet().size());
    assertTrue(state.channelSet().contains("test"));
    assertTrue(state.channelSet().contains("testAgain"));
}

From source file:com.thoughtworks.go.server.service.BackupServiceIntegrationTest.java

@Test
public void shouldExecutePostBackupScriptAndReturnResultOnSuccess() throws InterruptedException {
    final Semaphore waitForBackupToComplete = new Semaphore(1);
    GoConfigService configService = mock(GoConfigService.class);
    ServerConfig serverConfig = new ServerConfig();
    serverConfig.setBackupConfig(new BackupConfig(null, "jcmd", false, false));
    when(configService.serverConfig()).thenReturn(serverConfig);
    GoMailSender goMailSender = mock(GoMailSender.class);
    when(configService.getMailSender()).thenReturn(goMailSender);
    when(configService.adminEmail()).thenReturn("mail@admin.com");
    when(configService.isUserAdmin(admin)).thenReturn(true);
    TimeProvider timeProvider = mock(TimeProvider.class);
    DateTime now = new DateTime();
    when(timeProvider.currentDateTime()).thenReturn(now);

    final MessageCollectingBackupUpdateListener backupUpdateListener = new MessageCollectingBackupUpdateListener(
            waitForBackupToComplete);

    waitForBackupToComplete.acquire();
    backupService = new BackupService(artifactsDirHolder, configService, timeProvider, backupInfoRepository,
            systemEnvironment, configRepository, databaseStrategy, backupQueue);
    Thread backupThd = new Thread(() -> backupService.startBackup(admin, backupUpdateListener));

    backupThd.start();
    waitForBackupToComplete.acquire();
    assertThat(backupUpdateListener.getMessages()
            .contains(BackupProgressStatus.POST_BACKUP_SCRIPT_COMPLETE.getMessage()), is(true));
    backupThd.join();
}

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testEndpointNotAvailable() throws InterruptedException {
    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        Assert.assertEquals(0, r.getTimersCount());
        Assert.assertEquals(0, r.getCountersCount());
        Assert.assertEquals(0, r.getGaugesCount());
    })).willReturn(WireMock.aResponse().withStatus(404)));

    final AtomicBoolean assertionResult = new AtomicBoolean(false);
    final Semaphore semaphore = new Semaphore(0);
    final org.slf4j.Logger logger = Mockito.mock(org.slf4j.Logger.class);
    final Sink sink = new ApacheHttpSink(
            new ApacheHttpSink.Builder().setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
                    .setEventHandler(new AttemptCompletedAssertionHandler(assertionResult, 1, 2, false,
                            new CompletionHandler(semaphore))),
            logger);

    final TsdEvent event = new TsdEvent(ANNOTATIONS, TEST_EMPTY_SERIALIZATION_TIMERS,
            TEST_EMPTY_SERIALIZATION_COUNTERS, TEST_EMPTY_SERIALIZATION_GAUGES);

    sink.record(event);
    semaphore.acquire();

    // Ensure expected handler was invoked
    Assert.assertTrue(assertionResult.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());

    // Assert that an IOException was captured
    Mockito.verify(logger).error(
            Mockito.startsWith("Encountered failure when sending metrics to HTTP endpoint; uri="),
            Mockito.any(RuntimeException.class));
}

From source file:org.apache.hadoop.hdfs.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();
    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());
    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));
    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());
    dfs.close();
    cluster.shutdown();
    sockDir.close();
}

From source file:com.impetus.ankush2.hadoop.monitor.HadoopComponentMonitor.java

/**
 * Edits Hadoop configuration parameters on the cluster nodes.
 */
private void editparams() {

    this.hadoopConfig = HadoopUtils.getHadoopConfig(this.clusterConf);
    String errMsg = "Unable to process request to edit Hadoop configuration files.";

    if (!HadoopUtils.isManagedByAnkush(this.hadoopConfig)) {
        this.addAndLogError(errMsg + " " + Constant.Registration.ErrorMsg.NOT_MANAGED_MODE);
        return;
    }

    try {
        this.clusterConf.incrementOperation();
        boolean isAgentDown = AnkushUtils.isAnyAgentDown(this.hadoopConfig.getNodes().keySet());
        if (isAgentDown) {
            throw new AnkushException(
                    "Could not process edit parameters request: AnkushAgent is down on few nodes.");
        }

        final Map<String, Object> confParams = (Map<String, Object>) parameterMap.get("params");

        final String loggedUser = (String) parameterMap.get("loggedUser");

        AppStoreWrapper.getExecutor().execute(new Runnable() {
            @Override
            public void run() {
                final Semaphore semaphore = new Semaphore(hadoopConfig.getNodes().size());
                try {
                    // connect with all the component nodes
                    AnkushUtils.connectNodesString(clusterConf, hadoopConfig.getNodes().keySet());

                    for (final String host : hadoopConfig.getNodes().keySet()) {

                        semaphore.acquire();
                        AppStoreWrapper.getExecutor().execute(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    for (Entry entry : confParams.entrySet()) {

                                        // get fileName
                                        String fileName = (String) entry.getKey();
                                        // get config params list
                                        List<Map> params = (List<Map>) entry.getValue();

                                        for (Map param : params) {
                                            final Parameter parameter = JsonMapperUtil.objectFromMap(param,
                                                    Parameter.class);

                                            String status = parameter.getStatus();

                                            Result res = null;

                                            ConfigurationManager confManager = new ConfigurationManager();

                                            // get component
                                            // homepath
                                            String confDir = HadoopUtils.getHadoopConfDir(hadoopConfig);

                                            // get server.properties
                                            // file path
                                            String propertyFilePath = confDir + fileName;

                                            // if connection is
                                            // established.

                                            switch (Constant.ParameterActionType
                                                    .valueOf(status.toUpperCase())) {
                                            case ADD:
                                                if (addParam(clusterConf.getNodes().get(host),
                                                        Constant.Component.Name.HADOOP, parameter.getName(),
                                                        parameter.getValue(), propertyFilePath,
                                                        Constant.File_Extension.XML)) {
                                                    confManager.saveConfiguration(clusterConf.getClusterId(),
                                                            loggedUser, fileName, host, parameter.getName(),
                                                            parameter.getValue());
                                                }
                                                break;
                                            case EDIT:
                                                if (editParam(clusterConf.getNodes().get(host),
                                                        Constant.Component.Name.HADOOP, parameter.getName(),
                                                        parameter.getValue(), propertyFilePath,
                                                        Constant.File_Extension.XML)) {
                                                    confManager.saveConfiguration(clusterConf.getClusterId(),
                                                            loggedUser, fileName, host, parameter.getName(),
                                                            parameter.getValue());
                                                }
                                                break;
                                            case DELETE:
                                                if (deleteParam(clusterConf.getNodes().get(host),
                                                        Constant.Component.Name.HADOOP, parameter.getName(),
                                                        propertyFilePath, Constant.File_Extension.XML)) {
                                                    confManager.removeOldConfiguration(
                                                            clusterConf.getClusterId(), host, fileName,
                                                            parameter.getName());
                                                }
                                                break;
                                            }
                                        }
                                    }
                                } catch (Exception e) {
                                    // To be Handled : Exception for
                                    // Edit Parameter call
                                } finally {
                                    if (semaphore != null) {
                                        semaphore.release();
                                    }
                                }
                            }
                        });
                    }
                    semaphore.acquire(hadoopConfig.getNodes().size());
                    // disconnect with all the component nodes
                    AnkushUtils.disconnectCompNodes(clusterConf, hadoopConfig.getNodes().keySet());
                } catch (Exception e) {
                    // To be Handled : Exception for Edit Parameter call
                }
            }

        });
        result.put("message", "Parameters update request placed successfully.");
    } catch (AnkushException e) {
        this.addErrorAndLogException(e.getMessage(), e);
    } catch (Exception e) {
        this.addErrorAndLogException(errMsg, e);
    }
}