Example usage for java.util Collections synchronizedMap

List of usage examples for java.util Collections synchronizedMap

Introduction

On this page you can find example usages of java.util Collections.synchronizedMap.

Prototype

public static <K, V> Map<K, V> synchronizedMap(Map<K, V> m) 

Document

Returns a synchronized (thread-safe) map backed by the specified map.
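
As a quick illustration of the contract above, here is a minimal, self-contained sketch (class and variable names are invented for this page): single operations such as put and get are made thread-safe by the wrapper, but iterating over any of the map's views still requires manual synchronization on the returned map, as the Javadoc warns.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class SynchronizedMapExample {
    public static void main(String[] args) {
        // Wrap an ordinary HashMap so that each individual call is synchronized.
        Map<String, Integer> counts = Collections.synchronizedMap(new HashMap<String, Integer>());

        counts.put("apples", 3); // thread-safe: the wrapper locks around each call
        counts.put("oranges", 5);

        // Iteration spans many calls, so the Javadoc requires synchronizing
        // on the returned map for the duration of the loop.
        synchronized (counts) {
            for (Map.Entry<String, Integer> entry : counts.entrySet()) {
                System.out.println(entry.getKey() + " = " + entry.getValue());
            }
        }
    }
}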

Usage

From source file:com.github.nethad.clustermeister.provisioning.ec2.AmazonNodeManager.java

private void loadConfiguration(Configuration configuration) {
    AmazonConfigurationLoader configurationLoader = new AmazonConfigurationLoader(configuration);

    awsWebApiCredentials = new PasswordCredentials("AWS Web API Credentials",
            configurationLoader.getAccessKeyId(), configurationLoader.getSecretKey());
    nodeJvmOptions = configurationLoader.getNodeJvmOptions();
    nodeLogLevel = configurationLoader.getNodeLogLevel();
    nodeRemoteLogging = configurationLoader.getNodeRemoteLogging();
    nodeRemoteLoggingPort = configurationLoader.getNodeRemoteLoggingPort();

    profiles = Collections.synchronizedMap(configurationLoader.getConfiguredProfiles());

    artifactsToPreload = DependencyConfigurationUtil.getConfiguredDependencies(configuration);
}

From source file:org.opencms.xml.containerpage.CmsADECache.java

/**
 * Initializes the caches.<p>
 * 
 * @param memMonitor the memory monitor instance
 * @param cacheSettings the system cache settings
 */
private void initialize(CmsMemoryMonitor memMonitor, CmsADECacheSettings cacheSettings) {

    // container page caches
    Map<String, CmsXmlContainerPage> lruMapCntPage = CmsCollectionsGenericWrapper
            .createLRUMap(cacheSettings.getContainerPageOfflineSize());
    m_containerPagesOffline = Collections.synchronizedMap(lruMapCntPage);
    memMonitor.register(CmsADECache.class.getName() + ".containerPagesOffline", lruMapCntPage);

    lruMapCntPage = CmsCollectionsGenericWrapper.createLRUMap(cacheSettings.getContainerPageOnlineSize());
    m_containerPagesOnline = Collections.synchronizedMap(lruMapCntPage);
    memMonitor.register(CmsADECache.class.getName() + ".containerPagesOnline", lruMapCntPage);

    // container page caches
    Map<String, CmsXmlGroupContainer> lruMapGroupContainer = CmsCollectionsGenericWrapper
            .createLRUMap(cacheSettings.getGroupContainerOfflineSize());
    m_groupContainersOffline = Collections.synchronizedMap(lruMapGroupContainer);
    memMonitor.register(CmsADECache.class.getName() + ".groupContainersOffline", lruMapGroupContainer);

    lruMapGroupContainer = CmsCollectionsGenericWrapper
            .createLRUMap(cacheSettings.getGroupContainerOnlineSize());
    m_groupContainersOnline = Collections.synchronizedMap(lruMapGroupContainer);
    memMonitor.register(CmsADECache.class.getName() + ".groupContainersOnline", lruMapGroupContainer);
}
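
The pattern above pairs a size-bounded LRU map with Collections.synchronizedMap to obtain a thread-safe cache. The following sketch is a hypothetical stand-in for CmsCollectionsGenericWrapper.createLRUMap (names and sizes are invented for illustration): a LinkedHashMap in access order evicts its eldest entry once a maximum size is exceeded, and the synchronized wrapper makes the cache safe for concurrent use.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public final class LruCacheSketch {

    // Roughly what an LRU-map factory plus synchronizedMap provides:
    // an access-ordered LinkedHashMap with eviction, wrapped for thread safety.
    static <K, V> Map<K, V> createSynchronizedLruMap(final int maxSize) {
        Map<K, V> lru = new LinkedHashMap<K, V>(16, 0.75f, true) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                return size() > maxSize;
            }
        };
        return Collections.synchronizedMap(lru);
    }

    public static void main(String[] args) {
        Map<String, String> cache = createSynchronizedLruMap(2);
        cache.put("a", "1");
        cache.put("b", "2");
        cache.get("a");                     // touch "a" so "b" becomes the eldest entry
        cache.put("c", "3");                // exceeds the maximum size and evicts "b"
        System.out.println(cache.keySet()); // prints [a, c]
    }
}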

From source file:derson.com.httpsender.AsyncHttpClient.AsyncHttpClient.java

/**
 * Creates a new AsyncHttpClient.
 *
 * @param schemeRegistry SchemeRegistry to be used
 */
public AsyncHttpClient(SchemeRegistry schemeRegistry) {

    BasicHttpParams httpParams = new BasicHttpParams();

    ConnManagerParams.setTimeout(httpParams, connectTimeout);
    ConnManagerParams.setMaxConnectionsPerRoute(httpParams, new ConnPerRouteBean(maxConnections));
    ConnManagerParams.setMaxTotalConnections(httpParams, DEFAULT_MAX_CONNECTIONS);

    HttpConnectionParams.setSoTimeout(httpParams, responseTimeout);
    HttpConnectionParams.setConnectionTimeout(httpParams, connectTimeout);
    HttpConnectionParams.setTcpNoDelay(httpParams, true);
    HttpConnectionParams.setSocketBufferSize(httpParams, DEFAULT_SOCKET_BUFFER_SIZE);

    HttpProtocolParams.setVersion(httpParams, HttpVersion.HTTP_1_1);

    ThreadSafeClientConnManager cm = new ThreadSafeClientConnManager(httpParams, schemeRegistry);

    threadPool = getDefaultThreadPool();
    requestMap = Collections.synchronizedMap(new WeakHashMap<Context, List<RequestHandle>>());
    clientHeaderMap = new HashMap<String, String>();

    httpContext = new SyncBasicHttpContext(new BasicHttpContext());
    httpClient = new DefaultHttpClient(cm, httpParams);
    httpClient.addRequestInterceptor(new HttpRequestInterceptor() {
        @Override
        public void process(HttpRequest request, HttpContext context) {
            if (!request.containsHeader(HEADER_ACCEPT_ENCODING)) {
                request.addHeader(HEADER_ACCEPT_ENCODING, ENCODING_GZIP);
            }
            for (String header : clientHeaderMap.keySet()) {
                if (request.containsHeader(header)) {
                    Header overwritten = request.getFirstHeader(header);
                    HPLog.d(LOG_TAG,
                            String.format("Headers were overwritten! (%s | %s) overwrites (%s | %s)", header,
                                    clientHeaderMap.get(header), overwritten.getName(),
                                    overwritten.getValue()));

                    //remove the overwritten header
                    request.removeHeader(overwritten);
                }
                request.addHeader(header, clientHeaderMap.get(header));
            }
        }
    });

    httpClient.addResponseInterceptor(new HttpResponseInterceptor() {
        @Override
        public void process(HttpResponse response, HttpContext context) {
            final HttpEntity entity = response.getEntity();
            if (entity == null) {
                return;
            }
            final Header encoding = entity.getContentEncoding();
            if (encoding != null) {
                for (HeaderElement element : encoding.getElements()) {
                    if (element.getName().equalsIgnoreCase(ENCODING_GZIP)) {
                        response.setEntity(new InflatingEntity(entity));
                        break;
                    }
                }
            }
        }
    });

    httpClient.addRequestInterceptor(new HttpRequestInterceptor() {
        @Override
        public void process(final HttpRequest request, final HttpContext context)
                throws HttpException, IOException {
            AuthState authState = (AuthState) context.getAttribute(ClientContext.TARGET_AUTH_STATE);
            CredentialsProvider credsProvider = (CredentialsProvider) context
                    .getAttribute(ClientContext.CREDS_PROVIDER);
            HttpHost targetHost = (HttpHost) context.getAttribute(ExecutionContext.HTTP_TARGET_HOST);

            if (authState.getAuthScheme() == null) {
                AuthScope authScope = new AuthScope(targetHost.getHostName(), targetHost.getPort());
                Credentials creds = credsProvider.getCredentials(authScope);
                if (creds != null) {
                    authState.setAuthScheme(new BasicScheme());
                    authState.setCredentials(creds);
                }
            }
        }
    }, 0);

    httpClient
            .setHttpRequestRetryHandler(new RetryHandler(DEFAULT_MAX_RETRIES, DEFAULT_RETRY_SLEEP_TIME_MILLIS));
}

From source file:org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore.java

@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0,
            "%s property value should be greater than zero", YarnConfiguration.TIMELINE_SERVICE_TTL_MS);
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0,
            "%s property value should be greater than zero",
            YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0,
            "%s property value should be greater than or equal to zero",
            YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0,
            " %s property value should be greater than zero",
            YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0,
            "%s property value should be greater than zero",
            YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);

    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    JniDBFactory factory = new JniDBFactory();
    Path dbPath = new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(conf);
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    LOG.info("Using leveldb path " + dbPath);
    db = factory.open(new File(dbPath.toString()), options);
    checkVersion();
    startTimeWriteCache = Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(conf)));
    startTimeReadCache = Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(conf)));

    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true)) {
        deletionThread = new EntityDeletionThread(conf);
        deletionThread.start();
    }

    super.serviceInit(conf);
}

From source file:com.pinterest.terrapin.client.TerrapinClient.java

/**
 * Issues a call for retrieving a response for multiple keys. Does appropriate batching.
 * Uses a 2 pass approach:
 * 1) Pass 1 - Batches the keys by partition, issues the requests to each relevant replica
 *             and tracks failed keys and replicas with failures.
 * 2) Pass 2 - Batches the failed keys from pass 1 by host, issues the request to
 *             relevant replicas, excluding the replicas with failures in pass 1.
 *
 * TODO(varun): Think about using speculative execution for the single key lookup use case.
 *              It could provide better latency.
 *
 * @param fileSet The file set against which the request should be issued.
 * @param keyList The list of keys.
 * @param retry Whether to perform a second try on the cluster.
 * @return A future wrapping a TerrapinResponse object.
 */
protected Future<TerrapinResponse> getManyHelper(final String fileSet, final Set<ByteBuffer> keyList,
        final boolean retry) {
    Pair<FileSetInfo, ViewInfo> pair = null;
    try {
        pair = fileSetViewManager.getFileSetViewInfo(fileSet);
    } catch (TerrapinGetException e) {
        return Future.exception(e);
    }
    final FileSetInfo info = pair.getLeft();
    final ViewInfo viewInfo = pair.getRight();

    // This runs in two passes. In the first pass, we send a query to all the hosts
    // containing the keys in @keyList. We collect the list of failed keys in the first
    // pass and also the set of hosts which had errors. We send out a second query
    // with the failed keys to the respective set of hosts and attempt to exclude
    // the initial set of failed hosts.
    final Set<String> failedHostsFirstPass = Collections.synchronizedSet(Sets.<String>newHashSet());
    final Map<ByteBuffer, TerrapinSingleResponse> failedKeysFirstPass = Collections
            .synchronizedMap(Maps.<ByteBuffer, TerrapinSingleResponse>newHashMap());
    Map<String, Future<TerrapinResponse>> responseFutureMapFirstPass = getManyHelper(fileSet,
            info.servingInfo.helixResource, info.servingInfo.numPartitions, viewInfo, keyList,
            info.servingInfo.partitionerType, failedHostsFirstPass, failedKeysFirstPass,
            (Set) Sets.newHashSet(), 1);
    List<Future<TerrapinResponse>> responseFutureListFirstPass = Lists
            .newArrayListWithCapacity(responseFutureMapFirstPass.size());
    for (Map.Entry<String, Future<TerrapinResponse>> entry : responseFutureMapFirstPass.entrySet()) {
        responseFutureListFirstPass.add(entry.getValue());
    }
    // For the failed keys.
    return Stats.timeFutureMillis(statsPrefix + fileSet + "-latency",
            Future.<TerrapinResponse>collect(responseFutureListFirstPass)
                    .flatMap(new Function<List<TerrapinResponse>, Future<TerrapinResponse>>() {
                        @Override
                        public Future<TerrapinResponse> apply(final List<TerrapinResponse> responseListPass1) {
                            // At this point, we have a black list of hosts and we also have a list of keys
                            // which did not succeed in the first run.
                            // If the first pass fully succeeded or we have explicitly disabled retries,
                            // then don't perform a retry.
                            if (failedKeysFirstPass.isEmpty() || !retry) {
                                TerrapinResponse aggResponse = new TerrapinResponse();
                                aggResponse
                                        .setResponseMap((Map) Maps.newHashMapWithExpectedSize(keyList.size()));
                                for (TerrapinResponse response : responseListPass1) {
                                    aggResponse.getResponseMap().putAll(response.getResponseMap());
                                }
                                aggResponse.getResponseMap().putAll(failedKeysFirstPass);
                                return Future.value(aggResponse);
                            }
                            // Otherwise, we fire off a second set of futures.
                            Map<String, Future<TerrapinResponse>> responseFutureMapSecondPass = getManyHelper(
                                    fileSet, info.servingInfo.helixResource, info.servingInfo.numPartitions,
                                    viewInfo, failedKeysFirstPass.keySet(), info.servingInfo.partitionerType,
                                    null, null, failedHostsFirstPass, 2);
                            List<Future<TerrapinResponse>> responseFutureListSecondPass = Lists
                                    .newArrayListWithCapacity(responseFutureMapSecondPass.size());
                            responseFutureListSecondPass.addAll(responseFutureMapSecondPass.values());
                            return Future.collect(responseFutureListSecondPass)
                                    .map(new Function<List<TerrapinResponse>, TerrapinResponse>() {
                                        @Override
                                        public TerrapinResponse apply(
                                                List<TerrapinResponse> responseListPass2) {
                                            // The two responses (first pass and second pass) should be disjoint
                                            // in the set of keys they return, so we can safely merge them.
                                            TerrapinResponse aggResponse = new TerrapinResponse();
                                            aggResponse.setResponseMap(
                                                    (Map) Maps.newHashMapWithExpectedSize(keyList.size()));
                                            for (TerrapinResponse response : responseListPass1) {
                                                aggResponse.getResponseMap().putAll(response.getResponseMap());
                                            }
                                            for (TerrapinResponse response : responseListPass2) {
                                                aggResponse.getResponseMap().putAll(response.getResponseMap());
                                            }
                                            return aggResponse;
                                        }
                                    });
                        }
                    }));
}
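
In the example above, failedKeysFirstPass is written to from the callbacks of several futures that can complete on different threads, which is why the plain HashMap is wrapped with Collections.synchronizedMap. A minimal, hypothetical sketch of the same idea (the class, the executor setup and the key names are invented for illustration):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FailedKeyCollector {
    public static void main(String[] args) throws InterruptedException {
        // Shared map written to by several worker threads; each individual
        // put() is made thread-safe by the synchronizedMap wrapper.
        final Map<String, String> failedKeys =
                Collections.synchronizedMap(new HashMap<String, String>());

        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int index = i;
            pool.execute(new Runnable() {
                @Override
                public void run() {
                    // Simulate a lookup that fails for every third key.
                    if (index % 3 == 0) {
                        failedKeys.put("key-" + index, "lookup failed");
                    }
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
        System.out.println("Failed keys: " + failedKeys.keySet());
    }
}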

From source file:org.isatools.isacreator.spreadsheet.Spreadsheet.java

public void instantiateSpreadsheet() {

    ResourceInjector.get("spreadsheet-package.style").inject(this);

    observers = new ArrayList<CopyPasteObserver>();

    spreadsheetPopups = new SpreadsheetPopupMenus(this);
    spreadsheetFunctions = new SpreadsheetFunctions(this);

    // The synchronized wrapper must be assigned back; calling
    // Collections.synchronizedMap(...) and discarding the result would leave
    // columnDependencies unsynchronized.
    columnDependencies = Collections
            .synchronizedMap(new HashMap<TableColumn, List<TableColumn>>());
    hiddenColumns = new HashSet<String>();

    setLayout(new BorderLayout());

    createSpreadsheetModel();
    populateSpreadsheetWithContent();
    addOntologyTermsToUserHistory();

    // assign copy/paste listener
    new CopyPasteAdaptor(this);

    JScrollPane pane = new JScrollPane(table, JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED,
            JScrollPane.HORIZONTAL_SCROLLBAR_ALWAYS);
    pane.setBackground(UIHelper.BG_COLOR);
    pane.setAutoscrolls(true);
    pane.getViewport().setBackground(UIHelper.BG_COLOR);
    pane.setBorder(UIHelper.EMPTY_BORDER);

    IAppWidgetFactory.makeIAppScrollPane(pane);

    add(pane, BorderLayout.CENTER);

    createButtonPanel();
    addUndoableEditListener(undoManager);
}

From source file:org.apache.hadoop.hbase.master.AssignmentManager.java

/**
 * Constructs a new assignment manager.
 *
 * @param server
 * @param serverManager
 * @param catalogTracker
 * @param service
 * @throws KeeperException
 * @throws IOException
 */
public AssignmentManager(Server server, ServerManager serverManager, CatalogTracker catalogTracker,
        final LoadBalancer balancer, final ExecutorService service, MetricsMaster metricsMaster,
        final TableLockManager tableLockManager)
        throws KeeperException, IOException, CoordinatedStateException {
    super(server.getZooKeeper());
    this.server = server;
    this.serverManager = serverManager;
    this.catalogTracker = catalogTracker;
    this.executorService = service;
    this.regionsToReopen = Collections.synchronizedMap(new HashMap<String, HRegionInfo>());
    Configuration conf = server.getConfiguration();
    // Only read favored nodes if using the favored nodes load balancer.
    this.shouldAssignRegionsWithFavoredNodes = conf
            .getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class)
            .equals(FavoredNodeLoadBalancer.class);
    try {
        if (server.getCoordinatedStateManager() != null) {
            this.tableStateManager = server.getCoordinatedStateManager().getTableStateManager();
        } else {
            this.tableStateManager = null;
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException();
    }
    // This is the max attempts, not retries, so it should be at least 1.
    this.maximumAttempts = Math.max(1,
            this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
    this.sleepTimeBeforeRetryingMetaAssignment = this.server.getConfiguration()
            .getLong("hbase.meta.assignment.retry.sleeptime", 1000l);
    this.balancer = balancer;
    int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
    this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(maxThreads, 60L, TimeUnit.SECONDS,
            Threads.newDaemonThreadFactory("AM."));
    this.regionStates = new RegionStates(server, serverManager);

    this.bulkAssignWaitTillAllAssigned = conf.getBoolean("hbase.bulk.assignment.waittillallassigned", false);
    this.bulkAssignThresholdRegions = conf.getInt("hbase.bulk.assignment.threshold.regions", 7);
    this.bulkAssignThresholdServers = conf.getInt("hbase.bulk.assignment.threshold.servers", 3);

    int workers = conf.getInt("hbase.assignment.zkevent.workers", 20);
    ThreadFactory threadFactory = Threads.newDaemonThreadFactory("AM.ZK.Worker");
    zkEventWorkers = Threads.getBoundedCachedThreadPool(workers, 60L, TimeUnit.SECONDS, threadFactory);
    this.tableLockManager = tableLockManager;

    this.metricsAssignmentManager = new MetricsAssignmentManager();
}

From source file:org.apache.hadoop.hive.ql.hooks.LineageInfo.java

/**
 * Constructor.
 */
public LineageInfo() {
    index = Collections.synchronizedMap(new LinkedHashMap<DependencyKey, Dependency>());
}

From source file:org.sakaiproject.authz.impl.DbAuthzGroupService.java

/**
 * Final initialization, once all dependencies are set.
 */
public void init() {
    try {
        // The observer will be notified whenever there are new events. Priority observers get notified first, before normal observers.
        eventTrackingService().addPriorityObserver(this);

        // if we are auto-creating our schema, check and create
        if (m_autoDdl) {
            sqlService().ddl(this.getClass().getClassLoader(), "sakai_realm");
        }

        super.init();
        setDbAuthzGroupSql(sqlService().getVendor());

        // pre-cache role and function names
        cacheRoleNames();
        cacheFunctionNames();
        m_realmRoleGRCache = m_memoryService
                .newCache("org.sakaiproject.authz.impl.DbAuthzGroupService.realmRoleGroupCache");
        M_log.info("init(): table: " + m_realmTableName + " external locks: " + m_useExternalLocks);

        authzUserGroupIdsCache = m_memoryService
                .newCache("org.sakaiproject.authz.impl.DbAuthzGroupService.authzUserGroupIdsCache");

        maintainRolesCache = m_memoryService
                .newCache("org.sakaiproject.authz.impl.DbAuthzGroupService.maintainRolesCache");
        //get the set of maintain roles and cache them on startup
        getMaintainRoles();

        refreshTaskInterval = initConfig(REFRESH_INTERVAL_PROPKEY,
                serverConfigurationService().getString(REFRESH_INTERVAL_PROPKEY), refreshTaskInterval);
        refreshMaxTime = initConfig(REFRESH_MAX_TIME_PROPKEY,
                serverConfigurationService().getString(REFRESH_MAX_TIME_PROPKEY), refreshMaxTime);

        refreshQueue = Collections.synchronizedMap(new HashMap<String, AuthzGroup>());

        refreshScheduler = Executors.newSingleThreadScheduledExecutor();
        refreshScheduler.scheduleWithFixedDelay(new RefreshAuthzGroupTask(), 120, // minimally wait 2 mins for sakai to start
                refreshTaskInterval, // delay before running again
                TimeUnit.SECONDS);
    } catch (Exception t) {
        M_log.warn("init(): ", t);
    }
}

From source file:eu.eidas.auth.commons.PersonalAttribute.java

/**
 * Setter for the complex value.
 *
 * @param complexVal The personal attribute Complex value.
 */
public synchronized void setComplexValue(@Nullable Map<String, String> complexVal) {
    // no defensive copy needed when there is no reference update
    if (this.complexValue == complexVal) {
        return;
    }
    if (null == complexVal || complexVal.isEmpty()) {
        this.complexValue.clear();
        return;
    }
    this.complexValue = Collections.synchronizedMap(new HashMap<String, String>(complexVal));
}