Example usage for java.util.LinkedHashMap.put

Introduction

On this page you can find usage examples for java.util.LinkedHashMap.put, collected from open-source projects.

Prototype

V put(K key, V value);

Document

Associates the specified value with the specified key in this map (optional operation).
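
Before the project examples, a minimal self-contained sketch (not taken from any project below) of the two behaviors those examples rely on: put returns the value previously mapped to the key (or null if there was none), and LinkedHashMap iterates in insertion order.

import java.util.LinkedHashMap;

public class PutDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
        System.out.println(map.put("a", 1)); // null: no previous mapping for "a"
        System.out.println(map.put("b", 2)); // null
        System.out.println(map.put("a", 3)); // 1: the value that was replaced
        System.out.println(map);             // {a=3, b=2} -- insertion order preserved
    }
}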

Usage

From source file:com.thoughtworks.go.server.service.support.ThreadInformationProvider.java

private TreeMap<Long, Map<String, Object>> getStackTraceInformation(ThreadMXBean threadMXBean) {
    TreeMap<Long, Map<String, Object>> traces = new TreeMap<>();
    ThreadInfo[] threadInfos = threadMXBean.dumpAllThreads(true, true);
    for (ThreadInfo threadInfo : threadInfos) {
        LinkedHashMap<String, Object> threadStackTrace = new LinkedHashMap<>();
        threadStackTrace.put("Id", threadInfo.getThreadId());
        threadStackTrace.put("Name", threadInfo.getThreadName());
        threadStackTrace.put("State", threadInfo.getThreadState());

        LinkedHashMap<String, Object> lockMonitorInfo = new LinkedHashMap<>();
        MonitorInfo[] lockedMonitors = threadInfo.getLockedMonitors();
        ArrayList<Map<String, Object>> lockedMonitorsJson = new ArrayList<>();

        for (MonitorInfo lockedMonitor : lockedMonitors) {
            LinkedHashMap<String, Object> lockedMonitorJson = new LinkedHashMap<>();
            lockedMonitorJson.put("Class", lockedMonitor.getClassName());
            lockedMonitorJson.put("IdentityHashCode", lockedMonitor.getIdentityHashCode());
            lockedMonitorJson.put("LockedStackDepth", lockedMonitor.getLockedStackDepth());
            lockedMonitorJson.put("StackFrame", lockedMonitor.getLockedStackFrame().toString());
            lockedMonitorsJson.add(lockedMonitorJson);
        }

        lockMonitorInfo.put("Locked Monitors", lockedMonitorsJson);
        lockMonitorInfo.put("Locked Synchronizers", asJSON(threadInfo.getLockedSynchronizers()));
        threadStackTrace.put("Lock Monitor Info", lockMonitorInfo);

        LinkedHashMap<String, Object> blockedInfo = new LinkedHashMap<>();
        blockedInfo.put("Blocked Time", threadInfo.getBlockedTime() == -1 ? null : threadInfo.getBlockedTime());
        blockedInfo.put("Blocked Count", threadInfo.getBlockedCount());
        threadStackTrace.put("Blocked Info", blockedInfo);

        LinkedHashMap<String, Object> timeInfo = new LinkedHashMap<>();
        timeInfo.put("Waited Time", threadInfo.getWaitedTime() == -1 ? null : threadInfo.getWaitedTime());
        timeInfo.put("Waited Count", threadInfo.getWaitedCount());
        threadStackTrace.put("Time Info", timeInfo);

        LinkedHashMap<String, Object> lockInfoMap = new LinkedHashMap<>();
        LockInfo lockInfo = threadInfo.getLockInfo();
        lockInfoMap.put("Locked On", asJSON(lockInfo));
        lockInfoMap.put("Lock Owner Thread Id",
                threadInfo.getLockOwnerId() == -1 ? null : threadInfo.getLockOwnerId());
        lockInfoMap.put("Lock Owner Thread Name", threadInfo.getLockOwnerName());
        threadStackTrace.put("Lock Info", lockInfoMap);

        LinkedHashMap<String, Object> stateInfo = new LinkedHashMap<>();
        stateInfo.put("Suspended", threadInfo.isSuspended());
        stateInfo.put("InNative", threadInfo.isInNative());
        threadStackTrace.put("State Info", stateInfo);

        threadStackTrace.put("Stack Trace", asJSON(threadInfo.getStackTrace()));
        traces.put(threadInfo.getThreadId(), threadStackTrace);
    }
    return traces;
}
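
A hedged call-site sketch for the method above (hypothetical, since getStackTraceInformation is private in the original class): the ThreadMXBean comes from java.lang.management.ManagementFactory, and the returned TreeMap orders the per-thread maps by thread id.

ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
TreeMap<Long, Map<String, Object>> traces = getStackTraceInformation(threadMXBean);
// Each value is a LinkedHashMap, so its keys print in the order they were put.
traces.forEach((id, info) -> System.out.println(id + " -> " + info.get("Name")));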

From source file:me.code4fun.roboq.Response.java

public Map<String, String> headers() {
    LinkedHashMap<String, String> headers = new LinkedHashMap<String, String>();
    for (Header h : httpResponse.getAllHeaders()) {
        headers.put(h.getName(), h.getValue());
    }
    return headers;
}
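
Because put overwrites on duplicate keys, a header that occurs more than once in the response (Set-Cookie, for example) keeps only its last value in the returned map. A minimal sketch of that behavior:

LinkedHashMap<String, String> headers = new LinkedHashMap<>();
headers.put("Set-Cookie", "a=1");
headers.put("Set-Cookie", "b=2"); // replaces the earlier mapping for the same key
System.out.println(headers);      // {Set-Cookie=b=2}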

From source file:edu.jhuapl.openessence.web.util.ControllerUtils.java

/**
 * Returns a new map limited to the top {@code limit} values, with the surviving entries restored to the
 * original insertion order; entries that were cut are combined under {@code limitLabel}.
 */
public static LinkedHashMap<String, Double> getSortedAndLimitedMap(LinkedHashMap<String, Double> map,
        Integer limit, String limitLabel) {
    //test if we need to trim
    if (limit <= 0 || limit >= map.size()) {
        return map;
    }

    //sort by value
    Map<String, Double> sortedMap = ControllerUtils.getSortedByValueMap(map);
    //limit and combine results
    Map<String, Double> sortedLimitedMap = ControllerUtils.getLimitedMap(sortedMap, limit, limitLabel);

    //put the original sort order back (minus the values combined)
    LinkedHashMap<String, Double> originalSortResultMap = new LinkedHashMap<String, Double>(limit);
    LinkedHashMap<String, Double> passedValuesMap = new LinkedHashMap<String, Double>(map.size());
    int i = 0;
    for (String key : map.keySet()) {
        if (i < limit) {
            if (sortedLimitedMap.containsKey(key)) {
                Double value = sortedLimitedMap.get(key);
                //if value is not null/zero, add it and increment
                if (value != null && !Double.isNaN(value) && value > 0) {
                    originalSortResultMap.put(key, value);
                    i++;
                } else { //put it in a list of passed up values for inclusion at the end
                    passedValuesMap.put(key, value);
                }
            }
        }
    }
    //if we still have room after adding all sorted non zero values... fill the rest with passed values
    if (i < limit) {
        for (String key : passedValuesMap.keySet()) {
            if (i < limit) {
                originalSortResultMap.put(key, passedValuesMap.get(key));
                i++;
            }
        }
    }
    //add combined field if it is not null (indicates it was used even if the value is 0)
    Double cVal = sortedLimitedMap.get(limitLabel);
    if (cVal != null && !Double.isNaN(cVal)) {
        originalSortResultMap.put(limitLabel, cVal);
    }
    return originalSortResultMap;
}
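
A hedged usage sketch, assuming the ControllerUtils helpers behave as their names suggest (getSortedByValueMap sorts descending by value, getLimitedMap keeps the top entries and folds the remainder under limitLabel):

LinkedHashMap<String, Double> counts = new LinkedHashMap<>();
counts.put("flu", 12.0);
counts.put("cold", 3.0);
counts.put("fever", 7.0);
counts.put("cough", 1.0);

// Keep the top 2 by value, combining the rest under "Other".
// Under the assumptions above this yields {flu=12.0, fever=7.0, Other=4.0},
// with flu and fever in their original insertion order.
LinkedHashMap<String, Double> top = ControllerUtils.getSortedAndLimitedMap(counts, 2, "Other");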

From source file:hydrograph.ui.propertywindow.widgets.customwidgets.databasecomponents.DatabaseTestConnectionWidget.java

@Override
public LinkedHashMap<String, Object> getProperties() {
    LinkedHashMap<String, Object> property = new LinkedHashMap<>();
    property.put(propertyName, this.initialMap);

    setToolTipErrorMessage();
    return property;
}

From source file:com.streamsets.pipeline.stage.destination.hive.HiveMetastoreTargetIT.java

@Test
public void testNonAvroTable() throws Exception {
    executeUpdate("CREATE  TABLE `tbl_csv` (id int, value string) partitioned by (dt String)"
            + " ROW FORMAT DELIMITED " + " FIELDS TERMINATED BY ',' " + " STORED AS TEXTFILE ");
    HiveMetastoreTarget hiveTarget = new HiveMetastoreTargetBuilder().build();

    TargetRunner runner = new TargetRunner.Builder(HiveMetastoreTarget.class, hiveTarget)
            .setOnRecordError(OnRecordError.TO_ERROR).build();
    runner.runInit();

    LinkedHashMap<String, HiveTypeInfo> columns = new LinkedHashMap<>();
    columns.put("name", HiveType.STRING.getSupport().generateHiveTypeInfoFromResultSet("STRING"));

    LinkedHashMap<String, HiveTypeInfo> partitions = new LinkedHashMap<>();
    partitions.put("dt", HiveType.STRING.getSupport().generateHiveTypeInfoFromResultSet("STRING"));

    Field newTableField = HiveMetastoreUtil.newSchemaMetadataFieldBuilder("default", "tbl_csv", columns,
            partitions, true, BaseHiveIT.getDefaultWareHouseDir(),
            HiveMetastoreUtil.generateAvroSchema(columns, "tbl"));

    Record record = RecordCreator.create();
    record.set(newTableField);
    Assert.assertTrue(HiveMetastoreUtil.isSchemaChangeRecord(record));

    runner.runWrite(ImmutableList.of(record));

    Assert.assertEquals("There should be one error record", 1L, runner.getErrorRecords().size());
    Record errorRecord = runner.getErrorRecords().get(0);
    Assert.assertEquals("Error codes mismatch", Errors.HIVE_32.name(), errorRecord.getHeader().getErrorCode());
}

From source file:com.streamsets.pipeline.stage.destination.hive.HiveMetastoreTargetIT.java

@Test
public void testNonPartitionedInfoToPartitionedTable() throws Exception {
    HiveMetastoreTarget hiveTarget = new HiveMetastoreTargetBuilder().build();

    TargetRunner runner = new TargetRunner.Builder(HiveMetastoreTarget.class, hiveTarget)
            .setOnRecordError(OnRecordError.TO_ERROR).build();
    runner.runInit();

    LinkedHashMap<String, HiveTypeInfo> columns = new LinkedHashMap<>();
    columns.put("name", HiveType.STRING.getSupport().generateHiveTypeInfoFromResultSet("STRING"));

    Field newTableField = HiveMetastoreUtil.newSchemaMetadataFieldBuilder("default", "tbl", columns, null, true,
            BaseHiveIT.getDefaultWareHouseDir(), HiveMetastoreUtil.generateAvroSchema(columns, "tbl"));

    Record record = RecordCreator.create();
    record.set(newTableField);
    Assert.assertTrue(HiveMetastoreUtil.isSchemaChangeRecord(record));

    runner.runWrite(ImmutableList.of(record));
    Assert.assertEquals("There should be no error records", 0, runner.getErrorRecords().size());
    LinkedHashMap<String, String> partitionVals = new LinkedHashMap<String, String>();
    partitionVals.put("dt", "2016");

    Field newPartitionField = HiveMetastoreUtil.newPartitionMetadataFieldBuilder("default", "tbl",
            partitionVals, "/user/hive/warehouse/tbl/dt=2016");
    record = RecordCreator.create();
    record.set(newPartitionField);
    runner.runWrite(ImmutableList.of(record));
    Assert.assertEquals("There should be one error record", 1, runner.getErrorRecords().size());
    Record errorRecord = runner.getErrorRecords().get(0);
    Assert.assertEquals(errorRecord.getHeader().getErrorCode(), Errors.HIVE_27.name());
}

From source file:architecture.user.spring.config.SecurityConfig.java

@Bean(name = "authenticationEntryPoint")
public DelegatingAuthenticationEntryPoint authenticationEntryPoint() {
    LinkedHashMap<RequestMatcher, AuthenticationEntryPoint> entryPoints = new LinkedHashMap<RequestMatcher, AuthenticationEntryPoint>();
    entryPoints.put(nonAjaxRequestMatcher(), loginUrlAuthenticationEntryPoint());
    DelegatingAuthenticationEntryPoint authenticationEntryPoint = new DelegatingAuthenticationEntryPoint(
            entryPoints);
    authenticationEntryPoint.setDefaultEntryPoint(new Http403ForbiddenEntryPoint());
    return authenticationEntryPoint;
}
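
The LinkedHashMap is doing real work here: DelegatingAuthenticationEntryPoint tries the RequestMatchers in iteration order and delegates to the first one that matches, so insertion order acts as match priority. A minimal sketch where that order decides the outcome (the URL patterns and bean name are illustrative):

@Bean
public DelegatingAuthenticationEntryPoint orderedEntryPoint() {
    LinkedHashMap<RequestMatcher, AuthenticationEntryPoint> entryPoints = new LinkedHashMap<>();
    // Checked first: API calls get a bare 403 rather than a login redirect.
    entryPoints.put(new AntPathRequestMatcher("/api/**"), new Http403ForbiddenEntryPoint());
    // Checked second: everything else is sent to the login form.
    entryPoints.put(new AntPathRequestMatcher("/**"), new LoginUrlAuthenticationEntryPoint("/login"));
    DelegatingAuthenticationEntryPoint entryPoint = new DelegatingAuthenticationEntryPoint(entryPoints);
    entryPoint.setDefaultEntryPoint(new Http403ForbiddenEntryPoint());
    return entryPoint;
}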

From source file:hydrograph.ui.propertywindow.widgets.customwidgets.SingleColumnWidget.java

@Override
public LinkedHashMap<String, Object> getProperties() {
    LinkedHashMap<String, Object> property = new LinkedHashMap<>();
    if (this.set == null) {
        this.set = new ArrayList<String>();
    }
    property.put(propertyName, this.set);
    return property;
}

From source file:org.opendatakit.configuration.SecurityConfiguration.java

@Bean
DelegatingAuthenticationEntryPoint delegatingAuthenticationEntryPoint()
        throws ODKEntityNotFoundException, ODKOverQuotaException, ODKDatastoreException, PropertyVetoException {
    LinkedHashMap<RequestMatcher, AuthenticationEntryPoint> entryPoints = new LinkedHashMap<RequestMatcher, AuthenticationEntryPoint>();
    entryPoints.put(new RequestHeaderRequestMatcher("X-OpenRosa-Version", "1.0"), digestEntryPoint());
    entryPoints.put(new RequestHeaderRequestMatcher("X-OpenDataKit-Version", "2.0"), digestEntryPoint());
    DelegatingAuthenticationEntryPoint delegatingAuthenticationEntryPoint = new DelegatingAuthenticationEntryPoint(
            entryPoints);
    delegatingAuthenticationEntryPoint.setDefaultEntryPoint(digestEntryPoint());
    return delegatingAuthenticationEntryPoint;
}