Example usage for java.util Set equals

List of usage examples for java.util Set equals

Introduction

On this page you can find example usages of java.util.Set.equals.

Prototype

boolean equals(Object o);

Document

Compares the specified object with this set for equality.
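
As the contract implies, equality depends only on the elements: the concrete Set implementation and its iteration order do not matter. The following minimal sketch illustrates this (the class name and element values are chosen just for the example):

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.TreeSet;

public class SetEqualsContract {
    public static void main(String[] args) {
        // Three different Set implementations holding the same elements
        Set<String> hashSet = new HashSet<String>(Arrays.asList("A", "B", "C"));
        Set<String> treeSet = new TreeSet<String>(Arrays.asList("A", "B", "C"));
        Set<String> linkedSet = new LinkedHashSet<String>(Arrays.asList("C", "B", "A"));

        // equals() compares elements only, so implementation and ordering are irrelevant
        System.out.println(hashSet.equals(treeSet));   // true
        System.out.println(hashSet.equals(linkedSet)); // true

        // A set missing an element is not equal
        System.out.println(hashSet.equals(new HashSet<String>(Arrays.asList("A", "B")))); // false
    }
}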

Usage

From source file:MainClass.java

public static void main(String[] a) {
    String elements[] = { "A", "B", "C", "D", "E" };
    Set set = new HashSet(Arrays.asList(elements));

    elements = new String[] { "A", "B", "C", "D" };
    Set set2 = new HashSet(Arrays.asList(elements));

    System.out.println(set.equals(set2));
}

From source file:Main.java

public static void main(String[] a) {
    String elements[] = { "A", "B", "C", "D", "E" };
    Set<String> set = new HashSet<String>(Arrays.asList(elements));

    elements = new String[] { "A", "B", "C", "D" };
    Set<String> set2 = new HashSet<String>(Arrays.asList(elements));

    System.out.println(set.equals(set2));
}
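
Both snippets above print false: set2 lacks "E", so the two sets differ in size, and AbstractSet.equals (which HashSet inherits) compares sizes before falling back to containsAll.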

From source file:voldemort.store.readonly.swapper.StoreSwapper.java

public static void main(String[] args) throws Exception {
    OptionParser parser = new OptionParser();
    parser.accepts("help", "print usage information");
    parser.accepts("cluster", "[REQUIRED] the voldemort cluster.xml file ").withRequiredArg()
            .describedAs("cluster.xml");
    parser.accepts("name", "[REQUIRED] the name of the store to swap").withRequiredArg()
            .describedAs("store-name");
    parser.accepts("servlet-path", "the path for the read-only management servlet").withRequiredArg()
            .describedAs("path");
    parser.accepts("file", "[REQUIRED] uri of a directory containing the new store files").withRequiredArg()
            .describedAs("uri");
    parser.accepts("timeout", "http timeout for the fetch in ms").withRequiredArg().describedAs("timeout ms")
            .ofType(Integer.class);
    parser.accepts("rollback", "Rollback store to older version");
    parser.accepts("admin", "Use admin services. Default = false");
    parser.accepts("push-version", "[REQUIRED] Version of push to fetch / rollback-to").withRequiredArg()
            .ofType(Long.class);

    OptionSet options = parser.parse(args);
    if (options.has("help")) {
        parser.printHelpOn(System.out);
        System.exit(0);
    }

    Set<String> missing = CmdUtils.missing(options, "cluster", "name", "file", "push-version");
    if (missing.size() > 0) {
        if (!(missing.equals(ImmutableSet.of("file")) && (options.has("rollback")))) {
            System.err.println("Missing required arguments: " + Joiner.on(", ").join(missing));
            parser.printHelpOn(System.err);
            System.exit(1);
        }
    }

    String clusterXml = (String) options.valueOf("cluster");
    String storeName = (String) options.valueOf("name");
    String mgmtPath = CmdUtils.valueOf(options, "servlet-path", "read-only/mgmt");
    String filePath = (String) options.valueOf("file");
    int timeoutMs = CmdUtils.valueOf(options, "timeout",
            (int) (3 * Time.SECONDS_PER_HOUR * Time.MS_PER_SECOND));
    boolean useAdminServices = options.has("admin");
    boolean rollbackStore = options.has("rollback");
    Long pushVersion = (Long) options.valueOf("push-version");

    String clusterStr = FileUtils.readFileToString(new File(clusterXml));
    Cluster cluster = new ClusterMapper().readCluster(new StringReader(clusterStr));
    ExecutorService executor = Executors.newFixedThreadPool(cluster.getNumberOfNodes());
    StoreSwapper swapper = null;
    AdminClient adminClient = null;

    DefaultHttpClient httpClient = null;
    if (useAdminServices) {
        adminClient = new AdminClient(cluster, new AdminClientConfig(), new ClientConfig());
        swapper = new AdminStoreSwapper(cluster, executor, adminClient, timeoutMs);
    } else {
        int numConnections = cluster.getNumberOfNodes() + 3;
        ThreadSafeClientConnManager connectionManager = new ThreadSafeClientConnManager();
        httpClient = new DefaultHttpClient(connectionManager);

        HttpParams clientParams = httpClient.getParams();

        connectionManager.setMaxTotal(numConnections);
        connectionManager.setDefaultMaxPerRoute(numConnections);
        HttpConnectionParams.setSoTimeout(clientParams, timeoutMs);

        swapper = new HttpStoreSwapper(cluster, executor, httpClient, mgmtPath);
    }

    try {
        long start = System.currentTimeMillis();
        if (rollbackStore) {
            swapper.invokeRollback(storeName, pushVersion.longValue());
        } else {
            swapper.swapStoreData(storeName, filePath, pushVersion.longValue());
        }
        long end = System.currentTimeMillis();
        logger.info("Succeeded on all nodes in " + ((end - start) / Time.MS_PER_SECOND) + " seconds.");
    } finally {
        if (useAdminServices && adminClient != null)
            adminClient.close();
        executor.shutdownNow();
        executor.awaitTermination(1, TimeUnit.SECONDS);
        VoldemortIOUtils.closeQuietly(httpClient);
    }
    System.exit(0);
}
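
In this tool, Set.equals is used to test whether "file" is the only missing required argument; that particular gap is tolerated when --rollback is given, since a rollback does not need new store files.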

From source file:TestCipher.java

public static void main(String args[]) throws Exception {
    Set set = new HashSet();
    Random random = new Random();
    for (int i = 0; i < 10; i++) {
        Point point = new Point(random.nextInt(1000), random.nextInt(2000));
        set.add(point);
    }
    int last = random.nextInt(5000);

    // Create key (DES needs at least an 8-byte key; this password is a placeholder for the example)
    String password = "changeit";
    byte key[] = password.getBytes();
    DESKeySpec desKeySpec = new DESKeySpec(key);
    SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("DES");
    SecretKey secretKey = keyFactory.generateSecret(desKeySpec);

    // Create Cipher
    Cipher desCipher = Cipher.getInstance("DES/ECB/PKCS5Padding");
    desCipher.init(Cipher.ENCRYPT_MODE, secretKey);

    // Create stream
    FileOutputStream fos = new FileOutputStream("out.des");
    BufferedOutputStream bos = new BufferedOutputStream(fos);
    CipherOutputStream cos = new CipherOutputStream(bos, desCipher);
    ObjectOutputStream oos = new ObjectOutputStream(cos);

    // Write objects
    oos.writeObject(set);
    oos.writeInt(last);
    oos.flush();
    oos.close();

    // Change cipher mode
    desCipher.init(Cipher.DECRYPT_MODE, secretKey);

    // Create stream
    FileInputStream fis = new FileInputStream("out.des");
    BufferedInputStream bis = new BufferedInputStream(fis);
    CipherInputStream cis = new CipherInputStream(bis, desCipher);
    ObjectInputStream ois = new ObjectInputStream(cis);

    // Read objects
    Set set2 = (Set) ois.readObject();
    int last2 = ois.readInt();
    ois.close();

    // Compare original with what was read back
    int count = 0;
    if (set.equals(set2)) {
        System.out.println("Set1: " + set);
        System.out.println("Set2: " + set2);
        System.out.println("Sets are okay.");
        count++;
    }
    if (last == last2) {
        System.out.println("int1: " + last);
        System.out.println("int2: " + last2);
        System.out.println("ints are okay.");
        count++;
    }
    if (count != 2) {
        System.out.println("Problem during encryption/decryption");
    }
}
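
Here set.equals(set2) verifies the encrypt/decrypt round trip: the HashSet written through the CipherOutputStream and read back through the CipherInputStream must compare equal to the original (assuming the Point here is java.awt.Point, whose equals compares coordinates).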

From source file:voldemort.VoldemortAdminTool.java

@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
    OptionParser parser = new OptionParser();
    // This is a generic argument that should eventually be supported by all
    // RW operations.
    // If you omit this argument, the operation is executed in "batch" mode,
    // which is useful for scripting.
    // Otherwise you are presented with a summary of the changes and a
    // Y/N prompt.
    parser.accepts("auto", "[OPTIONAL] enable auto/batch mode");
    parser.accepts("help", "print help information");
    parser.accepts("url", "[REQUIRED] bootstrap URL").withRequiredArg().describedAs("bootstrap-url")
            .ofType(String.class);
    parser.accepts("node", "node id").withRequiredArg().describedAs("node-id").ofType(Integer.class);
    parser.accepts("delete-partitions", "Delete partitions").withRequiredArg().describedAs("partition-ids")
            .withValuesSeparatedBy(',').ofType(Integer.class);
    parser.accepts("restore", "Restore from replication [ Optional parallelism param - Default - 5 ]")
            .withOptionalArg().describedAs("parallelism").ofType(Integer.class);
    parser.accepts("ascii", "Fetch keys as ASCII");
    parser.accepts("fetch-keys", "Fetch keys").withOptionalArg().describedAs("partition-ids")
            .withValuesSeparatedBy(',').ofType(Integer.class);
    parser.accepts("fetch-entries", "Fetch full entries").withOptionalArg().describedAs("partition-ids")
            .withValuesSeparatedBy(',').ofType(Integer.class);
    parser.accepts("outdir", "Output directory").withRequiredArg().describedAs("output-directory")
            .ofType(String.class);
    parser.accepts("nodes", "list of nodes").withRequiredArg().describedAs("nodes").withValuesSeparatedBy(',')
            .ofType(Integer.class);
    parser.accepts("stores", "Store names").withRequiredArg().describedAs("store-names")
            .withValuesSeparatedBy(',').ofType(String.class);
    parser.accepts("store", "Store name for querying keys").withRequiredArg().describedAs("store-name")
            .ofType(String.class);
    parser.accepts("add-stores", "Add stores in this stores.xml").withRequiredArg()
            .describedAs("stores.xml containing just the new stores").ofType(String.class);
    parser.accepts("delete-store", "Delete store").withRequiredArg().describedAs("store-name")
            .ofType(String.class);
    parser.accepts("update-entries", "Insert or update entries").withRequiredArg()
            .describedAs("input-directory").ofType(String.class);
    parser.accepts("get-metadata", "retreive metadata information " + MetadataStore.METADATA_KEYS)
            .withOptionalArg().describedAs("metadata-key").ofType(String.class);
    parser.accepts("check-metadata",
            "retreive metadata information from all nodes and checks if they are consistent across [ "
                    + MetadataStore.CLUSTER_KEY + " | " + MetadataStore.STORES_KEY + " | "
                    + MetadataStore.REBALANCING_SOURCE_CLUSTER_XML + " | " + MetadataStore.SERVER_STATE_KEY
                    + " ]")
            .withRequiredArg().describedAs("metadata-key").ofType(String.class);
    parser.accepts("ro-metadata", "retrieve version information [current | max | storage-format]")
            .withRequiredArg().describedAs("type").ofType(String.class);
    parser.accepts("truncate", "truncate a store").withRequiredArg().describedAs("store-name")
            .ofType(String.class);
    parser.accepts("set-metadata",
            "Forceful setting of metadata [ " + MetadataStore.CLUSTER_KEY + " | " + MetadataStore.STORES_KEY
                    + " | " + MetadataStore.SERVER_STATE_KEY + " | "
                    + MetadataStore.REBALANCING_SOURCE_CLUSTER_XML + " | "
                    + MetadataStore.REBALANCING_STEAL_INFO + " ]")
            .withRequiredArg().describedAs("metadata-key").ofType(String.class);
    parser.accepts("set-metadata-value",
            "The value for the set-metadata [ " + MetadataStore.CLUSTER_KEY + " | " + MetadataStore.STORES_KEY
                    + ", " + MetadataStore.REBALANCING_SOURCE_CLUSTER_XML + ", "
                    + MetadataStore.REBALANCING_STEAL_INFO + " ] - xml file location, [ "
                    + MetadataStore.SERVER_STATE_KEY + " ] - " + MetadataStore.VoldemortState.NORMAL_SERVER
                    + "," + MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)
            .withRequiredArg().describedAs("metadata-value").ofType(String.class);
    parser.accepts("set-metadata-pair",
            "Atomic setting of metadata pair [ " + MetadataStore.CLUSTER_KEY + " & " + MetadataStore.STORES_KEY
                    + " ]")
            .withRequiredArg().describedAs("metadata-keys-pair").withValuesSeparatedBy(',')
            .ofType(String.class);
    parser.accepts("set-metadata-value-pair",
            "The value for the set-metadata pair [ " + MetadataStore.CLUSTER_KEY + " & "
                    + MetadataStore.STORES_KEY + " ]")
            .withRequiredArg().describedAs("metadata-value-pair").withValuesSeparatedBy(',')
            .ofType(String.class);
    parser.accepts("clear-rebalancing-metadata", "Remove the metadata related to rebalancing");
    parser.accepts("async", "a) Get a list of async job ids [get] b) Stop async job ids [stop] ")
            .withRequiredArg().describedAs("op-type").ofType(String.class);
    parser.accepts("async-id", "Comma separated list of async ids to stop").withOptionalArg()
            .describedAs("job-ids").withValuesSeparatedBy(',').ofType(Integer.class);
    parser.accepts("repair-job", "Clean after rebalancing is done");
    parser.accepts("prune-job", "Prune versioned put data, after rebalancing");
    parser.accepts("purge-slops", "Purge the slop stores selectively, based on nodeId or zoneId");
    parser.accepts("native-backup", "Perform a native backup").withRequiredArg().describedAs("store-name")
            .ofType(String.class);
    parser.accepts("backup-dir").withRequiredArg().describedAs("backup-directory").ofType(String.class);
    parser.accepts("backup-timeout").withRequiredArg()
            .describedAs("minutes to wait for backup completion, default 30 mins").ofType(Integer.class);
    parser.accepts("backup-verify", "If provided, backup will also verify checksum (with extra overhead)");
    parser.accepts("backup-incremental", "Perform an incremental backup for point-in-time recovery."
            + " By default backup has latest consistent snapshot.");
    parser.accepts("zone", "zone id").withRequiredArg().describedAs("zone-id").ofType(Integer.class);
    parser.accepts("rollback", "rollback a store").withRequiredArg().describedAs("store-name")
            .ofType(String.class);
    parser.accepts("version", "Push version of store to rollback to").withRequiredArg().describedAs("version")
            .ofType(Long.class);
    parser.accepts("verify-metadata-version", "Verify the version of Metadata on all the cluster nodes");
    parser.accepts("synchronize-metadata-version", "Synchronize the metadata versions across all the nodes.");
    parser.accepts("reserve-memory", "Memory in MB to reserve for the store").withRequiredArg()
            .describedAs("size-in-mb").ofType(Long.class);
    parser.accepts("query-key", "Get values of a key on specific node").withRequiredArg()
            .describedAs("query-key").ofType(String.class);
    parser.accepts("query-key-format", "Format of the query key. Can be one of [hex|readable]")
            .withRequiredArg().describedAs("key-format").ofType(String.class);
    parser.accepts("show-routing-plan", "Routing plan of the specified keys").withRequiredArg()
            .describedAs("keys-to-be-routed").withValuesSeparatedBy(',').ofType(String.class);
    parser.accepts("mirror-from-url", "Cluster url to mirror data from").withRequiredArg()
            .describedAs("mirror-cluster-bootstrap-url").ofType(String.class);
    parser.accepts("mirror-node", "Node id in the mirror cluster to mirror from").withRequiredArg()
            .describedAs("id-of-mirror-node").ofType(Integer.class);
    parser.accepts("fetch-orphaned", "Fetch any orphaned keys/entries in the node");
    parser.accepts("set-quota", "Enforce some quota on the servers").withRequiredArg().describedAs("quota-type")
            .ofType(String.class);
    parser.accepts("quota-value", "Value of the quota enforced on the servers").withRequiredArg()
            .describedAs("quota-value").ofType(String.class);
    parser.accepts("unset-quota", "Remove some quota already enforced on the servers").withRequiredArg()
            .describedAs("quota-type").ofType(String.class);
    // TODO add a way to retrieve all quotas for a given store.
    parser.accepts("get-quota", "Retrieve some quota already enforced on the servers").withRequiredArg()
            .describedAs("quota-type").ofType(String.class);

    OptionSet options = parser.parse(args);

    if (options.has("help")) {
        printHelp(System.out, parser);
        System.exit(0);
    }

    Set<String> missing = CmdUtils.missing(options, "url", "node");
    if (missing.size() > 0) {
        // Not the most elegant way to do this
        // basically check if only "node" is missing for this set of
        // options; all these can live without explicit node ids
        if (!(missing.equals(ImmutableSet.of("node"))
                && (options.has("add-stores") || options.has("delete-store") || options.has("ro-metadata")
                        || options.has("set-metadata") || options.has("set-metadata-pair")
                        || options.has("get-metadata") || options.has("check-metadata"))
                || options.has("truncate") || options.has("clear-rebalancing-metadata") || options.has("async")
                || options.has("native-backup") || options.has("rollback")
                || options.has("verify-metadata-version") || options.has("reserve-memory")
                || options.has("purge-slops") || options.has("show-routing-plan") || options.has("query-key")
                || options.has("set-quota") || options.has("unset-quota") || options.has("get-quota"))) {
            System.err.println("Missing required arguments: " + Joiner.on(", ").join(missing));
            printHelp(System.err, parser);
            System.exit(1);
        }
    }

    try {
        String url = (String) options.valueOf("url");
        Integer nodeId = CmdUtils.valueOf(options, "node", -1);
        int parallelism = CmdUtils.valueOf(options, "restore", 5);
        Integer zoneId = CmdUtils.valueOf(options, "zone", -1);

        AdminClient adminClient = new AdminClient(url, new AdminClientConfig(), new ClientConfig());

        List<String> storeNames = null;
        if (options.has("store") && options.has("stores")) {
            throw new VoldemortException("Must not specify both --stores and --store options");
        } else if (options.has("stores")) {
            storeNames = (List<String>) options.valuesOf("stores");
        } else if (options.has("store")) {
            storeNames = Arrays.asList((String) options.valueOf("store"));
        }

        String outputDir = null;
        if (options.has("outdir")) {
            outputDir = (String) options.valueOf("outdir");
        }

        if (options.has("add-stores")) {
            String storesXml = (String) options.valueOf("add-stores");
            executeAddStores(adminClient, storesXml, nodeId);
        } else if (options.has("async")) {
            String asyncKey = (String) options.valueOf("async");
            List<Integer> asyncIds = null;
            if (options.hasArgument("async-id"))
                asyncIds = (List<Integer>) options.valuesOf("async-id");
            executeAsync(nodeId, adminClient, asyncKey, asyncIds);
        } else if (options.has("check-metadata")) {
            String metadataKey = (String) options.valueOf("check-metadata");
            executeCheckMetadata(adminClient, metadataKey);
        } else if (options.has("delete-partitions")) {
            System.out.println("Starting delete-partitions");
            List<Integer> partitionIdList = (List<Integer>) options.valuesOf("delete-partitions");
            executeDeletePartitions(nodeId, adminClient, partitionIdList, storeNames);
            System.out.println("Finished delete-partitions");
        } else if (options.has("ro-metadata")) {
            String type = (String) options.valueOf("ro-metadata");
            executeROMetadata(nodeId, adminClient, storeNames, type);
        } else if (options.has("reserve-memory")) {
            if (!options.has("stores")) {
                Utils.croak("Specify the list of stores to reserve memory");
            }
            long reserveMB = (Long) options.valueOf("reserve-memory");
            adminClient.quotaMgmtOps.reserveMemory(nodeId, storeNames, reserveMB);
        } else if (options.has("get-metadata")) {
            String metadataKey = ALL_METADATA;
            if (options.hasArgument("get-metadata")) {
                metadataKey = (String) options.valueOf("get-metadata");
            }
            executeGetMetadata(nodeId, adminClient, metadataKey, outputDir);
        } else if (options.has("mirror-from-url")) {
            if (!options.has("mirror-node")) {
                Utils.croak("Specify the mirror node to fetch from");
            }

            if (nodeId == -1) {
                System.err.println("Cannot run mirroring without node id");
                System.exit(1);
            }
            Integer mirrorNodeId = CmdUtils.valueOf(options, "mirror-node", -1);
            if (mirrorNodeId == -1) {
                System.err.println("Cannot run mirroring without mirror node id");
                System.exit(1);
            }
            adminClient.restoreOps.mirrorData(nodeId, mirrorNodeId, (String) options.valueOf("mirror-from-url"),
                    storeNames);
        } else if (options.has("clear-rebalancing-metadata")) {
            executeClearRebalancing(nodeId, adminClient);
        } else if (options.has("prune-job")) {
            if (storeNames == null) {
                Utils.croak("Must specify --stores to run the prune job");
            }
            executePruneJob(nodeId, adminClient, storeNames);
        } else if (options.has("fetch-keys")) {
            boolean useAscii = options.has("ascii");
            System.out.println("Starting fetch keys");
            List<Integer> partitionIdList = null;
            if (options.hasArgument("fetch-keys"))
                partitionIdList = (List<Integer>) options.valuesOf("fetch-keys");
            executeFetchKeys(nodeId, adminClient, partitionIdList, outputDir, storeNames, useAscii,
                    options.has("fetch-orphaned"));
        } else if (options.has("repair-job")) {
            executeRepairJob(nodeId, adminClient);
        } else if (options.has("set-metadata-pair")) {
            List<String> metadataKeyPair = (List<String>) options.valuesOf("set-metadata-pair");
            if (metadataKeyPair.size() != 2) {
                throw new VoldemortException(
                        "Missing set-metadata-pair keys (only two keys are needed and allowed)");
            }
            if (!options.has("set-metadata-value-pair")) {
                throw new VoldemortException("Missing set-metadata-value-pair");
            } else {

                List<String> metadataValuePair = (List<String>) options.valuesOf("set-metadata-value-pair");
                if (metadataValuePair.size() != 2) {
                    throw new VoldemortException(
                            "Missing set-metadata--value-pair values (only two values are needed and allowed)");
                }
                if (metadataKeyPair.contains(MetadataStore.CLUSTER_KEY)
                        && metadataKeyPair.contains(MetadataStore.STORES_KEY)) {
                    ClusterMapper clusterMapper = new ClusterMapper();
                    StoreDefinitionsMapper storeDefsMapper = new StoreDefinitionsMapper();
                    // original metadata
                    Integer nodeIdToGetStoreXMLFrom = nodeId;
                    if (nodeId < 0) {
                        Collection<Node> nodes = adminClient.getAdminClientCluster().getNodes();
                        if (nodes.isEmpty()) {
                            throw new VoldemortException("No nodes in this cluster");
                        } else {
                            nodeIdToGetStoreXMLFrom = nodes.iterator().next().getId();
                        }
                    }
                    Versioned<String> storesXML = adminClient.metadataMgmtOps
                            .getRemoteMetadata(nodeIdToGetStoreXMLFrom, MetadataStore.STORES_KEY);
                    List<StoreDefinition> oldStoreDefs = storeDefsMapper
                            .readStoreList(new StringReader(storesXML.getValue()));

                    String clusterXMLPath = metadataValuePair
                            .get(metadataKeyPair.indexOf(MetadataStore.CLUSTER_KEY));
                    clusterXMLPath = clusterXMLPath.replace("~", System.getProperty("user.home"));
                    if (!Utils.isReadableFile(clusterXMLPath))
                        throw new VoldemortException("Cluster xml file path incorrect");
                    Cluster cluster = clusterMapper.readCluster(new File(clusterXMLPath));

                    String storesXMLPath = metadataValuePair
                            .get(metadataKeyPair.indexOf(MetadataStore.STORES_KEY));
                    storesXMLPath = storesXMLPath.replace("~", System.getProperty("user.home"));
                    if (!Utils.isReadableFile(storesXMLPath))
                        throw new VoldemortException("Stores definition xml file path incorrect");
                    List<StoreDefinition> newStoreDefs = storeDefsMapper.readStoreList(new File(storesXMLPath));
                    checkSchemaCompatibility(newStoreDefs);

                    executeSetMetadataPair(nodeId, adminClient, MetadataStore.CLUSTER_KEY,
                            clusterMapper.writeCluster(cluster), MetadataStore.STORES_KEY,
                            storeDefsMapper.writeStoreList(newStoreDefs));
                    executeUpdateMetadataVersionsOnStores(adminClient, oldStoreDefs, newStoreDefs);
                } else {
                    throw new VoldemortException("set-metadata-pair keys should be <cluster.xml, stores.xml>");
                }
            }
        } else if (options.has("set-metadata")) {

            String metadataKey = (String) options.valueOf("set-metadata");
            if (!options.has("set-metadata-value")) {
                throw new VoldemortException("Missing set-metadata-value");
            } else {
                String metadataValue = (String) options.valueOf("set-metadata-value");
                if (metadataKey.compareTo(MetadataStore.CLUSTER_KEY) == 0
                        || metadataKey.compareTo(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML) == 0) {
                    if (!Utils.isReadableFile(metadataValue))
                        throw new VoldemortException("Cluster xml file path incorrect");
                    ClusterMapper mapper = new ClusterMapper();
                    Cluster newCluster = mapper.readCluster(new File(metadataValue));
                    if (options.has("auto")) {
                        executeSetMetadata(nodeId, adminClient, metadataKey, mapper.writeCluster(newCluster));
                    } else {
                        if (confirmMetadataUpdate(nodeId, adminClient, mapper.writeCluster(newCluster))) {
                            executeSetMetadata(nodeId, adminClient, metadataKey,
                                    mapper.writeCluster(newCluster));
                        } else {
                            System.out.println("New metadata has not been set");
                        }
                    }
                } else if (metadataKey.compareTo(MetadataStore.SERVER_STATE_KEY) == 0) {
                    VoldemortState newState = VoldemortState.valueOf(metadataValue);
                    executeSetMetadata(nodeId, adminClient, MetadataStore.SERVER_STATE_KEY,
                            newState.toString());
                } else if (metadataKey.compareTo(MetadataStore.STORES_KEY) == 0) {
                    if (!Utils.isReadableFile(metadataValue))
                        throw new VoldemortException("Stores definition xml file path incorrect");
                    StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
                    List<StoreDefinition> newStoreDefs = mapper.readStoreList(new File(metadataValue));
                    checkSchemaCompatibility(newStoreDefs);

                    // original metadata
                    Integer nodeIdToGetStoreXMLFrom = nodeId;
                    if (nodeId < 0) {
                        Collection<Node> nodes = adminClient.getAdminClientCluster().getNodes();
                        if (nodes.isEmpty()) {
                            throw new VoldemortException("No nodes in this cluster");
                        } else {
                            nodeIdToGetStoreXMLFrom = nodes.iterator().next().getId();
                        }
                    }

                    Versioned<String> storesXML = adminClient.metadataMgmtOps
                            .getRemoteMetadata(nodeIdToGetStoreXMLFrom, MetadataStore.STORES_KEY);

                    List<StoreDefinition> oldStoreDefs = mapper
                            .readStoreList(new StringReader(storesXML.getValue()));
                    if (options.has("auto")) {
                        executeSetMetadata(nodeId, adminClient, MetadataStore.STORES_KEY,
                                mapper.writeStoreList(newStoreDefs));
                        executeUpdateMetadataVersionsOnStores(adminClient, oldStoreDefs, newStoreDefs);
                    } else {
                        if (confirmMetadataUpdate(nodeId, adminClient, storesXML.getValue())) {
                            executeSetMetadata(nodeId, adminClient, MetadataStore.STORES_KEY,
                                    mapper.writeStoreList(newStoreDefs));
                            if (nodeId >= 0) {
                                System.err.println(
                                        "WARNING: Metadata version update of stores goes to all servers, "
                                                + "although this set-metadata oprations only goes to node "
                                                + nodeId);
                            }
                            executeUpdateMetadataVersionsOnStores(adminClient, oldStoreDefs, newStoreDefs);
                        } else {
                            System.out.println("New metadata has not been set");
                        }
                    }

                } else if (metadataKey.compareTo(MetadataStore.REBALANCING_STEAL_INFO) == 0) {
                    if (!Utils.isReadableFile(metadataValue))
                        throw new VoldemortException("Rebalancing steal info file path incorrect");
                    String rebalancingStealInfoJsonString = FileUtils.readFileToString(new File(metadataValue));
                    RebalancerState state = RebalancerState.create(rebalancingStealInfoJsonString);
                    executeSetMetadata(nodeId, adminClient, MetadataStore.REBALANCING_STEAL_INFO,
                            state.toJsonString());
                } else {
                    throw new VoldemortException("Incorrect metadata key");
                }
            }
        } else if (options.has("native-backup")) {
            if (!options.has("backup-dir")) {
                Utils.croak("A backup directory must be specified with backup-dir option");
            }

            String backupDir = (String) options.valueOf("backup-dir");
            String storeName = (String) options.valueOf("native-backup");
            int timeout = CmdUtils.valueOf(options, "backup-timeout", 30);
            adminClient.storeMntOps.nativeBackup(nodeId, storeName, backupDir, timeout,
                    options.has("backup-verify"), options.has("backup-incremental"));
        } else if (options.has("rollback")) {
            if (!options.has("version")) {
                Utils.croak("A read-only push version must be specified with rollback option");
            }
            String storeName = (String) options.valueOf("rollback");
            long pushVersion = (Long) options.valueOf("version");
            executeRollback(nodeId, storeName, pushVersion, adminClient);
        } else if (options.has("query-key")) {
            String key = (String) options.valueOf("query-key");
            String keyFormat = (String) options.valueOf("query-key-format");
            if (keyFormat == null) {
                keyFormat = "hex";
            }
            if (!keyFormat.equals("hex") && !keyFormat.equals("readable")) {
                throw new VoldemortException("--query-key-format must be hex or readable");
            }
            executeQueryKey(nodeId, adminClient, storeNames, key, keyFormat);
        } else if (options.has("restore")) {
            if (nodeId == -1) {
                System.err.println("Cannot run restore without node id");
                System.exit(1);
            }
            System.out.println("Starting restore");
            adminClient.restoreOps.restoreDataFromReplications(nodeId, parallelism, zoneId);
            System.out.println("Finished restore");
        } else if (options.has("delete-store")) {
            String storeName = (String) options.valueOf("delete-store");
            executeDeleteStore(adminClient, storeName, nodeId);
        } else if (options.has("truncate")) {
            String storeName = (String) options.valueOf("truncate");
            executeTruncateStore(nodeId, adminClient, storeName);
        } else if (options.has("update-entries")) {
            String inputDir = (String) options.valueOf("update-entries");
            executeUpdateEntries(nodeId, adminClient, storeNames, inputDir);
        } else if (options.has("fetch-entries")) {
            boolean useAscii = options.has("ascii");
            System.out.println("Starting fetch entries");
            List<Integer> partitionIdList = null;
            if (options.hasArgument("fetch-entries"))
                partitionIdList = (List<Integer>) options.valuesOf("fetch-entries");
            executeFetchEntries(nodeId, adminClient, partitionIdList, outputDir, storeNames, useAscii,
                    options.has("fetch-orphaned"));
        } else if (options.has("purge-slops")) {
            List<Integer> nodesToPurge = null;
            if (options.has("nodes")) {
                nodesToPurge = (List<Integer>) options.valuesOf("nodes");
            }
            if (nodesToPurge == null && zoneId == -1 && storeNames == null) {
                Utils.croak("Must specify atleast one of --nodes, --zone-id or --stores with --purge-slops");
            }
            executePurgeSlops(adminClient, nodesToPurge, zoneId, storeNames);
        } else if (options.has("synchronize-metadata-version")) {
            synchronizeMetadataVersion(adminClient, nodeId);
        } else if (options.has("verify-metadata-version")) {
            checkMetadataVersion(adminClient);
        } else if (options.has("show-routing-plan")) {
            if (!options.has("store")) {
                Utils.croak("Must specify the store the keys belong to using --store ");
            }
            String storeName = (String) options.valueOf("store");
            List<String> keysToRoute = (List<String>) options.valuesOf("show-routing-plan");
            if (keysToRoute == null || keysToRoute.size() == 0) {
                Utils.croak("Must specify comma separated keys list in hex format");
            }
            executeShowRoutingPlan(adminClient, storeName, keysToRoute);

        } else if (options.has("set-quota")) {
            String quotaType = (String) options.valueOf("set-quota");
            Set<String> validQuotaTypes = QuotaUtils.validQuotaTypes();
            if (!validQuotaTypes.contains(quotaType)) {
                Utils.croak("Specify a valid quota type from :" + validQuotaTypes);
            }
            if (!options.has("store")) {
                Utils.croak("Must specify the store to enforce the quota on. ");
            }
            if (!options.has("quota-value")) {
                Utils.croak("Must specify the value of the quota being set");
            }
            String storeName = (String) options.valueOf("store");
            String quotaValue = (String) options.valueOf("quota-value");
            executeSetQuota(adminClient, storeName, quotaType, quotaValue);

        } else if (options.has("unset-quota")) {
            String quotaType = (String) options.valueOf("unset-quota");
            Set<String> validQuotaTypes = QuotaUtils.validQuotaTypes();
            if (!validQuotaTypes.contains(quotaType)) {
                Utils.croak("Specify a valid quota type from :" + validQuotaTypes);
            }
            if (!options.has("store")) {
                Utils.croak("Must specify the store to enforce the quota on. ");
            }
            String storeName = (String) options.valueOf("store");
            executeUnsetQuota(adminClient, storeName, quotaType);
        } else if (options.has("get-quota")) {
            String quotaType = (String) options.valueOf("get-quota");
            Set<String> validQuotaTypes = QuotaUtils.validQuotaTypes();
            if (!validQuotaTypes.contains(quotaType)) {
                Utils.croak("Specify a valid quota type from :" + validQuotaTypes);
            }
            if (!options.has("store")) {
                Utils.croak("Must specify the store to enforce the quota on. ");
            }
            String storeName = (String) options.valueOf("store");
            executeGetQuota(adminClient, storeName, quotaType);
        } else {

            Utils.croak("At least one of (delete-partitions, restore, add-node, fetch-entries, "
                    + "fetch-keys, add-stores, delete-store, update-entries, get-metadata, ro-metadata, "
                    + "set-metadata, check-metadata, clear-rebalancing-metadata, async, "
                    + "repair-job, native-backup, rollback, reserve-memory, mirror-url,"
                    + " verify-metadata-version, prune-job, purge-slops) must be specified");
        }
    } catch (Exception e) {
        e.printStackTrace();
        Utils.croak(e.getMessage());
    }
}
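
As in StoreSwapper above, missing.equals(ImmutableSet.of("node")) checks whether "node" is the only missing argument; the listed operations can run without an explicit node id, so only that specific gap is forgiven.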

From source file:Main.java

/**
 * Check whether two {@link Map}s are equal.
 */
public static <T> boolean equals(Map<T, byte[]> map, Map<T, byte[]> otherMap) {
    if (map == otherMap) {
        return true;
    }
    if (map == null || otherMap == null) {
        return false;
    }
    if (map.size() != otherMap.size()) {
        return false;
    }
    Set<T> keys = map.keySet();
    if (!keys.equals(otherMap.keySet())) {
        return false;
    }
    for (T key : keys) {
        if (!Objects.deepEquals(map.get(key), otherMap.get(key))) {
            return false;
        }
    }
    return true;
}
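
The keys.equals(otherMap.keySet()) call establishes that both maps contain exactly the same keys; the extra Objects.deepEquals pass is still required because byte[] values compare by identity under equals(), so their contents have to be checked element by element.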

From source file:Main.java

/**
 * Check whether two {@link Map}s are equal.
 */
static <T> boolean equals(Map<T, byte[]> map, Map<T, byte[]> otherMap) {
    if (map == otherMap) {
        return true;
    }
    if (map == null || otherMap == null) {
        return false;
    }
    if (map.size() != otherMap.size()) {
        return false;
    }
    Set<T> keys = map.keySet();
    if (!keys.equals(otherMap.keySet())) {
        return false;
    }
    for (T key : keys) {
        if (!Objects.deepEquals(map.get(key), otherMap.get(key))) {
            return false;
        }
    }
    return true;
}

From source file:Main.java

public static boolean containSameItems(Collection c1, Collection c2) {
    Set s1 = (c1 instanceof Set) ? (Set) c1 : new HashSet(c1);
    Set s2 = (c2 instanceof Set) ? (Set) c2 : new HashSet(c2);
    return s1.equals(s2);
}
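
A quick usage sketch (illustrative values; assumes java.util imports and the helper above in scope), showing that duplicates and ordering are ignored once both collections are viewed as sets:

List<String> list = Arrays.asList("B", "A", "B");
Set<String> set = new HashSet<String>(Arrays.asList("A", "B"));
System.out.println(containSameItems(list, set)); // true: both reduce to {A, B}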

From source file:org.apache.cassandra.dht.RangeIntersectionTest.java

static void assertIntersection(Range one, Range two, Range... ranges) {
    Set<Range> correct = Range.rangeSet(ranges);
    Set<Range> result1 = one.intersectionWith(two);
    assert result1.equals(correct) : String.format("%s != %s", StringUtils.join(result1, ","),
            StringUtils.join(correct, ","));
    Set<Range> result2 = two.intersectionWith(one);
    assert result2.equals(correct) : String.format("%s != %s", StringUtils.join(result2, ","),
            StringUtils.join(correct, ","));
}

From source file:uniol.apt.analysis.isomorphism.IsomorphismLogic.java

private static BidiMap<State, State> checkViaDepthSearch(TransitionSystem lts1, TransitionSystem lts2) {
    BidiMap<State, State> result = new DualHashBidiMap<>();
    Set<String> alphabet = lts1.getAlphabet();
    if (!alphabet.equals(lts2.getAlphabet()))
        // Not isomorphic, there is an arc with a label not occurring in the other lts
        return result;

    Queue<Pair<State, State>> unhandled = new ArrayDeque<>();
    visit(result, unhandled, lts1.getInitialState(), lts2.getInitialState());

    while (!unhandled.isEmpty()) {
        InterrupterRegistry.throwIfInterruptRequestedForCurrentThread();

        Pair<State, State> pair = unhandled.remove();
        State state1 = pair.getFirst();
        State state2 = pair.getSecond();

        for (String label : alphabet) {
            State follow1 = follow(state1, label);
            State follow2 = follow(state2, label);

            if (!visit(result, unhandled, follow1, follow2))
                // Not isomorphic
                return new DualHashBidiMap<>();
        }
    }

    return result;
}