Example usage for java.util EnumSet noneOf

List of usage examples for java.util EnumSet noneOf

Introduction

On this page you can find usage examples for java.util EnumSet.noneOf.

Prototype

public static <E extends Enum<E>> EnumSet<E> noneOf(Class<E> elementType) 

Document

Creates an empty enum set with the specified element type.
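
Before the project examples, here is a minimal, self-contained sketch of the method in isolation; the Day enum below is hypothetical and used only for illustration. noneOf returns an empty, mutable EnumSet typed to the given enum class, which can then be populated like any other Set.

import java.util.EnumSet;

public class EnumSetNoneOfExample {
    // Hypothetical enum used only for this illustration.
    enum Day { MONDAY, TUESDAY, WEDNESDAY }

    public static void main(String[] args) {
        // Create an empty set typed to the Day enum.
        EnumSet<Day> days = EnumSet.noneOf(Day.class);
        System.out.println(days.isEmpty()); // true

        // Elements can be added later like any other Set.
        days.add(Day.MONDAY);
        System.out.println(days); // [MONDAY]
    }
}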

Usage

From source file: org.apache.hadoop.tools.mapred.TestCopyMapper.java

@Test(timeout = 40000)
public void testCopyFailOnBlockSizeDifference() {
    try {
        deleteState();
        createSourceDataWithDifferentBlockSize();

        FileSystem fs = cluster.getFileSystem();
        CopyMapper copyMapper = new CopyMapper();
        StubContext stubContext = new StubContext(getConfiguration(), null, 0);
        Mapper<Text, CopyListingFileStatus, Text, Text>.Context context = stubContext.getContext();

        Configuration configuration = context.getConfiguration();
        EnumSet<DistCpOptions.FileAttribute> fileAttributes = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
        configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
                DistCpUtils.packAttributes(fileAttributes));

        copyMapper.setup(context);

        for (Path path : pathList) {
            final FileStatus fileStatus = fs.getFileStatus(path);
            copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
                    new CopyListingFileStatus(fileStatus), context);
        }

        Assert.fail("Copy should have failed because of block-size difference.");
    } catch (Exception exception) {
        // Check that the exception suggests the use of -pb/-skipCrc.
        Assert.assertTrue("Failure exception should have suggested the use of -pb.",
                exception.getCause().getCause().getMessage().contains("pb"));
        Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.",
                exception.getCause().getCause().getMessage().contains("skipCrc"));
    }
}

From source file: com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java

private void testPreserveBlockSizeAndReplicationImpl(boolean preserve) {
    try {

        deleteState();
        createSourceData();

        FileSystem fs = cluster.getFileSystem();
        CopyMapper copyMapper = new CopyMapper();
        StatusReporter reporter = new StubStatusReporter();
        InMemoryWriter writer = new InMemoryWriter();
        Mapper<Text, FileStatus, NullWritable, Text>.Context context = getMapperContext(copyMapper, reporter,
                writer);

        Configuration configuration = context.getConfiguration();
        EnumSet<DistCpOptions.FileAttribute> fileAttributes = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
        if (preserve) {
            fileAttributes.add(DistCpOptions.FileAttribute.BLOCKSIZE);
            fileAttributes.add(DistCpOptions.FileAttribute.REPLICATION);
        }
        configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
                DistCpUtils.packAttributes(fileAttributes));

        copyMapper.setup(context);

        for (Path path : pathList) {
            final FileStatus fileStatus = fs.getFileStatus(path);
            copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), fileStatus,
                    context);
        }

        // Check that the block-size/replication aren't preserved.
        for (Path path : pathList) {
            final Path targetPath = new Path(path.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
            final FileStatus source = fs.getFileStatus(path);
            final FileStatus target = fs.getFileStatus(targetPath);
            if (!source.isDir()) {
                Assert.assertTrue(preserve || source.getBlockSize() != target.getBlockSize());
                Assert.assertTrue(preserve || source.getReplication() != target.getReplication());
                Assert.assertTrue(!preserve || source.getBlockSize() == target.getBlockSize());
                Assert.assertTrue(!preserve || source.getReplication() == target.getReplication());
            }
        }
    } catch (Exception e) {
        Assert.assertTrue("Unexpected exception: " + e.getMessage(), false);
        e.printStackTrace();
    }
}

From source file: org.lilyproject.repository.impl.HBaseRepository.java

private Set<Scope> calculateUpdateFields(Record parentRecord, Map<QName, Object> fields,
        Map<QName, Metadata> fieldMetadata, Map<QName, Object> originalFields,
        Map<QName, Metadata> originalFieldMetadata, Map<QName, Object> originalNextFields, Long version,
        Put put, RecordEvent recordEvent, Set<BlobReference> referencedBlobs,
        Set<BlobReference> unReferencedBlobs, boolean mutableUpdate, FieldTypes fieldTypes)
        throws InterruptedException, RepositoryException {

    Set<Scope> changedScopes = EnumSet.noneOf(Scope.class);

    // In the below algorithm, the following facts are good to know about metadata:
    //  - there can only be field metadata when there is a field value
    //  - metadata is updated in the same way as fields: only updated values need to be specified, and deletes
    //    are explicit (Metadata.deletedFields).
    //  - it is possible/supported that only metadata changes, and that the field value stayed the same. Thus it
    //    can be that a Record object contains metadata for a field but no field value (because that stays
    //    the same). For versioned fields, this causes a new version.

    // Map containing the actual new metadata that needs to be applied: thus the merged view of the old
    // and new metadata. In case the metadata has not changed, there will not be an entry here, and
    // the metadata from the old field needs to be copied.
    Map<QName, Metadata> newMetadata = new HashMap<QName, Metadata>();

    Iterator<Entry<QName, Metadata>> fieldMetadataIt = fieldMetadata.entrySet().iterator();
    while (fieldMetadataIt.hasNext()) {
        Entry<QName, Metadata> entry = fieldMetadataIt.next();
        // If it's not a deleted field
        if (!isDeleteMarker(fields.get(entry.getKey()))) {
            // If the metadata has changed
            if (entry.getValue().updates(originalFieldMetadata.get(entry.getKey()))) {
                boolean needMetadata;

                // And the field itself didn't change
                if (!fields.containsKey(entry.getKey())) {
                    // And if the field existed before (you can't add metadata without having a field value)
                    if (originalFields.containsKey(entry.getKey())) {
                        // Then add the field in the fields map so that it will be treated in the loop that
                        // handles updated field values below
                        fields.put(entry.getKey(), METADATA_ONLY_UPDATE);
                        needMetadata = true;
                    } else {
                        // No new or old field value: can't have metadata for a field without a value
                        needMetadata = false;
                        // Remove this invalid metadata from the record object (the idea being that the record
                        // object returned to the user should correspond to persisted state).
                        fieldMetadataIt.remove();
                    }
                } else {
                    // Both field & metadata changed
                    needMetadata = true;
                }

                if (needMetadata) {
                    // Now that we've done all the checks to determine we need the metadata, calculate it
                    newMetadata.put(entry.getKey(),
                            mergeMetadata(entry.getValue(), originalFieldMetadata.get(entry.getKey())));
                }
            }
        } else {
            // Field is deleted.
            // Remove this invalid metadata from the record object (the idea being that the record
            // object returned to the user should correspond to persisted state).
            fieldMetadataIt.remove();
        }
    }

    FieldValueWriter fieldValueWriter = newFieldValueWriter(put, parentRecord);

    for (Entry<QName, Object> field : fields.entrySet()) {
        QName fieldName = field.getKey();
        Object newValue = field.getValue();
        boolean fieldIsNewOrDeleted = !originalFields.containsKey(fieldName);
        Object originalValue = originalFields.get(fieldName);
        if (!(((newValue == null) && (originalValue == null)) // Don't update if both are null
                || (isDeleteMarker(newValue) && fieldIsNewOrDeleted) // Don't delete if it doesn't exist
                || (newValue.equals(originalValue))) // Don't update if they didn't change
                || newMetadata.containsKey(field.getKey())) { // But do update if the metadata changed
            FieldTypeImpl fieldType = (FieldTypeImpl) fieldTypes.getFieldType(fieldName);
            Scope scope = fieldType.getScope();

            boolean metadataOnlyUpdate = false;
            if (newValue == METADATA_ONLY_UPDATE) {
                // The metadata was updated, but the field itself not
                metadataOnlyUpdate = true;
                newValue = originalFields.get(fieldName);
            }

            // Either use new or inherit old metadata (newMetadata map contains the merged metadata)
            Metadata metadata = newMetadata.get(fieldName);
            if (metadata == null) {
                metadata = originalFieldMetadata.get(fieldName);
            }

            if (!metadataOnlyUpdate) {
                // Check if the newValue contains blobs
                Set<BlobReference> newReferencedBlobs = getReferencedBlobs(fieldType, newValue);
                referencedBlobs.addAll(newReferencedBlobs);

                // Check if the previousValue contained blobs which should be deleted since they are no longer used
                // In case of a mutable update, it is checked later if no other versions use the blob before deciding to delete it
                if (Scope.NON_VERSIONED.equals(scope)
                        || (mutableUpdate && Scope.VERSIONED_MUTABLE.equals(scope))) {
                    if (originalValue != null) {
                        Set<BlobReference> previouslyReferencedBlobs = getReferencedBlobs(fieldType,
                                originalValue);
                        previouslyReferencedBlobs.removeAll(newReferencedBlobs);
                        unReferencedBlobs.addAll(previouslyReferencedBlobs);
                    }
                }
            }

            // Set the value
            if (Scope.NON_VERSIONED.equals(scope)) {
                fieldValueWriter.addFieldValue(fieldType, newValue, metadata, 1L);
            } else {
                fieldValueWriter.addFieldValue(fieldType, newValue, metadata, version);

                // If it is a mutable update and the next version of the field was the same as the one that is being updated,
                // the original value needs to be copied to that next version (due to sparseness of the table).
                if (originalNextFields != null && !fieldIsNewOrDeleted
                        && originalNextFields.containsKey(fieldName)) {
                    copyValueToNextVersionIfNeeded(parentRecord, version, fieldValueWriter, originalNextFields,
                            fieldName, originalValue, fieldTypes);
                }
            }

            changedScopes.add(scope);

            recordEvent.addUpdatedField(fieldType.getId());
        }
    }
    return changedScopes;
}

From source file: org.opencb.opencga.catalog.auth.authorization.CatalogAuthorizationManager.java

private DatasetAclEntry resolveDatasetPermissions(long studyId, String userId, String groupId,
        Map<String, DatasetAclEntry> userAclMap) throws CatalogException {
    if (userId.equals(ANONYMOUS)) {
        if (userAclMap.containsKey(userId)) {
            return userAclMap.get(userId);
        } else {
            return transformStudyAclToDatasetAcl(getStudyAclBelonging(studyId, userId, groupId));
        }
    }

    // Registered user
    EnumSet<DatasetAclEntry.DatasetPermissions> permissions = EnumSet
            .noneOf(DatasetAclEntry.DatasetPermissions.class);
    boolean flagPermissionFound = false;

    if (userAclMap.containsKey(userId)) {
        permissions.addAll(userAclMap.get(userId).getPermissions());
        flagPermissionFound = true;
    }
    if (StringUtils.isNotEmpty(groupId) && userAclMap.containsKey(groupId)) {
        permissions.addAll(userAclMap.get(groupId).getPermissions());
        flagPermissionFound = true;
    }
    if (userAclMap.containsKey(ANONYMOUS)) {
        permissions.addAll(userAclMap.get(ANONYMOUS).getPermissions());
        flagPermissionFound = true;
    }
    if (userAclMap.containsKey(OTHER_USERS_ID)) {
        permissions.addAll(userAclMap.get(OTHER_USERS_ID).getPermissions());
        flagPermissionFound = true;
    }

    if (flagPermissionFound) {
        return new DatasetAclEntry(userId, permissions);
    } else {
        return transformStudyAclToDatasetAcl(getStudyAclBelonging(studyId, userId, groupId));
    }
}

From source file: org.apache.openjpa.persistence.AnnotationPersistenceMetaDataSerializer.java

/**
 * Serialize cascades.
 */
private void serializeCascades(ValueMetaData vmd, AnnotationBuilder ab) {
    EnumSet<CascadeType> cascades = EnumSet.noneOf(CascadeType.class);
    if (vmd.getCascadePersist() == ValueMetaData.CASCADE_IMMEDIATE) {
        cascades.add(CascadeType.PERSIST);
    }
    if (vmd.getCascadeAttach() == ValueMetaData.CASCADE_IMMEDIATE) {
        cascades.add(CascadeType.MERGE);
    }
    if (vmd.getCascadeDelete() == ValueMetaData.CASCADE_IMMEDIATE) {
        cascades.add(CascadeType.REMOVE);
    }
    if (vmd.getCascadeRefresh() == ValueMetaData.CASCADE_IMMEDIATE) {
        cascades.add(CascadeType.REFRESH);
    }
    if (vmd.getCascadeDetach() == ValueMetaData.CASCADE_IMMEDIATE) {
        cascades.add(CascadeType.DETACH);
    }
    if (cascades.size() == 5) // ALL
    {
        cascades.clear();
        cascades.add(CascadeType.ALL);
    }
    if (!cascades.isEmpty()) {
        ab.add("cascade", cascades);
    }
}

From source file: org.apache.hadoop.tools.mapred.TestCopyMapper.java

private void testPreserveBlockSizeAndReplicationImpl(boolean preserve) {
    try {

        deleteState();
        createSourceData();

        FileSystem fs = cluster.getFileSystem();
        CopyMapper copyMapper = new CopyMapper();
        StubContext stubContext = new StubContext(getConfiguration(), null, 0);
        Mapper<Text, CopyListingFileStatus, Text, Text>.Context context = stubContext.getContext();

        Configuration configuration = context.getConfiguration();
        EnumSet<DistCpOptions.FileAttribute> fileAttributes = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
        if (preserve) {
            fileAttributes.add(DistCpOptions.FileAttribute.BLOCKSIZE);
            fileAttributes.add(DistCpOptions.FileAttribute.REPLICATION);
        }
        configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
                DistCpUtils.packAttributes(fileAttributes));

        copyMapper.setup(context);

        for (Path path : pathList) {
            final FileStatus fileStatus = fs.getFileStatus(path);
            copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
                    new CopyListingFileStatus(fileStatus), context);
        }

        // Check that the block-size/replication aren't preserved.
        for (Path path : pathList) {
            final Path targetPath = new Path(path.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
            final FileStatus source = fs.getFileStatus(path);
            final FileStatus target = fs.getFileStatus(targetPath);
            if (!source.isDirectory()) {
                Assert.assertTrue(preserve || source.getBlockSize() != target.getBlockSize());
                Assert.assertTrue(preserve || source.getReplication() != target.getReplication());
                Assert.assertTrue(!preserve || source.getBlockSize() == target.getBlockSize());
                Assert.assertTrue(!preserve || source.getReplication() == target.getReplication());
            }
        }
    } catch (Exception e) {
        Assert.assertTrue("Unexpected exception: " + e.getMessage(), false);
        e.printStackTrace();
    }
}

From source file: org.apache.hadoop.hdfs.server.namenode.TestCacheDirectives.java

@Test(timeout = 12000000)
public void testWaitForCachedReplicas() throws Exception {
    FileSystemTestHelper helper = new FileSystemTestHelper();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            return ((namenode.getNamesystem().getCacheCapacity() == (NUM_DATANODES * CACHE_CAPACITY))
                    && (namenode.getNamesystem().getCacheUsed() == 0));
        }
    }, 500, 60000);

    // Send a cache report referring to a bogus block.  It is important that
    // the NameNode be robust against this.
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    DataNode dn0 = cluster.getDataNodes().get(0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
    bogusBlockIds.add(999999L);
    nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds, CACHE_CAPACITY, 0);

    Path rootDir = helper.getDefaultWorkingDirectory(dfs);
    // Create the pool
    final String pool = "friendlyPool";
    nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
    // Create some test files
    final int numFiles = 2;
    final int numBlocksPerFile = 2;
    final List<String> paths = new ArrayList<String>(numFiles);
    for (int i = 0; i < numFiles; i++) {
        Path p = new Path(rootDir, "testCachePaths-" + i);
        FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile, (int) BLOCK_SIZE);
        paths.add(p.toUri().getPath());
    }
    // Check the initial statistics at the namenode
    waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
    // Cache and check each path in sequence
    int expected = 0;
    for (int i = 0; i < numFiles; i++) {
        CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().setPath(new Path(paths.get(i)))
                .setPool(pool).build();
        nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
        expected += numBlocksPerFile;
        waitForCachedBlocks(namenode, expected, expected, "testWaitForCachedReplicas:1");
    }
    // It may happen that the cache heartbeat arrives before the node heartbeat,
    // which can result in a race condition causing the test to fail.
    Thread.sleep(10000);
    // Check that the datanodes have the right cache values
    DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
    assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
    long totalUsed = 0;
    for (DatanodeInfo dn : live) {
        final long cacheCapacity = dn.getCacheCapacity();
        final long cacheUsed = dn.getCacheUsed();
        final long cacheRemaining = dn.getCacheRemaining();
        assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
        assertEquals("Capacity not equal to used + remaining", cacheCapacity, cacheUsed + cacheRemaining);
        assertEquals("Remaining not equal to capacity - used", cacheCapacity - cacheUsed, cacheRemaining);
        totalUsed += cacheUsed;
    }
    assertEquals(expected * BLOCK_SIZE, totalUsed);

    // Uncache and check each path in sequence
    RemoteIterator<CacheDirectiveEntry> entries = new CacheDirectiveIterator(nnRpc, null, FsTracer.get(conf));
    for (int i = 0; i < numFiles; i++) {
        CacheDirectiveEntry entry = entries.next();
        nnRpc.removeCacheDirective(entry.getInfo().getId());
        expected -= numBlocksPerFile;
        waitForCachedBlocks(namenode, expected, expected, "testWaitForCachedReplicas:2");
    }
}

From source file: com.google.bitcoin.core.Wallet.java

/**
 * <p>Called when we have found a transaction (via network broadcast or otherwise) that is relevant to this wallet
 * and want to record it. Note that we <b>cannot verify these transactions at all</b>, they may spend fictional
 * coins or be otherwise invalid. They are useful to inform the user about coins they can expect to receive soon,
 * and if you trust the sender of the transaction you can choose to assume they are in fact valid and will not
 * be double spent as an optimization.</p>
 *
 * <p>This is the same as {@link Wallet#receivePending(Transaction, java.util.List)} but allows you to override the
 * {@link Wallet#isPendingTransactionRelevant(Transaction)} sanity-check to keep track of transactions that are not
 * spendable or spend our coins. This can be useful when you want to keep track of transaction confidence on
 * arbitrary transactions. Note that transactions added in this way will still be relayed to peers and appear in
 * transaction lists like any other pending transaction (even when not relevant).</p>
 */
public void receivePending(Transaction tx, @Nullable List<Transaction> dependencies, boolean overrideIsRelevant)
        throws VerificationException {
    // Can run in a peer thread. This method will only be called if a prior call to isPendingTransactionRelevant
    // returned true, so we already know by this point that it sends coins to or from our wallet, or is a double
    // spend against one of our other pending transactions.
    lock.lock();
    try {
        tx.verify();
        // Ignore it if we already know about this transaction. Receiving a pending transaction never moves it
        // between pools.
        EnumSet<Pool> containingPools = getContainingPools(tx);
        if (!containingPools.equals(EnumSet.noneOf(Pool.class))) {
            log.debug("Received tx we already saw in a block or created ourselves: " + tx.getHashAsString());
            return;
        }
        // Repeat the check of relevancy here, even though the caller may have already done so - this is to avoid
        // race conditions where receivePending may be being called in parallel.
        if (!overrideIsRelevant && !isPendingTransactionRelevant(tx))
            return;
        if (isTransactionRisky(tx, dependencies) && !acceptRiskyTransactions)
            return;
        if (tx.getConfidence().getSource().equals(TransactionConfidence.Source.UNKNOWN)) {
            log.warn("Wallet received transaction with an unknown source. Consider tagging it!");
        }
        // If this tx spends any of our unspent outputs, mark them as spent now, then add to the pending pool. This
        // ensures that if some other client that has our keys broadcasts a spend we stay in sync. Also updates the
        // timestamp on the transaction and registers/runs event listeners.
        commitTx(tx);
    } finally {
        lock.unlock();
    }
    // maybeRotateKeys() will ignore pending transactions so we don't bother calling it here (see the comments
    // in that function for an explanation of why).
}

From source file: pl.edu.icm.cermine.pubmed.RuleBasedPubmedXMLGenerator.java

public static void main(String[] args) {
    if (args.length != 1) {
        System.err.println("Usage: <pubmed directory>");
        System.exit(1);
    }

    File dir = new File(args[0]);
    Collection<File> files = FileUtils.listFiles(dir, new String[] { "pdf" }, true);
    int i = 0;
    for (File pdfFile : files) {
        try {
            String pdfPath = pdfFile.getPath();
            String nxmlPath = TextUtils.getNLMPath(pdfPath);
            String cxmlPath = pdfPath.replaceFirst("\\.pdf", ".cxml");
            String cpxmlPath = pdfPath.replaceFirst("\\.pdf", ".cxml-corr");

            File cpxmlFile = new File(cpxmlPath);
            if (cpxmlFile.exists()) {
                i++;
                continue;
            }

            System.out.println(pdfPath);

            InputStream nxmlStream = new FileInputStream(nxmlPath);
            InputStream cxmlStream = new FileInputStream(cxmlPath);

            RuleBasedPubmedXMLGenerator datasetGenerator = new RuleBasedPubmedXMLGenerator();
            datasetGenerator.setVerbose(false);
            BxDocument bxDoc = datasetGenerator.generateTrueViz(cxmlStream, nxmlStream);
            i++;
            int keys = 0;
            Set<BxZoneLabel> set = EnumSet.noneOf(BxZoneLabel.class);
            int total = 0;
            int known = 0;
            for (BxZone z : bxDoc.asZones()) {
                total++;
                if (z.getLabel() != null) {
                    known++;
                    if (z.getLabel().isOfCategoryOrGeneral(BxZoneLabelCategory.CAT_METADATA)) {
                        set.add(z.getLabel());
                    }
                    if (BxZoneLabel.REFERENCES.equals(z.getLabel())) {
                        keys = 1;
                    }
                }
            }

            if (set.contains(BxZoneLabel.MET_AFFILIATION)) {
                keys++;
            }
            if (set.contains(BxZoneLabel.MET_AUTHOR)) {
                keys++;
            }
            if (set.contains(BxZoneLabel.MET_BIB_INFO)) {
                keys++;
            }
            if (set.contains(BxZoneLabel.MET_TITLE)) {
                keys++;
            }

            FileWriter fstream = new FileWriter(cpxmlPath);
            BufferedWriter out = new BufferedWriter(fstream);
            BxDocumentToTrueVizWriter writer = new BxDocumentToTrueVizWriter();
            out.write(writer.write(Lists.newArrayList(bxDoc)));
            out.close();
            System.out.println(
                    "Progress: " + i + " out of " + files.size() + " (" + (i * 100. / files.size()) + "%)");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

From source file: com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java

private void testPreserveUserGroupImpl(boolean preserve) {
    try {

        deleteState();
        createSourceData();
        changeUserGroup("Michael", "Corleone");

        FileSystem fs = cluster.getFileSystem();
        CopyMapper copyMapper = new CopyMapper();
        StatusReporter reporter = new StubStatusReporter();
        InMemoryWriter writer = new InMemoryWriter();
        Mapper<Text, FileStatus, NullWritable, Text>.Context context = getMapperContext(copyMapper, reporter,
                writer);

        Configuration configuration = context.getConfiguration();
        EnumSet<DistCpOptions.FileAttribute> fileAttributes = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
        if (preserve) {
            fileAttributes.add(DistCpOptions.FileAttribute.USER);
            fileAttributes.add(DistCpOptions.FileAttribute.GROUP);
            fileAttributes.add(DistCpOptions.FileAttribute.PERMISSION);
        }

        configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
                DistCpUtils.packAttributes(fileAttributes));
        copyMapper.setup(context);

        for (Path path : pathList) {
            final FileStatus fileStatus = fs.getFileStatus(path);
            copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), fileStatus,
                    context);
        }

        // Check that the user/group attributes are preserved
        // (only) as necessary.
        for (Path path : pathList) {
            final Path targetPath = new Path(path.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
            final FileStatus source = fs.getFileStatus(path);
            final FileStatus target = fs.getFileStatus(targetPath);
            if (!source.isDir()) {
                Assert.assertTrue(!preserve || source.getOwner().equals(target.getOwner()));
                Assert.assertTrue(!preserve || source.getGroup().equals(target.getGroup()));
                Assert.assertTrue(!preserve || source.getPermission().equals(target.getPermission()));
                Assert.assertTrue(preserve || !source.getOwner().equals(target.getOwner()));
                Assert.assertTrue(preserve || !source.getGroup().equals(target.getGroup()));
                Assert.assertTrue(preserve || !source.getPermission().equals(target.getPermission()));
                Assert.assertTrue(source.isDir() || source.getReplication() != target.getReplication());
            }
        }
    } catch (Exception e) {
        Assert.assertTrue("Unexpected exception: " + e.getMessage(), false);
        e.printStackTrace();
    }
}