Example usage for java.nio.file StandardOpenOption READ

Introduction

This page collects example usages of java.nio.file StandardOpenOption.READ.

Prototype

StandardOpenOption READ

Document

Open for read access.
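
A minimal, self-contained sketch of the option in use (the file name below is hypothetical). Note that READ is also the default when Files.newInputStream is called with no options, so passing it explicitly simply documents intent:

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ReadOptionExample {
    public static void main(String[] args) throws IOException {
        Path path = Paths.get("example.txt"); // hypothetical input file

        // Open for read access; throws NoSuchFileException if the file is absent.
        try (InputStream in = Files.newInputStream(path, StandardOpenOption.READ)) {
            byte[] data = in.readAllBytes(); // Java 9+
            System.out.println(new String(data, StandardCharsets.UTF_8));
        }
    }
}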

Usage

From source file:info.novatec.inspectit.rcp.storage.util.DataRetriever.java

/**
 * Returns cached data for the given hash locally. This method can be used when the storage is
 * fully downloaded.
 * 
 * @param <E>
 *            Type of the objects are wanted.
 * 
 * @param localStorageData
 *            {@link LocalStorageData} that points to the wanted storage.
 * @param hash
 *            Hash under which the cached data is stored.
 * @return Returns cached data for the storage if the cached data exists for given hash. If data
 *         does not exist <code>null</code> is returned.
 * @throws SerializationException
 *             If {@link SerializationException} occurs.
 * @throws IOException
 *             If {@link IOException} occurs.
 */
@SuppressWarnings("unchecked")
public <E extends DefaultData> List<E> getCachedDataLocally(LocalStorageData localStorageData, int hash)
        throws IOException, SerializationException {
    Path path = storageManager.getCachedDataPath(localStorageData, hash);
    if (Files.notExists(path)) {
        return null;
    } else {
        ISerializer serializer = null;
        try {
            serializer = serializerQueue.take();
        } catch (InterruptedException e) {
            // Restore the interrupt status rather than clearing it with Thread.interrupted().
            Thread.currentThread().interrupt();
        }

        Input input = null;
        try (InputStream inputStream = Files.newInputStream(path, StandardOpenOption.READ)) {
            input = new Input(inputStream);
            Object object = serializer.deserialize(input);
            List<E> receivedData = (List<E>) object;
            return receivedData;
        } finally {
            if (null != input) {
                input.close();
            }
            serializerQueue.add(serializer);
        }
    }
}

From source file:com.arpnetworking.metrics.common.tailer.StatefulTailer.java

private Optional<Boolean> compareByHash(final Optional<String> prefixHash, final int prefixLength) {
    final int appliedLength;
    if (_hash.isPresent()) {
        appliedLength = REQUIRED_BYTES_FOR_HASH;
    } else {
        appliedLength = prefixLength;
    }
    try (final SeekableByteChannel reader = Files.newByteChannel(_file, StandardOpenOption.READ)) {
        final Optional<String> filePrefixHash = computeHash(reader, appliedLength);

        LOGGER.trace().setMessage("Comparing hashes").addData("hash1", prefixHash)
                .addData("filePrefixHash", filePrefixHash).addData("size", appliedLength).log();

        return Optional.of(Objects.equals(_hash.orElse(prefixHash.orElse(null)), filePrefixHash.orElse(null)));
    } catch (final IOException e) {
        return Optional.empty();
    }
}
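
The computeHash helper referenced above is not part of this listing. A hypothetical sketch of such a method, assuming it MD5-hashes the first length bytes of the channel and hex-encodes the digest with commons-codec's Hex; the real StatefulTailer implementation may differ:

// Hypothetical reconstruction, not the project's actual code. Assumes imports of
// java.nio.ByteBuffer, java.nio.channels.SeekableByteChannel, java.security.MessageDigest,
// java.security.NoSuchAlgorithmException and org.apache.commons.codec.binary.Hex.
private Optional<String> computeHash(final SeekableByteChannel reader, final int length) {
    try {
        final MessageDigest digest = MessageDigest.getInstance("MD5");
        final ByteBuffer buffer = ByteBuffer.allocate(length);
        reader.position(0);
        // Fill the buffer from the start of the file; bail out on early EOF.
        while (buffer.hasRemaining()) {
            if (reader.read(buffer) < 0) {
                return Optional.empty(); // file is shorter than the requested prefix
            }
        }
        buffer.flip();
        digest.update(buffer);
        return Optional.of(Hex.encodeHexString(digest.digest()));
    } catch (final IOException | NoSuchAlgorithmException e) {
        return Optional.empty();
    }
}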

From source file:com.google.pubsub.flic.controllers.GCEController.java

/**
 * Uploads a given file to Google Storage.
 */
private void uploadFile(Path filePath) throws IOException {
    try {
        byte[] md5hash = Base64.decodeBase64(
                storage.objects().get(projectName + "-cloud-pubsub-loadtest", filePath.getFileName().toString())
                        .execute().getMd5Hash());
        try (InputStream inputStream = Files.newInputStream(filePath, StandardOpenOption.READ)) {
            if (Arrays.equals(md5hash, DigestUtils.md5(inputStream))) {
                log.info("File " + filePath.getFileName() + " is current, reusing.");
                return;
            }
        }
        log.info("File " + filePath.getFileName() + " is out of date, uploading new version.");
        storage.objects().delete(projectName + "-cloud-pubsub-loadtest", filePath.getFileName().toString())
                .execute();
    } catch (GoogleJsonResponseException e) {
        if (e.getStatusCode() != NOT_FOUND) {
            throw e;
        }
    }
    try (InputStream inputStream = Files.newInputStream(filePath, StandardOpenOption.READ)) {
        storage.objects()
                .insert(projectName + "-cloud-pubsub-loadtest", null,
                        new InputStreamContent("application/octet-stream", inputStream))
                .setName(filePath.getFileName().toString()).execute();
        log.info("File " + filePath.getFileName() + " created.");
    }
}

From source file:org.polago.deployconf.DeployConfRunner.java

/**
 * Gets a DeploymentConfig instance from a Path.
 *
 * @param path the file to use
 * @return a DeploymentConfig parsed from the given Path
 * @throws Exception indicating error
 */
private DeploymentConfig getDeploymentConfigFromPath(Path path) throws Exception {

    // try-with-resources ensures the channel is closed even if parsing fails
    try (ReadableByteChannel ch = FileChannel.open(path, StandardOpenOption.READ)) {
        InputStream is = Channels.newInputStream(ch);
        DeploymentReader reader = new DeploymentReader(is, getGroupManager());
        return reader.parse();
    }
}

From source file:com.github.podd.resources.UploadArtifactResourceImpl.java

private InferredOWLOntologyID uploadFileAndLoadArtifactIntoPodd(final Representation entity)
        throws ResourceException {
    List<FileItem> items;
    Path filePath = null;
    String contentType = null;

    // 1: Create a factory for disk-based file items
    final DiskFileItemFactory factory = new DiskFileItemFactory(1000240, this.tempDirectory.toFile());

    // 2: Create a new file upload handler
    final RestletFileUpload upload = new RestletFileUpload(factory);
    final Map<String, String> props = new HashMap<String, String>();
    try {
        // 3: Request is parsed by the handler which generates a list of
        // FileItems
        items = upload.parseRequest(this.getRequest());

        for (final FileItem fi : items) {
            final String name = fi.getName();

            if (name == null) {
                props.put(fi.getFieldName(), new String(fi.get(), StandardCharsets.UTF_8));
            } else {
                // FIXME: Strip everything up to the last '.' out of the filename so
                // that the filename can be used for content type determination where
                // possible.
                // InputStream uploadedFileInputStream = fi.getInputStream();
                try {
                    // Note: These are Java-7 APIs
                    contentType = fi.getContentType();
                    props.put("Content-Type", fi.getContentType());

                    filePath = Files.createTempFile(this.tempDirectory, "ontologyupload-", name);
                    final File file = filePath.toFile();
                    file.deleteOnExit();
                    fi.write(file);
                } catch (final IOException ioe) {
                    throw ioe;
                } catch (final Exception e) {
                    // Avoid throwing a generic exception just because the Apache
                    // Commons library throws Exception.
                    throw new IOException(e);
                }
            }
        }
    } catch (final IOException | FileUploadException e) {
        throw new ResourceException(Status.CLIENT_ERROR_BAD_REQUEST, e);
    }

    this.log.info("props={}", props.toString());

    if (filePath == null) {
        throw new ResourceException(Status.CLIENT_ERROR_BAD_REQUEST,
                "Did not submit a valid file and filename");
    }

    this.log.info("filename={}", filePath.toAbsolutePath().toString());
    this.log.info("contentType={}", contentType);

    RDFFormat format = null;

    // If the content type was application/octet-stream, use the file name instead.
    // Browsers attach this content type when they are not sure what the real type is.
    if (MediaType.APPLICATION_OCTET_STREAM.getName().equals(contentType)) {
        format = Rio.getParserFormatForFileName(filePath.getFileName().toString());

        this.log.info("octet-stream contentType filename format={}", format);
    }
    // Otherwise use the content type directly, in preference to the filename.
    else if (contentType != null) {
        format = Rio.getParserFormatForMIMEType(contentType);

        this.log.info("non-octet-stream contentType format={}", format);
    }

    // If the content type choices failed to resolve the type, then try the filename.
    if (format == null) {
        format = Rio.getParserFormatForFileName(filePath.getFileName().toString());

        this.log.info("non-content-type filename format={}", format);
    }

    // Or fall back to RDF/XML, which at minimum is able to detect when the
    // document is structurally invalid.
    if (format == null) {
        this.log.warn("Could not determine RDF format from request so falling back to RDF/XML");
        format = RDFFormat.RDFXML;
    }

    try (final InputStream inputStream = new BufferedInputStream(
            Files.newInputStream(filePath, StandardOpenOption.READ))) {
        return this.uploadFileAndLoadArtifactIntoPodd(inputStream, format, DanglingObjectPolicy.REPORT,
                DataReferenceVerificationPolicy.DO_NOT_VERIFY);
    } catch (final IOException e) {
        throw new ResourceException(Status.SERVER_ERROR_INTERNAL, "File IO error occurred", e);
    }

}

From source file:com.spectralogic.ds3client.integration.Smoke_Test.java

@Test
public void verifySendCrc32cChecksum() throws IOException, XmlProcessingException, URISyntaxException {
    final String bucketName = "crc_32_bucket";
    final String dataPolicyName = "crc_32_dp";
    final String storageDomainName = "crc_32_sd";
    final String poolPartitionName = "crc_32_pp";

    UUID storageDomainMemberId = null;
    UUID dataPersistenceRuleId = null;
    try {
        //Create data policy
        final PutDataPolicySpectraS3Response dataPolicyResponse = createDataPolicyWithVersioningAndCrcRequired(
                dataPolicyName, VersioningLevel.NONE, ChecksumType.Type.CRC_32C, client);

        //Create storage domain
        final PutStorageDomainSpectraS3Response storageDomainResponse = createStorageDomain(storageDomainName,
                client);

        //Create pool partition
        final PutPoolPartitionSpectraS3Response poolPartitionResponse = createPoolPartition(poolPartitionName,
                PoolType.ONLINE, client);

        //Create storage domain member linking pool partition to storage domain
        final PutPoolStorageDomainMemberSpectraS3Response memberResponse = createPoolStorageDomainMember(
                storageDomainResponse.getStorageDomainResult().getId(),
                poolPartitionResponse.getPoolPartitionResult().getId(), client);
        storageDomainMemberId = memberResponse.getStorageDomainMemberResult().getId();

        //create data persistence rule
        final PutDataPersistenceRuleSpectraS3Response dataPersistenceResponse = createDataPersistenceRule(
                dataPolicyResponse.getDataPolicyResult().getId(),
                storageDomainResponse.getStorageDomainResult().getId(), client);
        dataPersistenceRuleId = dataPersistenceResponse.getDataPersistenceRuleResult().getDataPolicyId();

        //Create bucket with data policy
        client.putBucketSpectraS3(new PutBucketSpectraS3Request(bucketName)
                .withDataPolicyId(dataPolicyResponse.getDataPolicyResult().getId().toString()));

        //Verify send CRC 32c checksum
        final Ds3ClientHelpers helpers = Ds3ClientHelpers.wrap(client);

        helpers.ensureBucketExists(bucketName);

        final List<Ds3Object> objs = Lists.newArrayList(new Ds3Object("beowulf.txt", 294059));

        final MasterObjectList mol = client
                .putBulkJobSpectraS3(new PutBulkJobSpectraS3Request(bucketName, objs)).getResult();

        // try-with-resources ensures the channel is closed once the put completes
        try (final FileChannel channel = FileChannel.open(ResourceUtils.loadFileResource("books/beowulf.txt"),
                StandardOpenOption.READ)) {

            final PutObjectResponse response = client.putObject(
                    new PutObjectRequest(bucketName, "beowulf.txt", channel, mol.getJobId().toString(), 0, 294059)
                            .withChecksum(ChecksumType.compute(), ChecksumType.Type.CRC_32C));

            assertThat(response.getChecksumType(), is(ChecksumType.Type.CRC_32C));
            assertThat(response.getChecksum(), is("+ZBZbQ=="));
        }

    } finally {
        deleteAllContents(client, bucketName);
        deleteDataPersistenceRule(dataPersistenceRuleId, client);
        deleteDataPolicy(dataPolicyName, client);
        deleteStorageDomainMember(storageDomainMemberId, client);
        deleteStorageDomain(storageDomainName, client);
        deletePoolPartition(poolPartitionName, client);
    }
}

From source file:org.apache.nifi.controller.StandardFlowSynchronizer.java

private byte[] readFlowFromDisk() throws IOException {
    final Path flowPath = nifiProperties.getFlowConfigurationFile().toPath();
    if (!Files.exists(flowPath) || Files.size(flowPath) == 0) {
        return new byte[0];
    }

    final ByteArrayOutputStream baos = new ByteArrayOutputStream();

    try (final InputStream in = Files.newInputStream(flowPath, StandardOpenOption.READ);
            final InputStream gzipIn = new GZIPInputStream(in)) {
        FileUtils.copy(gzipIn, baos);
    }

    return baos.toByteArray();
}

From source file:org.apache.nifi.processors.standard.TailFile.java

/**
 * Updates member variables to reflect the "expected recovery checksum" and
 * seeks to the appropriate location in the tailed file, updating our
 * checksum, so that we are ready to proceed with the
 * {@link #onTrigger(ProcessContext, ProcessSession)} call.
 *
 * @param context the ProcessContext
 * @param stateValues the values that were recovered from state that was
 * previously stored. This Map should be populated with the keys defined in
 * {@link TailFileState.StateKeys}.
 * @param filePath the path of the file for which state must be recovered
 * @throws IOException if unable to seek to the appropriate location in the
 * tailed file.
 */
private void recoverState(final ProcessContext context, final Map<String, String> stateValues,
        final String filePath) throws IOException {

    final String prefix = MAP_PREFIX + states.get(filePath).getFilenameIndex() + '.';

    if (!stateValues.containsKey(prefix + TailFileState.StateKeys.FILENAME)) {
        resetState(filePath);
        return;
    }
    if (!stateValues.containsKey(prefix + TailFileState.StateKeys.POSITION)) {
        resetState(filePath);
        return;
    }
    if (!stateValues.containsKey(prefix + TailFileState.StateKeys.TIMESTAMP)) {
        resetState(filePath);
        return;
    }
    if (!stateValues.containsKey(prefix + TailFileState.StateKeys.LENGTH)) {
        resetState(filePath);
        return;
    }

    final String checksumValue = stateValues.get(prefix + TailFileState.StateKeys.CHECKSUM);
    final boolean checksumPresent = (checksumValue != null);
    final String storedStateFilename = stateValues.get(prefix + TailFileState.StateKeys.FILENAME);
    final long position = Long.parseLong(stateValues.get(prefix + TailFileState.StateKeys.POSITION));
    final long timestamp = Long.parseLong(stateValues.get(prefix + TailFileState.StateKeys.TIMESTAMP));
    final long length = Long.parseLong(stateValues.get(prefix + TailFileState.StateKeys.LENGTH));

    FileChannel reader = null;
    File tailFile = null;

    if (checksumPresent && filePath.equals(storedStateFilename)) {
        states.get(filePath).setExpectedRecoveryChecksum(Long.parseLong(checksumValue));

        // We have an expected checksum and the currently configured filename is the same as the state file.
        // We need to check if the existing file is the same as the one referred to in the state file based on
        // the checksum.
        final Checksum checksum = new CRC32();
        final File existingTailFile = new File(storedStateFilename);
        if (existingTailFile.length() >= position) {
            try (final InputStream tailFileIs = new FileInputStream(existingTailFile);
                    final CheckedInputStream in = new CheckedInputStream(tailFileIs, checksum)) {
                StreamUtils.copy(in, new NullOutputStream(), states.get(filePath).getState().getPosition());

                final long checksumResult = in.getChecksum().getValue();
                if (checksumResult == states.get(filePath).getExpectedRecoveryChecksum()) {
                    // Checksums match. This means that we want to resume reading from where we left off.
                    // So we will populate the reader object so that it will be used in onTrigger. If the
                    // checksums do not match, then we will leave the reader object null, so that the next
                    // call to onTrigger will result in a new Reader being created and starting at the
                    // beginning of the file.
                    getLogger().debug(
                            "When recovering state, checksum of tailed file matches the stored checksum. Will resume where left off.");
                    tailFile = existingTailFile;
                    reader = FileChannel.open(tailFile.toPath(), StandardOpenOption.READ);
                    getLogger().debug("Created FileChannel {} for {} in recoverState",
                            new Object[] { reader, tailFile });

                    reader.position(position);
                } else {
                    // we don't seek the reader to the position, so our reader will start at beginning of file.
                    getLogger().debug(
                            "When recovering state, checksum of tailed file does not match the stored checksum. Will begin tailing current file from beginning.");
                }
            }
        } else {
            // fewer bytes than our position, so we know we weren't already reading from this file. Keep reader at a position of 0.
            getLogger().debug(
                    "When recovering state, existing file to tail is only {} bytes but position flag is {}; "
                            + "this indicates that the file has rotated. Will begin tailing current file from beginning.",
                    new Object[] { existingTailFile.length(), position });
        }

        states.get(filePath).setState(new TailFileState(filePath, tailFile, reader, position, timestamp, length,
                checksum, ByteBuffer.allocate(65536)));
    } else {
        resetState(filePath);
    }

    getLogger().debug("Recovered state {}", new Object[] { states.get(filePath).getState() });
}

From source file:org.apache.nifi.controller.repository.FileSystemRepository.java

@Override
public long importFrom(final Path content, final ContentClaim claim) throws IOException {
    try (final InputStream in = Files.newInputStream(content, StandardOpenOption.READ)) {
        return importFrom(in, claim);
    }
}

From source file:org.apache.nifi.processors.standard.TailFile.java

private void processTailFile(final ProcessContext context, final ProcessSession session,
        final String tailFile) {
    // If user changes the file that is being tailed, we need to consume the already-rolled-over data according
    // to the Initial Start Position property
    boolean rolloverOccurred;
    TailFileObject tfo = states.get(tailFile);

    if (tfo.isTailFileChanged()) {
        rolloverOccurred = false;
        final String recoverPosition = context.getProperty(START_POSITION).getValue();

        if (START_BEGINNING_OF_TIME.getValue().equals(recoverPosition)) {
            recoverRolledFiles(context, session, tailFile, tfo.getExpectedRecoveryChecksum(),
                    tfo.getState().getTimestamp(), tfo.getState().getPosition());
        } else if (START_CURRENT_FILE.getValue().equals(recoverPosition)) {
            cleanup();
            tfo.setState(new TailFileState(tailFile, null, null, 0L, 0L, 0L, null, tfo.getState().getBuffer()));
        } else {
            final String filename = tailFile;
            final File file = new File(filename);

            try {
                final FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
                getLogger().debug("Created FileChannel {} for {}", new Object[] { fileChannel, file });

                final Checksum checksum = new CRC32();
                final long position = file.length();
                final long timestamp = file.lastModified();

                try (final InputStream fis = new FileInputStream(file);
                        final CheckedInputStream in = new CheckedInputStream(fis, checksum)) {
                    StreamUtils.copy(in, new NullOutputStream(), position);
                }

                fileChannel.position(position);
                cleanup();
                tfo.setState(new TailFileState(filename, file, fileChannel, position, timestamp, file.length(),
                        checksum, tfo.getState().getBuffer()));
            } catch (final IOException ioe) {
                getLogger().error(
                        "Attempted to position Reader at current position in file {} but failed to do so due to {}",
                        new Object[] { file, ioe.toString() }, ioe);
                context.yield();
                return;
            }
        }

        tfo.setTailFileChanged(false);
    } else {
        // Recover any data that may have rolled over since the last time that this processor ran.
        // If expectedRecoveryChecksum != null, that indicates that this is the first iteration since processor was started, so use whatever checksum value
        // was present when the state was last persisted. In this case, we must then null out the value so that the next iteration won't keep using the "recovered"
        // value. If the value is null, then we know that either the processor has already recovered that data, or there was no state persisted. In either case,
        // use whatever checksum value is currently in the state.
        Long expectedChecksumValue = tfo.getExpectedRecoveryChecksum();
        if (expectedChecksumValue == null) {
            expectedChecksumValue = tfo.getState().getChecksum() == null ? null
                    : tfo.getState().getChecksum().getValue();
        }

        rolloverOccurred = recoverRolledFiles(context, session, tailFile, expectedChecksumValue,
                tfo.getState().getTimestamp(), tfo.getState().getPosition());
        tfo.setExpectedRecoveryChecksum(null);
    }

    // initialize local variables from state object; this is done so that we can easily change the values throughout
    // the onTrigger method and then create a new state object after we finish processing the files.
    TailFileState state = tfo.getState();
    File file = state.getFile();
    FileChannel reader = state.getReader();
    Checksum checksum = state.getChecksum();
    if (checksum == null) {
        checksum = new CRC32();
    }
    long position = state.getPosition();
    long timestamp = state.getTimestamp();
    long length = state.getLength();

    // Create a reader if necessary.
    if (file == null || reader == null) {
        file = new File(tailFile);
        reader = createReader(file, position);
        if (reader == null) {
            context.yield();
            return;
        }
    }

    final long startNanos = System.nanoTime();

    // Check if file has rotated
    if (rolloverOccurred || (timestamp <= file.lastModified() && length > file.length())
            || (timestamp < file.lastModified() && length >= file.length())) {

        // Since file has rotated, we close the reader, create a new one, and then reset our state.
        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader, reader });
        } catch (final IOException ioe) {
            getLogger().warn("Failed to close reader for {} due to {}", new Object[] { file, ioe });
        }

        reader = createReader(file, 0L);
        position = 0L;
        checksum.reset();
    }

    if (file.length() == position || !file.exists()) {
        // no data to consume so rather than continually running, yield to allow other processors to use the thread.
        getLogger().debug("No data to consume; created no FlowFiles");
        tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
                state.getBuffer()));
        persistState(tfo, context);
        context.yield();
        return;
    }

    // If there is data to consume, read as much as we can.
    final TailFileState currentState = state;
    final Checksum chksum = checksum;
    // data has been written to file. Stream it to a new FlowFile.
    FlowFile flowFile = session.create();

    final FileChannel fileReader = reader;
    final AtomicLong positionHolder = new AtomicLong(position);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream rawOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(rawOut)) {
                positionHolder.set(readLines(fileReader, currentState.getBuffer(), out, chksum));
            }
        }
    });

    // If there ended up being no data, just remove the FlowFile
    if (flowFile.getSize() == 0) {
        session.remove(flowFile);
        getLogger().debug("No data to consume; removed created FlowFile");
    } else {
        // determine filename for FlowFile by using <base filename of log file>.<initial offset>-<final offset>.<extension>
        final String tailFilename = file.getName();
        final String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        final String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            flowFileName = baseName + "." + position + "-" + positionHolder.get() + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            flowFileName = baseName + "." + position + "-" + positionHolder.get();
        }

        final Map<String, String> attributes = new HashMap<>(3);
        attributes.put(CoreAttributes.FILENAME.key(), flowFileName);
        attributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
        attributes.put("tailfile.original.path", tailFile);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, file.toURI().toString(),
                "FlowFile contains bytes " + position + " through " + positionHolder.get() + " of source file",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        session.transfer(flowFile, REL_SUCCESS);
        position = positionHolder.get();

        // Set timestamp to the latest of when the file was modified and the current timestamp stored in the state.
        // We do this because when we read a file that has been rolled over, we set the state to 1 millisecond later than the last mod date
        // in order to avoid ingesting that file again. If we then read from this file during the same second (or millisecond, depending on the
        // operating system file last mod precision), then we could set the timestamp to a smaller value, which could result in reading in the
        // rotated file a second time.
        timestamp = Math.max(state.getTimestamp(), file.lastModified());
        length = file.length();
        getLogger().debug("Created {} and routed to success", new Object[] { flowFile });
    }

    // Create a new state object to represent our current position, timestamp, etc.
    tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
            state.getBuffer()));

    // We must commit session before persisting state in order to avoid data loss on restart
    session.commit();
    persistState(tfo, context);
}