Example usage for the java.util.concurrent ExecutorCompletionService(Executor) constructor

Introduction

This page collects example usage of the java.util.concurrent ExecutorCompletionService(Executor) constructor, drawn from open-source projects.

Prototype

public ExecutorCompletionService(Executor executor) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and a LinkedBlockingQueue as a completion queue.
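
Before the project listings below, here is a minimal, self-contained sketch of the pattern most of them follow: wrap an executor in an ExecutorCompletionService, submit a batch of Callable tasks, then take() each Future as it completes. The class name, task bodies, and pool size are illustrative only and not taken from any of the listed projects; the HDFS examples further down add a timeout via poll(), shown in a second sketch after them.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        // Uses the supplied executor for task execution and a LinkedBlockingQueue
        // internally as the completion queue.
        CompletionService<Integer> completionService =
                new ExecutorCompletionService<Integer>(executor);

        List<Callable<Integer>> tasks = Arrays.asList(
                () -> 1 + 1,
                () -> 2 * 2,
                () -> 3 * 3);
        for (Callable<Integer> task : tasks) {
            completionService.submit(task);
        }

        try {
            // take() blocks until some task finishes; results arrive in
            // completion order, not submission order.
            for (int i = 0; i < tasks.size(); i++) {
                Integer result = completionService.take().get();
                System.out.println("completed: " + result);
            }
        } finally {
            executor.shutdown();
        }
    }
}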

Usage

From source file:org.goko.controller.grbl.v09.GrblControllerService.java

/** (inheritDoc)
 * @see org.goko.core.controller.IProbingService#probe(java.util.List)
 */
@Override
public CompletionService<ProbeResult> probe(List<ProbeRequest> lstProbeRequest) throws GkException {
    Executor executor = Executors.newSingleThreadExecutor();
    this.completionService = new ExecutorCompletionService<ProbeResult>(executor);
    this.lstProbeCallable = new ArrayList<>();

    for (ProbeRequest probeRequest : lstProbeRequest) {
        ProbeCallable probeCallable = new ProbeCallable();
        this.lstProbeCallable.add(probeCallable);
        completionService.submit(probeCallable);
    }

    probeGCodeProvider = getZProbingCode(lstProbeRequest, getGCodeContext());
    probeGCodeProvider.setCode("TinyG probing");
    gcodeService.addGCodeProvider(probeGCodeProvider);
    probeGCodeProvider = gcodeService.getGCodeProvider(probeGCodeProvider.getId());// Required since internally the provider is a new one
    executionService.addToExecutionQueue(probeGCodeProvider);
    executionService.beginQueueExecution();

    return completionService;
}

From source file:org.codice.alliance.nsili.source.NsiliSource.java

public void setNumberWorkerThreads(int numberWorkerThreads) {
    List<Runnable> waitingTasks = null;
    if (executorService != null) {
        waitingTasks = executorService.shutdownNow();
    }

    executorService = Executors.newFixedThreadPool(numberWorkerThreads);
    completionService = new ExecutorCompletionService(executorService);
    if (waitingTasks != null) {
        for (Runnable task : waitingTasks) {
            executorService.submit(task);
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.HRegion.java

private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status)
        throws IOException, UnsupportedEncodingException {
    // Load in all the HStores.

    long maxSeqId = -1;
    // initialized to -1 so that we pick up MemstoreTS from column families
    long maxMemstoreTS = -1;

    if (!htableDescriptor.getFamilies().isEmpty()) {
        // initialize the thread pool for opening stores in parallel.
        ThreadPoolExecutor storeOpenerThreadPool = getStoreOpenAndCloseThreadPool(
                "StoreOpener-" + this.getRegionInfo().getShortNameToLog());
        CompletionService<HStore> completionService = new ExecutorCompletionService<HStore>(
                storeOpenerThreadPool);

        // initialize each store in parallel
        for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
            status.setStatus("Instantiating store for column family " + family);
            completionService.submit(new Callable<HStore>() {
                @Override
                public HStore call() throws IOException {
                    return instantiateHStore(family);
                }
            });
        }
        boolean allStoresOpened = false;
        try {
            for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) {
                Future<HStore> future = completionService.take();
                HStore store = future.get();
                this.stores.put(store.getColumnFamilyName().getBytes(), store);

                long storeMaxSequenceId = store.getMaxSequenceId();
                maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeMaxSequenceId);
                if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
                    maxSeqId = storeMaxSequenceId;
                }
                long maxStoreMemstoreTS = store.getMaxMemstoreTS();
                if (maxStoreMemstoreTS > maxMemstoreTS) {
                    maxMemstoreTS = maxStoreMemstoreTS;
                }
            }
            allStoresOpened = true;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e.getCause());
        } finally {
            storeOpenerThreadPool.shutdownNow();
            if (!allStoresOpened) {
                // something went wrong, close all opened stores
                LOG.error("Could not initialize all stores for the region=" + this);
                for (Store store : this.stores.values()) {
                    try {
                        store.close();
                    } catch (IOException e) {
                        LOG.warn(e.getMessage());
                    }
                }
            }
        }
    }
    mvcc.initialize(maxMemstoreTS + 1);
    // Recover any edits if available.
    maxSeqId = Math.max(maxSeqId,
            replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
    return maxSeqId;
}

From source file:com.spotify.docker.client.DefaultDockerClientTest.java

@Test(expected = DockerTimeoutException.class)
public void testConnectionRequestTimeout() throws Exception {
    final int connectionPoolSize = 1;
    final int callableCount = connectionPoolSize * 100;

    final ExecutorService executor = Executors.newCachedThreadPool();
    final CompletionService completion = new ExecutorCompletionService(executor);

    // Spawn and wait on many more containers than the connection pool size.
    // This should cause a timeout once the connection pool is exhausted.

    try (final DockerClient dockerClient = DefaultDockerClient.fromEnv().connectionPoolSize(connectionPoolSize)
            .build()) {
        // Create container
        final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
                .cmd("sh", "-c", "while :; do sleep 1; done").build();
        final String name = randomName();
        final ContainerCreation creation = dockerClient.createContainer(config, name);
        final String id = creation.id();

        // Start the container
        dockerClient.startContainer(id);

        // Submit a bunch of waitContainer requests
        for (int i = 0; i < callableCount; i++) {
            //noinspection unchecked
            completion.submit(new Callable<ContainerExit>() {
                @Override
                public ContainerExit call() throws Exception {
                    return dockerClient.waitContainer(id);
                }
            });
        }

        // Wait for the requests to complete or throw expected exception
        for (int i = 0; i < callableCount; i++) {
            try {
                completion.take().get();
            } catch (ExecutionException e) {
                Throwables.propagateIfInstanceOf(e.getCause(), DockerTimeoutException.class);
                throw e;
            }
        }
    } finally {
        executor.shutdown();
    }
}

From source file:com.alibaba.wasp.fserver.EntityGroup.java

private synchronized void commitTransaction(WALEdit edit) throws IOException {
    Transaction t = edit.getT();
    if (LOG.isDebugEnabled()) {
        LOG.debug("EntityGroup commitTransaction:" + t.getTransactionID());
    }
    List<Mutate> mutates = t.getEdits();
    CompletionService<InsureStatus> completionService = new ExecutorCompletionService<InsureStatus>(
            this.services.getThreadPool());

    for (Mutate mutate : mutates) {
        String tableName = mutate.getTableName();
        try {
            if (mutate.getMutateType() == Mutate.MutateType.PUT) {
                Put put = ProtobufUtil.toPut(mutate, t.getTransactionID());
                completionService.submit(new InsurePut(tableName, put));
            } else if (mutate.getMutateType() == Mutate.MutateType.DELETE) {
                Delete delete = ProtobufUtil.toDelete(mutate);
                completionService.submit(new InsureDelete(tableName, delete));
            }
        } catch (DoNotRetryIOException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("convert mutate to Put or Delete error.", e);
            }
        }
    }

    int errors = 0;
    for (int i = 0; i < mutates.size(); i++) {
        try {
            Future<InsureStatus> result = completionService.take();
            if (InsureStatus.SUCCESS == result.get()) {
                // nothing to do, this operation succeeded.
            } else if (InsureStatus.FAILED == result.get()) {
                errors++;
            } else {
                LOG.warn("What happened?");
                errors++;
            }
        } catch (InterruptedException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("transaction execute error", e);
            }
        } catch (ExecutionException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("transaction execute error", e);
            }
        }
    }
    if (errors != 0) {
        String message = "transaction id=" + t.getTransactionID() + " process occur " + errors + " errors";
        LOG.warn(message);
        throw new IOException(message);
    }

    try {
        redo.commit(edit);
    } catch (AlreadyCommitTransactionException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID() + " has all ready commited", e);
        }
    } catch (NotInitlizedRedoException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID()
                    + " commited failed as a result of the redo log has a error ", e);
        }
    } catch (RedoLogNotServingException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID()
                    + " commited failed as a result of the redo log has been closed ", e);
        }
    }
    Primary primary = edit.getAction();
    if (primary != null) {
        this.releaseRowLock(primary.getCombinedPrimaryKey());
    }
}

From source file:org.apache.hadoop.hdfs.DFSInputStream.java

/**
 * Like {@link #fetchBlockByteRange(LocatedBlock, long, long, byte[],
 * int, Map)} except we start up a second, parallel, 'hedged' read
 * if the first read is taking longer than configured amount of
 * time.  We then wait on which ever read returns first.
 *
 * @param block
 * @param start
 * @param end
 * @param buf
 * @param offset
 * @param corruptedBlockMap
 * @throws IOException
 */
private void hedgedFetchBlockByteRange(long blockStartOffset, long start, long end, byte[] buf, int offset,
        Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) throws IOException {
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<Future<ByteBuffer>>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<ByteBuffer>(
            dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<DatanodeInfo>();
    ByteBuffer bb = null;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    LocatedBlock block = getBlockAt(blockStartOffset);
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.wrap(buf, offset, len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode,
                    block.getStartOffset(), start, end, bb, corruptedBlockMap, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(dfsClient.getHedgedReadTimeout(),
                        TimeUnit.MILLISECONDS);
                if (future != null) {
                    future.get();
                    return;
                }
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Waited " + dfsClient.getHedgedReadTimeout() + "ms to read from "
                            + chosenNode.info + "; spawning hedged read");
                }
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                continue; // no need to refresh block locations
            } catch (InterruptedException e) {
                // Ignore
            } catch (ExecutionException e) {
                // Ignore already logged in the call.
            }
        } else {
            // We are starting up a 'hedged' read. We have a read already
            // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
            // If no nodes to do hedged reads against, pass.
            try {
                try {
                    chosenNode = getBestNodeDNAddrPair(block.getLocations(), ignored);
                } catch (IOException ioe) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode,
                        block.getStartOffset(), start, end, bb, corruptedBlockMap, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Failed getting node for hedged read: " + ioe.getMessage());
                }
            }
            // if not succeeded. Submit callables for each datanode in a loop, wait
            // for a fixed interval and get the result from the fastest one.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                if (result.array() != buf) { // compare the array pointers
                    dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                    System.arraycopy(result.array(), result.position(), buf, offset, len);
                } else {
                    dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                }
                return;
            } catch (InterruptedException ie) {
                // Ignore and retry
            }
            // We got here if exception. Ignore this node on next go around IFF
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}

From source file:com.mellanox.r4h.DFSInputStream.java

/**
 * Like {@link #fetchBlockByteRange(LocatedBlock, long, long, byte[], int, Map)} except we start up a second, parallel, 'hedged' read
 * if the first read is taking longer than configured amount of
 * time. We then wait on which ever read returns first.
 */
private void hedgedFetchBlockByteRange(LocatedBlock block, long start, long end, byte[] buf, int offset,
        Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) throws IOException {
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<Future<ByteBuffer>>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<ByteBuffer>(
            dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<DatanodeInfo>();
    ByteBuffer bb = null;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    block = getBlockAt(block.getStartOffset());
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.wrap(buf, offset, len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end, bb,
                    corruptedBlockMap, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(dfsClient.getHedgedReadTimeout(),
                        TimeUnit.MILLISECONDS);
                if (future != null) {
                    future.get();
                    return;
                }
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Waited " + dfsClient.getHedgedReadTimeout() + "ms to read from "
                            + chosenNode.info + "; spawning hedged read");
                }
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                continue; // no need to refresh block locations
            } catch (InterruptedException e) {
                // Ignore
            } catch (ExecutionException e) {
                // Ignore already logged in the call.
            }
        } else {
            // We are starting up a 'hedged' read. We have a read already
            // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
            // If no nodes to do hedged reads against, pass.
            try {
                try {
                    chosenNode = getBestNodeDNAddrPair(block, ignored);
                } catch (IOException ioe) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end,
                        bb, corruptedBlockMap, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Failed getting node for hedged read: " + ioe.getMessage());
                }
            }
            // if not succeeded. Submit callables for each datanode in a loop, wait
            // for a fixed interval and get the result from the fastest one.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                if (result.array() != buf) { // compare the array pointers
                    dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                    System.arraycopy(result.array(), result.position(), buf, offset, len);
                } else {
                    dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                }
                return;
            } catch (InterruptedException ie) {
                // Ignore and retry
            }
            // We got here if exception. Ignore this node on next go around IFF
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}
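
The two DFSInputStream listings above lean on CompletionService.poll(timeout, unit) rather than take(): the first read gets a bounded head start, and only if it has not completed in time is a second, "hedged" request submitted, with the first result to arrive winning. A minimal sketch of that timeout-then-hedge shape follows; the task names, sleep, and timeout values are made up for illustration and are not the HDFS values.

import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class HedgedRequestSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        CompletionService<String> service = new ExecutorCompletionService<String>(executor);

        Callable<String> primary = () -> {
            Thread.sleep(500);          // simulate a slow primary request
            return "primary";
        };
        Callable<String> hedge = () -> "hedge";

        try {
            service.submit(primary);
            // Give the primary request a bounded head start.
            Future<String> first = service.poll(100, TimeUnit.MILLISECONDS);
            if (first == null) {
                // Primary is slow: issue the hedged request and take whichever finishes first.
                service.submit(hedge);
                first = service.take();
            }
            System.out.println("winner: " + first.get());
        } finally {
            // Abandon whichever request is still running.
            executor.shutdownNow();
        }
    }
}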

From source file:org.apache.hadoop.hbase.regionserver.HRegion.java

private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status) throws IOException {
    if (isClosed()) {
        LOG.warn("Region " + this + " already closed");
        return null;
    }

    if (coprocessorHost != null) {
        status.setStatus("Running coprocessor pre-close hooks");
        this.coprocessorHost.preClose(abort);
    }

    status.setStatus("Disabling compacts and flushes for region");
    synchronized (writestate) {
        // Disable compacting and flushing by background threads for this
        // region.
        writestate.writesEnabled = false;
        LOG.debug("Closing " + this + ": disabling compactions & flushes");
        waitForFlushesAndCompactions();
    }
    // If we were not just flushing, is it worth doing a preflush...one
    // that will clear out of the bulk of the memstore before we put up
    // the close flag?
    if (!abort && worthPreFlushing()) {
        status.setStatus("Pre-flushing region before close");
        LOG.info("Running close preflush of " + this.getRegionNameAsString());
        try {
            internalFlushcache(status);
        } catch (IOException ioe) {
            // Failed to flush the region. Keep going.
            status.setStatus("Failed pre-flush " + this + "; " + ioe.getMessage());
        }
    }

    this.closing.set(true);
    status.setStatus("Disabling writes for close");
    // block waiting for the lock for closing
    lock.writeLock().lock();
    try {
        if (this.isClosed()) {
            status.abort("Already got closed by another process");
            // SplitTransaction handles the null
            return null;
        }
        LOG.debug("Updates disabled for region " + this);
        // Don't flush the cache if we are aborting
        if (!abort) {
            int flushCount = 0;
            while (this.getMemstoreSize().get() > 0) {
                try {
                    if (flushCount++ > 0) {
                        int actualFlushes = flushCount - 1;
                        if (actualFlushes > 5) {
                            // If we tried 5 times and are unable to clear memory, abort
                            // so we do not lose data
                            throw new DroppedSnapshotException("Failed clearing memory after " + actualFlushes
                                    + " attempts on region: " + Bytes.toStringBinary(getRegionName()));
                        }
                        LOG.info("Running extra flush, " + actualFlushes + " (carrying snapshot?) " + this);
                    }
                    internalFlushcache(status);
                } catch (IOException ioe) {
                    status.setStatus("Failed flush " + this + ", putting online again");
                    synchronized (writestate) {
                        writestate.writesEnabled = true;
                    }
                    // Have to throw to upper layers.  I can't abort server from here.
                    throw ioe;
                }
            }
        }

        Map<byte[], List<StoreFile>> result = new TreeMap<byte[], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
        if (!stores.isEmpty()) {
            // initialize the thread pool for closing stores in parallel.
            ThreadPoolExecutor storeCloserThreadPool = getStoreOpenAndCloseThreadPool(
                    "StoreCloserThread-" + this.getRegionNameAsString());
            CompletionService<Pair<byte[], Collection<StoreFile>>> completionService = new ExecutorCompletionService<Pair<byte[], Collection<StoreFile>>>(
                    storeCloserThreadPool);

            // close each store in parallel
            for (final Store store : stores.values()) {
                assert abort || store.getFlushableSize() == 0;
                completionService.submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
                    @Override
                    public Pair<byte[], Collection<StoreFile>> call() throws IOException {
                        return new Pair<byte[], Collection<StoreFile>>(store.getFamily().getName(),
                                store.close());
                    }
                });
            }
            try {
                for (int i = 0; i < stores.size(); i++) {
                    Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
                    Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
                    List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
                    if (familyFiles == null) {
                        familyFiles = new ArrayList<StoreFile>();
                        result.put(storeFiles.getFirst(), familyFiles);
                    }
                    familyFiles.addAll(storeFiles.getSecond());
                }
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            } catch (ExecutionException e) {
                throw new IOException(e.getCause());
            } finally {
                storeCloserThreadPool.shutdownNow();
            }
        }
        this.closed.set(true);
        if (memstoreSize.get() != 0)
            LOG.error("Memstore size is " + memstoreSize.get());
        if (coprocessorHost != null) {
            status.setStatus("Running coprocessor post-close hooks");
            this.coprocessorHost.postClose(abort);
        }
        if (this.metricsRegion != null) {
            this.metricsRegion.close();
        }
        if (this.metricsRegionWrapper != null) {
            Closeables.closeQuietly(this.metricsRegionWrapper);
        }
        status.markComplete("Closed");
        LOG.info("Closed " + this);
        return result;
    } finally {
        lock.writeLock().unlock();
    }
}

From source file:org.openspaces.admin.internal.admin.DefaultAdmin.java

@Override
public DumpResult generateDump(final Set<DumpProvider> dumpProviders, final DumpGeneratedListener listener,
        final String cause, final Map<String, Object> context, final String... processor)
        throws AdminException {
    CompoundDumpResult dumpResult = new CompoundDumpResult();

    ExecutorService es = Executors.newFixedThreadPool(dumpProviders.size());
    CompletionService<DumpResult> cs = new ExecutorCompletionService<DumpResult>(es);

    final AtomicInteger counter = new AtomicInteger();
    for (final DumpProvider dumpProvider : dumpProviders) {
        cs.submit(new Callable<DumpResult>() {
            @Override
            public DumpResult call() throws Exception {
                DumpResult result = dumpProvider.generateDump(cause, context, processor);
                synchronized (listener) {
                    listener.onGenerated(dumpProvider, result, counter.incrementAndGet(), dumpProviders.size());
                }
                return result;
            }
        });
    }

    for (int i = 0; i < dumpProviders.size(); i++) {
        try {
            dumpResult.add(cs.take().get());
        } catch (Exception e) {
            // ignore it for now
        }
    }

    es.shutdown();

    return dumpResult;
}