Example usage for java.lang Thread interrupted

List of usage examples for java.lang Thread interrupted

Introduction

On this page you can find usage examples for java.lang.Thread.interrupted().

Prototype

public static boolean interrupted() 

Document

Tests whether the current thread has been interrupted. The interrupted status of the thread is cleared by this method.
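
Because interrupted() both reports and clears the interrupt flag in a single call, the examples below use it either as a cancellation check inside a worker loop or simply to clear a pending interrupt before retrying a blocking call. A minimal, self-contained sketch (not taken from any of the sources listed here) showing the clear-on-read behavior:

public class InterruptedDemo {

    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            // Spin until an interrupt is observed; interrupted() returns true
            // for the pending interrupt and clears the flag as a side effect.
            while (!Thread.interrupted()) {
                // simulated work
            }
            // The flag was cleared by interrupted(), so this prints "false".
            System.out.println("still interrupted? " + Thread.currentThread().isInterrupted());
        });
        worker.start();
        worker.interrupt(); // request the worker to stop
        worker.join();
    }
}

In contrast, Thread.currentThread().isInterrupted() only reads the status without clearing it, which is why the com.servoy.j2db.util.Utils example below clears the flag explicitly before calling FileChannel.read().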

Usage

From source file:me.spadival.podmode.PodModeService.java

private podCommand readCommand() {
    byte[] rbuf = new byte[4096];
    int len = 0;
    boolean notEnoughData = false;

    while (mPodRunning) {

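        // Bail out of the read loop if this worker thread has been interrupted (the call also clears the interrupt flag).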
        if (Thread.interrupted())
            return null;

        if (mReadLen < 6 || notEnoughData) {

            try {
                len = mSerialRead(rbuf);
            } catch (NullPointerException e) {
                Log.d("PodMode", "Read failed - Null pointer");
                mPodRunning = false;
            }

            if (len > 0) {
                System.arraycopy(rbuf, 0, mReadBuffer, mReadLen, len);
                mReadLen += len;
                notEnoughData = false;
            } else if (len < 0) {
                Log.d("PodMode read error : ", String.valueOf(len));

                if (mDeviceType == deviceType.FT232PL2303) {
                    mSerialHost = new FTDriver((UsbManager) getSystemService(Context.USB_SERVICE));
                    mSerialHost.begin(mSerialBaudRate);
                }
            }

        } else {

            if (mReadBuffer[0] == (byte) 0xFF && mReadBuffer[1] == (byte) 0x55
                    && mReadBuffer[2] == (byte) 0xF7) {
                System.arraycopy(mReadBuffer, 3, mReadBuffer, 0, mReadLen - 3);
                mReadLen -= 3;
            }

            if (mReadBuffer[0] == (byte) 0xFF && mReadBuffer[1] == (byte) 0x55) {

                int readBufLen = (int) (mReadBuffer[2] & 0xFF);
                int cmdLen = readBufLen + 4;

                if (mReadLen >= cmdLen) {

                    podCommand cmd = new podCommand(mReadBuffer, cmdLen);

                    if (mReadLen == cmdLen)
                        mReadLen = 0;
                    else {
                        System.arraycopy(mReadBuffer, cmdLen, mReadBuffer, 0, mReadLen - cmdLen);
                        mReadLen -= cmdLen;
                    }

                    return cmd;

                } else
                    notEnoughData = true;

            } else {

                if (mReadLen == 1) {
                    if (mReadBuffer[0] != (byte) 0xFF) {
                        mReadLen = 0;
                    }
                } else {
                    System.arraycopy(mReadBuffer, 1, mReadBuffer, 0, mReadLen - 1);
                    mReadLen -= 1;
                }
            }

        }
    }
    return null;
}

From source file:net.yacy.peers.Protocol.java

/**
 * Execute solr query against specified target.
 * @param event search event to feed with results
 * @param solrQuery solr query
 * @param offset pagination start index
 * @param count expected maximum results
 * @param target target peer to query. May be null : in that case, local peer is queried.
 * @param partitions
 * @param blacklist url list to exclude from results
 * @param useSolrFacets when true, use Solr computed facets when possible to update the event navigators counters
 * @param incrementNavigators when true, increment event navigators either with facet counts or with individual results
 * @return the size of results list
 * @throws InterruptedException when interrupt status on calling thread is detected while processing
 */
protected static int solrQuery(final SearchEvent event, final SolrQuery solrQuery, final int offset,
        final int count, final Seed target, final int partitions, final Blacklist blacklist,
        final boolean useSolrFacets, final boolean incrementNavigators) throws InterruptedException {

    //try {System.out.println("*** debug-query *** " + URLDecoder.decode(solrQuery.toString(), "UTF-8"));} catch (UnsupportedEncodingException e) {}

    if (event.query.getQueryGoal().getQueryString(false) == null
            || event.query.getQueryGoal().getQueryString(false).length() == 0) {
        return -1; // we cannot query solr only with word hashes, there is no clear text string
    }
    event.addExpectedRemoteReferences(count);
    if (partitions > 0)
        solrQuery.set("partitions", partitions);
    solrQuery.setStart(offset);
    solrQuery.setRows(count);

    boolean localsearch = target == null || target.equals(event.peers.mySeed());
    Map<String, ReversibleScoreMap<String>> facets = new HashMap<String, ReversibleScoreMap<String>>(
            event.query.facetfields.size());
    Map<String, LinkedHashSet<String>> snippets = new HashMap<String, LinkedHashSet<String>>(); // this will be a list of urlhash-snippet entries
    final QueryResponse[] rsp = new QueryResponse[] { null };
    final SolrDocumentList[] docList = new SolrDocumentList[] { null };
    {// encapsulate expensive solr QueryResponse object
        if (localsearch && !Switchboard.getSwitchboard()
                .getConfigBool(SwitchboardConstants.DEBUG_SEARCH_REMOTE_SOLR_TESTLOCAL, false)) {
            // search the local index
            try {
                SolrConnector sc = event.getQuery().getSegment().fulltext().getDefaultConnector();
                if (!sc.isClosed()) {
                    rsp[0] = sc.getResponseByParams(solrQuery);
                    docList[0] = rsp[0].getResults();
                }
            } catch (final Throwable e) {
                Network.log.info("SEARCH failed (solr), localpeer (" + e.getMessage() + ")", e);
                return -1;
            }
        } else {
            String targetBaseURL = null;
            try {
                final boolean myseed = target == event.peers.mySeed();
                if (myseed) {
                    targetBaseURL = "http://localhost:" + target.getPort();
                } else {
                    final Set<String> ips = target.getIPs();
                    if (ips.isEmpty()) {
                        /* This should not happen : seeds db maintains only seeds with at least one IP */
                        Network.log.info("SEARCH failed (solr), remote Peer: " + target.getName()
                                + " has no known IP address");
                        target.setFlagSolrAvailable(false);
                        return -1;
                    }
                    final String ip = ips.iterator().next();

                    targetBaseURL = target.getPublicURL(ip,
                            Switchboard.getSwitchboard().getConfigBool(
                                    SwitchboardConstants.REMOTESEARCH_HTTPS_PREFERRED,
                                    SwitchboardConstants.REMOTESEARCH_HTTPS_PREFERRED_DEFAULT));
                }
                if (!myseed && !target.getFlagSolrAvailable()) { // skip if peer.dna has flag that last try resulted in error
                    Network.log.info("SEARCH skip (solr), remote Solr interface not accessible, peer="
                            + target.getName());
                    return -1;
                }
                final int solrtimeout = Switchboard.getSwitchboard()
                        .getConfigInt(SwitchboardConstants.FEDERATED_SERVICE_SOLR_INDEXING_TIMEOUT, 6000);
                SolrRequestTask remoteRequest = new SolrRequestTask(solrQuery, targetBaseURL, target, myseed,
                        solrtimeout, rsp, docList);
                remoteRequest.start();
                remoteRequest.join(solrtimeout); // just wait until timeout appears
                if (remoteRequest.isAlive()) {
                    /* Try to free the request thread resources properly */
                    remoteRequest.close();
                    if (remoteRequest.isAlive()) {
                        /* Thread still running : try also with interrupt*/
                        remoteRequest.interrupt();
                    }
                    Network.log.info("SEARCH failed (solr), remote Peer: " + target.getName() + "/"
                            + targetBaseURL + " does not answer (time-out)");
                    target.setFlagSolrAvailable(false || myseed);
                    return -1; // give up, leave remoteRequest abandoned.
                }

                if (rsp[0] == null || docList[0] == null) {
                    Network.log.info("SEARCH failed (solr), remote Peer: " + target.getName() + "/"
                            + targetBaseURL + " returned null");
                    if (!myseed) {
                        if (targetBaseURL.startsWith("https")) {
                            /* First mark https unavailable on this peer before removing anything else */
                            target.setFlagSSLAvailable(false);
                            event.peers.updateConnected(target);
                        } else {
                            target.setFlagSolrAvailable(false);
                        }
                    }
                    return -1;
                }
            } catch (InterruptedException e) {
                /* Current thread might be interrupted by SearchEvent.cleanup():
                 * in that case we must not mark the target as unavailable, but rather propagate the exception to the caller (likely RemoteSearch.solrRemoteSearch) */
                throw e;
            } catch (final Throwable e) {
                if (Network.log.isInfo()) {
                    Network.log.info("SEARCH failed (solr), remote Peer: " + target.getName()
                            + (targetBaseURL != null ? "/" + targetBaseURL : "") + " (" + e.getMessage() + ")");
                }
                target.setFlagSolrAvailable(false || localsearch);
                return -1;
            }
        }

        // evaluate facets
        if (useSolrFacets) {
            for (String field : event.query.facetfields) {
                FacetField facet = rsp[0].getFacetField(field);
                ReversibleScoreMap<String> result = new ClusteredScoreMap<String>(
                        UTF8.insensitiveUTF8Comparator);
                List<Count> values = facet == null ? null : facet.getValues();
                if (values == null) {
                    continue;
                }
                for (Count ff : values) {
                    int c = (int) ff.getCount();
                    if (c == 0) {
                        continue;
                    }
                    if (ff.getName().length() == 0) {
                        continue; // facet entry without text is not useful
                    }
                    result.set(ff.getName(), c);
                }
                if (result.size() > 0) {
                    facets.put(field, result);
                }
            }
        }

        // evaluate snippets
        final Map<String, Map<String, List<String>>> rawsnippets = rsp[0].getHighlighting(); // a map from the urlhash to a map with key=field and value = list of snippets
        if (rawsnippets != null) {
            nextsnippet: for (final Map.Entry<String, Map<String, List<String>>> re : rawsnippets.entrySet()) {
                final Map<String, List<String>> rs = re.getValue();
                for (final String field : solrQuery.getHighlightFields()) {
                    if (rs.containsKey(field)) {
                        final List<String> s = rs.get(field);
                        if (s.size() > 0) {
                            final LinkedHashSet<String> ls = new LinkedHashSet<String>();
                            ls.addAll(s);
                            snippets.put(re.getKey(), ls);
                            continue nextsnippet;
                        }
                    }
                }
                // no snippet found :( --we don't assign a value here by default; that can be done as an evaluation outside this method
            }
        }
        rsp[0] = null;
    }

    // evaluate result
    if (docList == null || docList[0].size() == 0) {
        Network.log.info("SEARCH (solr), returned 0 out of 0 documents from "
                + (target == null ? "shard" : ("peer " + target.hash + ":" + target.getName())) + " query = "
                + solrQuery.toString());
        return 0;
    }

    List<URIMetadataNode> resultContainer = new ArrayList<URIMetadataNode>();
    Network.log.info("SEARCH (solr), returned " + docList[0].size() + " out of " + docList[0].getNumFound()
            + " documents and " + facets.size() + " facets " + facets.keySet().toString() + " from "
            + (target == null ? "shard" : ("peer " + target.hash + ":" + target.getName())));
    int term = count;
    Collection<SolrInputDocument> docs;
    if (event.addResultsToLocalIndex) { // only needed to store remote results
        docs = new ArrayList<SolrInputDocument>(docList[0].size());
    } else
        docs = null;
    for (final SolrDocument tmpdoc : docList[0]) {
        //System.out.println("***DEBUG*** " + ((String) doc.getFieldValue("sku")));
        if (term-- <= 0) {
            break; // do not process more than requested (in case evil peers fill us up with rubbish)
        }
        // get one single search result
        if (tmpdoc == null) {
            continue;
        }
        URIMetadataNode urlEntry;
        try {
            urlEntry = new URIMetadataNode(tmpdoc);
        } catch (MalformedURLException ex) {
            continue;
        }

        if (blacklist.isListed(BlacklistType.SEARCH, urlEntry.url())) {
            if (Network.log.isInfo()) {
                if (localsearch) {
                    Network.log.info("local search (solr): filtered blacklisted url "
                            + urlEntry.url().toNormalform(true));
                } else {
                    Network.log.info("remote search (solr): filtered blacklisted url "
                            + urlEntry.url().toNormalform(true) + " from "
                            + (target == null ? "shard" : ("peer " + target.hash + ":" + target.getName())));
                }
            }
            continue; // block with blacklist
        }

        final String urlRejectReason = Switchboard.getSwitchboard().crawlStacker
                .urlInAcceptedDomain(urlEntry.url());
        if (urlRejectReason != null) {
            if (Network.log.isInfo()) {
                if (localsearch) {
                    Network.log.info("local search (solr): rejected url '" + urlEntry.url().toNormalform(true)
                            + "' (" + urlRejectReason + ")");
                } else {
                    Network.log.info("remote search (solr): rejected url '" + urlEntry.url().toNormalform(true)
                            + "' (" + urlRejectReason + ") from peer " + target.getName());
                }
            }
            continue; // reject url outside of our domain
        }

        // passed all checks, store url
        if (!localsearch) {

            // put the remote documents to the local index. We must convert the solr document to a solr input document:
            if (event.addResultsToLocalIndex) {
                /* Check document size, only if a limit is set on remote documents size allowed to be stored to local index */
                if (checkDocumentSize(tmpdoc, event.getRemoteDocStoredMaxSize() * 1024)) {
                    final SolrInputDocument sid = event.query.getSegment().fulltext().getDefaultConfiguration()
                            .toSolrInputDocument(tmpdoc);

                    // the input document stays untouched because it contains top-level cloned objects
                    docs.add(sid);
                    // will be stored to index, and is a full solr document, can be added to firstseen
                    event.query.getSegment().setFirstSeenTime(urlEntry.hash(),
                            Math.min(urlEntry.moddate().getTime(), System.currentTimeMillis()));
                } else {
                    Network.log.info("Document size greater than " + event.getRemoteDocStoredMaxSize()
                            + " kbytes, excludes it from being stored to local index. Url : "
                            + urlEntry.urlstring());
                }
            }

            // after this conversion we can remove the largest and not used field text_t and synonyms_sxt from the document
            // because that goes into a search cache and would take a lot of memory in the search cache
            //doc.removeFields(CollectionSchema.text_t.getSolrFieldName());
            tmpdoc.removeFields(CollectionSchema.synonyms_sxt.getSolrFieldName());

            ResultURLs.stack(ASCII.String(urlEntry.url().hash()), urlEntry.url().getHost(),
                    event.peers.mySeed().hash.getBytes(), UTF8.getBytes(target.hash), EventOrigin.QUERIES);
        }

        // add the url entry to the checked results
        resultContainer.add(urlEntry);
    }
    final int numFound = (int) docList[0].getNumFound();
    docList[0].clear();
    docList[0] = null;
    if (localsearch) {
        event.addNodes(resultContainer, facets, snippets, true, "localpeer", numFound, incrementNavigators);
        event.addFinalize();
        event.addExpectedRemoteReferences(-count);
        Network.log.info("local search (solr): localpeer sent " + resultContainer.size() + "/" + numFound
                + " references");
    } else {
        if (event.addResultsToLocalIndex) {
            /*
             * Current thread might be interrupted by SearchEvent.cleanup()
             */
            if (Thread.interrupted()) {
                throw new InterruptedException("solrQuery interrupted");
            }
            WriteToLocalIndexThread writeToLocalIndexThread = new WriteToLocalIndexThread(
                    event.query.getSegment(), docs); // will clear docs on return
            writeToLocalIndexThread.start();
        }
        event.addNodes(resultContainer, facets, snippets, false, target.getName() + "/" + target.hash, numFound,
                incrementNavigators);
        event.addFinalize();
        event.addExpectedRemoteReferences(-count);
        Network.log.info("remote search (solr): peer " + target.getName() + " sent " + (resultContainer.size())
                + "/" + numFound + " references");
    }
    return resultContainer.size();
}

From source file:com.servoy.j2db.util.Utils.java

public static String getTXTFileContent(File f, Charset charset) {
    if (f != null /* && f.exists() */) {
        if (Thread.currentThread().isInterrupted()) {
            Thread.interrupted(); // reset the interrupted flag of the current thread; FileChannel.read() would otherwise throw an exception because of it.
        }
        FileInputStream fis = null;
        try {
            int length = (int) f.length();
            if (f.exists()) {
                fis = new FileInputStream(f);
                FileChannel fc = fis.getChannel();
                ByteBuffer bb = ByteBuffer.allocate(length);
                fc.read(bb);
                bb.rewind();
                CharBuffer cb = charset.decode(bb);
                return cb.toString();
            }
        } catch (Exception e) {
            Debug.error("Error reading txt file: " + f, e); //$NON-NLS-1$
        } finally {
            closeInputStream(fis);
        }
    }
    return null;
}

From source file:org.apache.solr.cloud.ZkController.java

/**
 * Best effort to set DOWN state for all replicas on node.
 *
 * @param nodeName to operate on
 */
public void publishNodeAsDown(String nodeName) {
    log.debug("Publish node={} as DOWN", nodeName);
    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.DOWNNODE.toLower(),
            ZkStateReader.NODE_NAME_PROP, nodeName);
    try {
        Overseer.getStateUpdateQueue(getZkClient()).offer(Utils.toJSON(m));
    } catch (InterruptedException e) {
        Thread.interrupted();
        log.debug("Publish node as down was interrupted.");
    } catch (Exception e) {
        log.warn("Could not publish node as down: " + e.getMessage());
    }
}

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java

public <K, V> Region<K, V> createVMRegion(String name, RegionAttributes<K, V> p_attrs,
        InternalRegionArguments internalRegionArgs)
        throws RegionExistsException, TimeoutException, IOException, ClassNotFoundException {
    if (getMyId().getVmKind() == DistributionManager.LOCATOR_DM_TYPE) {
        if (!internalRegionArgs.isUsedForMetaRegion() && internalRegionArgs.getInternalMetaRegion() == null) {
            throw new IllegalStateException("Regions can not be created in a locator.");
        }
    }
    stopper.checkCancelInProgress(null);
    LocalRegion.validateRegionName(name, internalRegionArgs);
    RegionAttributes<K, V> attrs = p_attrs;
    attrs = invokeRegionBefore(null, name, attrs, internalRegionArgs);
    if (attrs == null) {
        throw new IllegalArgumentException(
                LocalizedStrings.GemFireCache_ATTRIBUTES_MUST_NOT_BE_NULL.toLocalizedString());
    }

    LocalRegion rgn = null;
    // final boolean getDestroyLock = attrs.getDestroyLockFlag();
    final InputStream snapshotInputStream = internalRegionArgs.getSnapshotInputStream();
    InternalDistributedMember imageTarget = internalRegionArgs.getImageTarget();
    final boolean recreate = internalRegionArgs.getRecreateFlag();

    final boolean isPartitionedRegion = attrs.getPartitionAttributes() != null;
    final boolean isReinitCreate = snapshotInputStream != null || imageTarget != null || recreate;

    final String regionPath = LocalRegion.calcFullPath(name, null);

    try {
        for (;;) {
            getCancelCriterion().checkCancelInProgress(null);

            Future future = null;
            synchronized (this.rootRegions) {
                rgn = (LocalRegion) this.rootRegions.get(name);
                if (rgn != null) {
                    throw new RegionExistsException(rgn);
                }
                // check for case where a root region is being reinitialized and we
                // didn't
                // find a region, i.e. the new region is about to be created

                if (!isReinitCreate) { // fix bug 33523
                    String fullPath = Region.SEPARATOR + name;
                    future = (Future) this.reinitializingRegions.get(fullPath);
                }
                if (future == null) {
                    if (internalRegionArgs.getInternalMetaRegion() != null) {
                        rgn = internalRegionArgs.getInternalMetaRegion();
                    } else if (isPartitionedRegion) {
                        rgn = new PartitionedRegion(name, attrs, null, this, internalRegionArgs);
                    } else {
                        /*
                         * for (String senderId : attrs.getGatewaySenderIds()) { if
                         * (getGatewaySender(senderId) != null && getGatewaySender(senderId).isParallel()) {
                         * throw new IllegalStateException( LocalizedStrings.
                         * AttributesFactory_PARALLELGATEWAYSENDER_0_IS_INCOMPATIBLE_WITH_DISTRIBUTED_REPLICATION
                         * .toLocalizedString(senderId)); } }
                         */
                        if (attrs.getScope().isLocal()) {
                            rgn = new LocalRegion(name, attrs, null, this, internalRegionArgs);
                        } else {
                            rgn = new DistributedRegion(name, attrs, null, this, internalRegionArgs);
                        }
                    }

                    this.rootRegions.put(name, rgn);
                    if (isReinitCreate) {
                        regionReinitialized(rgn);
                    }
                    break;
                }
            } // synchronized

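            // Remember and clear the interrupt status so the blocking future.get() below is not interrupted immediately; it is restored in the finally block if needed.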
            boolean interrupted = Thread.interrupted();
            try { // future != null
                LocalRegion region = (LocalRegion) future.get(); // wait on Future
                throw new RegionExistsException(region);
            } catch (InterruptedException e) {
                interrupted = true;
            } catch (ExecutionException e) {
                throw new Error(LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString(), e);
            } catch (CancellationException e) {
                // future was cancelled
            } finally {
                if (interrupted)
                    Thread.currentThread().interrupt();
            }
        } // for

        boolean success = false;
        try {
            setRegionByPath(rgn.getFullPath(), rgn);
            rgn.initialize(snapshotInputStream, imageTarget, internalRegionArgs);
            success = true;
        } catch (CancelException e) {
            // don't print a call stack
            throw e;
        } catch (RedundancyAlreadyMetException e) {
            // don't log this
            throw e;
        } catch (final RuntimeException validationException) {
            logger.warn(
                    LocalizedMessage.create(LocalizedStrings.GemFireCache_INITIALIZATION_FAILED_FOR_REGION_0,
                            rgn.getFullPath()),
                    validationException);
            throw validationException;
        } finally {
            if (!success) {
                try {
                    // do this before removing the region from
                    // the root set to fix bug 41982.
                    rgn.cleanupFailedInitialization();
                } catch (VirtualMachineError e) {
                    SystemFailure.initiateFailure(e);
                    throw e;
                } catch (Throwable t) {
                    SystemFailure.checkFailure();
                    stopper.checkCancelInProgress(t);

                    // bug #44672 - log the failure but don't override the original exception
                    logger.warn(LocalizedMessage.create(
                            LocalizedStrings.GemFireCache_INIT_CLEANUP_FAILED_FOR_REGION_0, rgn.getFullPath()),
                            t);

                } finally {
                    // clean up if initialize fails for any reason
                    setRegionByPath(rgn.getFullPath(), null);
                    synchronized (this.rootRegions) {
                        Region r = (Region) this.rootRegions.get(name);
                        if (r == rgn) {
                            this.rootRegions.remove(name);
                        }
                    } // synchronized
                }
            } // success
        }

        rgn.postCreateRegion();
    } catch (RegionExistsException ex) {
        // outside of sync make sure region is initialized to fix bug 37563
        LocalRegion r = (LocalRegion) ex.getRegion();
        r.waitOnInitialization(); // don't give out ref until initialized
        throw ex;
    }

    invokeRegionAfter(rgn);
    /**
     * Added for M&M. Putting the callback here to avoid creating a RegionMBean in case of an exception.
     */
    if (!rgn.isInternalRegion()) {
        system.handleResourceEvent(ResourceEvent.REGION_CREATE, rgn);
    }

    return rgn;
}

From source file:v800_trainer.JCicloTronic.java

private void jMenuOpenallActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jMenuOpenallActionPerformed
    // Add your handling code here:

    int i;
    StringBuffer Buffer = new StringBuffer();
    String[] liste = new String[1];
    byte Data[] = new byte[81930];
    File path = new File(Properties.getProperty("data.dir"));
    File Datei;

    FileFilter directoryFilter = new FileFilter() {
        public boolean accept(File file) {
            return file.isDirectory();
        }

        public String getDescription() {
            return "";
        };
    };

    chooser.setCurrentDirectory(
            new java.io.File(Properties.getProperty("import.dir", Properties.getProperty("data.dir"))));
    chooser.setDialogType(JFileChooser.OPEN_DIALOG);
    ExampleFileFilter filtera = new ExampleFileFilter();
    ExampleFileFilter filterb = new ExampleFileFilter();
    ExampleFileFilter filterc = new ExampleFileFilter();
    ExampleFileFilter filterd = new ExampleFileFilter();
    ExampleFileFilter filtere = new ExampleFileFilter();

    filtera.addExtension("dat");
    filtera.setDescription("HAC Rohdaten");
    filterb.addExtension("tur");
    filterb.setDescription("Hactronic Dateien");
    filterc.addExtension("hrm");
    filterc.setDescription("Polar Daten");
    filterd.addExtension("");
    filterd.setDescription("Polar V800 Verzeichnis");
    filtere.addExtension("csv");
    filtere.setDescription("Polar V800 CSV Flow export");

    chooser.resetChoosableFileFilters();
    chooser.addChoosableFileFilter(filtera);
    chooser.addChoosableFileFilter(filterb);
    chooser.addChoosableFileFilter(filterc);
    chooser.addChoosableFileFilter(filterd);
    chooser.addChoosableFileFilter(filtere);
    chooser.setFileFilter(filtere);

    chooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);

    int returnVal = chooser.showDialog(this, null);
    if (returnVal == JFileChooser.APPROVE_OPTION) {

        if (chooser.getSelectedFile().getName().endsWith(".dat")) {
            path = new File(chooser.getCurrentDirectory().getPath());
        } else {
            path = new File(chooser.getSelectedFile().getPath());
        }

        if (chooser.getFileFilter().equals(filtera)) {
            liste = path.list(new DirFilter(".dat"));
        }
        if (chooser.getFileFilter().equals(filterb)) {
            liste = path.list(new DirFilter(".tur"));
        }
        if (chooser.getFileFilter().equals(filterc)) {
            liste = path.list(new DirFilter(".hrm"));
        }
        if (chooser.getFileFilter().equals(filtere)) {
            liste = path.list(new DirFilter(".csv"));
        }

        if (chooser.getFileFilter().equals(filterd)) {

            File[] files = path.listFiles();
            ArrayList<String> pathliste = new ArrayList();
            for (File file : files) {
                try {
                    if (file.isDirectory()) {
                        pathliste.add(file.getCanonicalPath());
                    }
                } catch (Exception e) {
                }
            }

            Thread thread = new Thread(new Runnable() {

                public void run() {
                    setCursor(new Cursor(Cursor.WAIT_CURSOR));
                    pm = new ProgressMonitor(JCicloTronic.this, "Importiere...", "", 0, 100);
                    pm.setMillisToPopup(1);

                    v800export V800_export = new v800export();
                    V800_export.export_sessions(JCicloTronic.this, pathliste);
                    pm.close();
                    ChangeModel();
                    setCursor(new Cursor(Cursor.DEFAULT_CURSOR));

                    try {

                        Thread.sleep(100);
                    } catch (Exception e) {
                        if (Thread.interrupted()) {
                            return;
                        }
                    }
                }

            });

            thread.start();

            return;

        }

        // import all files except V800
        if (liste == null) {
            JOptionPane.showMessageDialog(null, "Keine Rohdaten-Files gefunden!", "Achtung!",
                    JOptionPane.ERROR_MESSAGE);
            return;
        }
        final String[] liste_final = liste.clone();
        final File path_final = path;

        Thread thread = new Thread(new Runnable() {

            public void run() {
                File Datei;
                byte Data[] = new byte[81930];

                setCursor(new Cursor(Cursor.WAIT_CURSOR));
                pm = new ProgressMonitor(JCicloTronic.this, "Importiere...", "", 0, 100);
                pm.setMillisToPopup(1);

                for (int i = 0; i < liste_final.length; i++) {
                    pm.setProgress((int) 100.0 * i / liste_final.length);
                    pm.setNote(liste_final[i]);

                    try {
                        Eingabedatei = new java.io.FileInputStream(path_final.getPath()
                                + SystemProperties.getProperty("file.separator") + liste_final[i]);

                        try {
                            Datei = new File(path_final.getPath()
                                    + SystemProperties.getProperty("file.separator") + liste_final[i]);
                            Data = new byte[(int) Datei.length()];
                            //                        Eingabedatei.read()

                            Eingabedatei.read(Data);
                            Eingabedatei.close();
                            if (chooser.getFileFilter().equals(filtera)) {
                                ExtractTour(Data);
                            }
                            if (chooser.getFileFilter().equals(filterb)) {
                                ExtractHactronicFile(Data);
                            }
                            if (chooser.getFileFilter().equals(filterc)) {
                                ExtractPolarFile(Data);
                            }
                            if (chooser.getFileFilter().equals(filtere)) {
                                ExtractCSV(Data);
                            }

                        } catch (IOException e) {
                            JOptionPane.showMessageDialog(null, "IO-Fehler bei Datenlesen", "Achtung!",
                                    JOptionPane.ERROR_MESSAGE);
                        }

                    } catch (FileNotFoundException e) {
                        JOptionPane.showMessageDialog(null,
                                "IO-Fehler bei " + path_final.getPath()
                                        + SystemProperties.getProperty("file.separator") + liste_final[i],
                                "Achtung!", JOptionPane.ERROR_MESSAGE);

                    }

                }
                pm.close();
                JOptionPane.showMessageDialog(null, "Daten  Ende", "Achtung!", JOptionPane.ERROR_MESSAGE);

                Properties.setProperty("import.dir", chooser.getCurrentDirectory().getPath());

                ChangeModel();
                setCursor(new Cursor(Cursor.DEFAULT_CURSOR));

                try {

                    Thread.sleep(100);
                } catch (Exception e) {
                    if (Thread.interrupted()) {
                        return;
                    }
                }
            }

        });

        thread.start();

    }
}

From source file:v800_trainer.JCicloTronic.java

private void jMenu_V800_LadenMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jMenu_V800_LadenMouseClicked

    V800_Download_Training V800_read = new V800_Download_Training();
    if (!V800_read.start(this))
        return;
    setCursor(new Cursor(Cursor.WAIT_CURSOR));
    ArrayList<String> sessions = V800_read.get_all_sessions();
    ArrayList<String> NewData = new ArrayList();

    for (int i = 0; i < sessions.size(); i++) {

        String Name = sessions.get(i).replace("/", "");
        Name = Properties.getProperty("data.dir", Properties.getProperty("working.dir"))
                + SystemProperties.getProperty("file.separator") + Name.substring(0, Name.length() - 2)
                + "_Tour.cfg";

        File file = new File(Name);

        if ((file.exists() != true || Integer.parseInt(Properties.getProperty("Daten ueberschreiben")) == 1)) {
            NewData.add(sessions.get(i));
        }

    }

    Thread thread = new Thread(new Runnable() {

        public void run() {
            setCursor(new Cursor(Cursor.WAIT_CURSOR));
            pm = new ProgressMonitor(JCicloTronic.this, "Download...", "", 0, 100);
            pm.setMillisToPopup(0);
            pm.setMillisToDecideToPopup(0);

            ArrayList<String> PathList = V800_read.get_sessions(NewData);

            V800_read.stop();
            //    V800_read = null;

            pm.close();

            v800export V800_export = new v800export();

            pm = new ProgressMonitor(JCicloTronic.this, "Importiere...", "", 0, 100);
            pm.setMillisToPopup(0);
            pm.setMillisToDecideToPopup(0);

            V800_export.export_sessions(JCicloTronic.this, PathList);

            pm.close();
            ChangeModel();

            try {

                Thread.sleep(100);
            } catch (Exception e) {
                if (Thread.interrupted()) {
                    return;
                }
            }
        }

    });

    thread.start();

    ChangeModel();
    setCursor(new Cursor(Cursor.DEFAULT_CURSOR));
}

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java

/**
 * Wait for given sender queue to flush for given timeout.
 *
 * @param id ID of GatewaySender or AsyncEventQueue
 * @param isAsyncListener true if this is for an AsyncEventQueue and false if for a GatewaySender
 * @param maxWaitTime maximum time to wait in seconds; zero or a negative value means infinite wait
 * 
 * @return zero if maxWaitTime was not breached, -1 if queue could not be found or is closed, and
 *         elapsed time if timeout was breached
 */
public int waitForSenderQueueFlush(String id, boolean isAsyncListener, int maxWaitTime) {
    getCancelCriterion().checkCancelInProgress(null);
    AbstractGatewaySender gatewaySender = null;
    if (isAsyncListener) {
        AsyncEventQueueImpl asyncQueue = (AsyncEventQueueImpl) getAsyncEventQueue(id);
        if (asyncQueue != null) {
            gatewaySender = (AbstractGatewaySender) asyncQueue.getSender();
        }
    } else {
        gatewaySender = (AbstractGatewaySender) getGatewaySender(id);
    }
    RegionQueue rq;
    final long startTime = System.currentTimeMillis();
    long elapsedTime;
    if (maxWaitTime <= 0) {
        maxWaitTime = Integer.MAX_VALUE;
    }
    while (gatewaySender != null && gatewaySender.isRunning() && (rq = gatewaySender.getQueue()) != null) {
        if (rq.size() == 0) {
            // return zero since it was not a timeout
            return 0;
        }
        try {
            Thread.sleep(500);
            getCancelCriterion().checkCancelInProgress(null);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            getCancelCriterion().checkCancelInProgress(ie);
        }
        // clear interrupted flag before retry
        Thread.interrupted();
        elapsedTime = System.currentTimeMillis() - startTime;
        if (elapsedTime >= (maxWaitTime * 1000L)) {
            // return elapsed time
            return (int) (elapsedTime / 1000L);
        }
    }
    return -1;
}

From source file:org.apache.geode.internal.cache.Oplog.java

/**
 * Extracts the Value byte array & UserBit from the OpLog
 *
 * @param offsetInOplog The starting position from which to read the data in the opLog
 * @param bitOnly boolean indicating whether the value needs to be extracted along with the
 *        UserBit or not.
 * @param valueLength The length of the byte array which represents the value
 * @param userBits The userBits of the value.
 * @return BytesAndBits object which wraps the extracted value & user bit
 */
private BytesAndBits basicGet(DiskRegionView dr, long offsetInOplog, boolean bitOnly, int valueLength,
        byte userBits) {
    BytesAndBits bb = null;
    if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits) || bitOnly || valueLength == 0) {
        if (EntryBits.isInvalid(userBits)) {
            bb = new BytesAndBits(DiskEntry.INVALID_BYTES, userBits);
        } else if (EntryBits.isTombstone(userBits)) {
            bb = new BytesAndBits(DiskEntry.TOMBSTONE_BYTES, userBits);
        } else {
            bb = new BytesAndBits(DiskEntry.LOCAL_INVALID_BYTES, userBits);
        }
    } else {
        if (offsetInOplog == -1)
            return null;
        try {
            for (;;) {
                dr.getCancelCriterion().checkCancelInProgress(null);
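                // Remember and clear the interrupt status so the read can be retried after an InterruptedIOException; the flag is restored in the finally block if it was set.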
                boolean interrupted = Thread.interrupted();
                try {
                    bb = attemptGet(dr, offsetInOplog, bitOnly, valueLength, userBits);
                    break;
                } catch (InterruptedIOException ignore) { // bug 39756
                    // ignore, we'll clear and retry.
                } finally {
                    if (interrupted) {
                        Thread.currentThread().interrupt();
                    }
                }
            } // for
        } catch (IOException ex) {
            getParent().getCancelCriterion().checkCancelInProgress(ex);
            throw new DiskAccessException(
                    LocalizedStrings.Oplog_FAILED_READING_FROM_0_OPLOGID_1_OFFSET_BEING_READ_2_CURRENT_OPLOG_SIZE_3_ACTUAL_FILE_SIZE_4_IS_ASYNCH_MODE_5_IS_ASYNCH_WRITER_ALIVE_6
                            .toLocalizedString(this.diskFile.getPath(), this.oplogId, offsetInOplog,
                                    this.crf.currSize, this.crf.bytesFlushed, !dr.isSync(), Boolean.FALSE),
                    ex, dr.getName());
        } catch (IllegalStateException ex) {
            checkClosed();
            throw ex;
        }
    }
    return bb;
}