List of usage examples for java.util.concurrent.ConcurrentMap.get
V get(Object key);
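Before the longer real-world excerpts below, here is a minimal, self-contained sketch of the call itself (the map contents and key names are invented for illustration): get returns the value mapped to the key, or null if there is no mapping, and on a ConcurrentHashMap it can be called concurrently with writers without external locking.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapGetExample {
    public static void main(String[] args) {
        final ConcurrentMap<String, Integer> hits = new ConcurrentHashMap<>();
        hits.put("home", 42); // hypothetical key and value

        // get returns the mapped value, or null when the key is absent
        Integer home = hits.get("home");     // 42
        Integer missing = hits.get("about"); // null

        System.out.println(home + " / " + missing);
    }
}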
From source file:com.github.podd.utils.test.OntologyUtilsTest.java
@Test
public void testImportsOrderFourLevels() throws Exception {
    final Model model = new LinkedHashModel();
    OntologyUtils.ontologyIDsToModel(Arrays.asList(OntologyConstant.testOntologyID), model);
    model.add(OntologyConstant.testVersionUri1, OWL.IMPORTS, OntologyConstant.testImportOntologyUri1);
    model.add(OntologyConstant.testImportOntologyUri1, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri1, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri1);
    model.add(OntologyConstant.testImportVersionUri1, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri2, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri2, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri2);
    model.add(OntologyConstant.testImportVersionUri2, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri1, OWL.IMPORTS, OntologyConstant.testImportVersionUri2);
    model.add(OntologyConstant.testImportOntologyUri3, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri3, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri3);
    model.add(OntologyConstant.testImportVersionUri3, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri2, OWL.IMPORTS, OntologyConstant.testImportVersionUri3);
    model.add(OntologyConstant.testImportOntologyUri4, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri4, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri4);
    model.add(OntologyConstant.testImportVersionUri4, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri3, OWL.IMPORTS, OntologyConstant.testImportVersionUri4);

    final Set<URI> schemaOntologyUris = new LinkedHashSet<URI>();
    final Set<URI> schemaVersionUris = new LinkedHashSet<URI>();
    schemaOntologyUris.add(OntologyConstant.testOntologyUri1);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri1);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri2);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri3);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri4);
    schemaVersionUris.add(OntologyConstant.testVersionUri1);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri1);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri2);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri3);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri4);

    final ConcurrentMap<URI, Set<URI>> importsMap = new ConcurrentHashMap<URI, Set<URI>>();
    // Expected output solution from importsMap after calling orderImports
    // importsMap.put(testVersionUri1, Collections.singleton(OntologyConstant.testImportVersionUri1));
    // importsMap.put(testImportVersionUri1, Collections.singleton(OntologyConstant.testImportVersionUri2));
    // importsMap.put(testImportVersionUri2, Collections.singleton(OntologyConstant.testImportVersionUri3));
    // importsMap.put(testImportVersionUri3, new HashSet<URI>(Arrays.asList(OntologyConstant.testImportVersionUri4)));
    // importsMap.put(testImportVersionUri4, new HashSet<URI>());
    final List<URI> orderImports = OntologyUtils.orderImports(model, schemaOntologyUris, schemaVersionUris,
            importsMap, false);

    Assert.assertEquals(5, orderImports.size());
    Assert.assertEquals(OntologyConstant.testImportVersionUri4, orderImports.get(0));
    Assert.assertEquals(OntologyConstant.testImportVersionUri3, orderImports.get(1));
    Assert.assertEquals(OntologyConstant.testImportVersionUri2, orderImports.get(2));
    Assert.assertEquals(OntologyConstant.testImportVersionUri1, orderImports.get(3));
    Assert.assertEquals(OntologyConstant.testVersionUri1, orderImports.get(4));

    Assert.assertEquals(5, importsMap.size());
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri4));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri1));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testVersionUri1));

    final Set<URI> imports4 = importsMap.get(OntologyConstant.testImportVersionUri4);
    Assert.assertNotNull(imports4);
    Assert.assertEquals(0, imports4.size());

    final Set<URI> imports3 = importsMap.get(OntologyConstant.testImportVersionUri3);
    Assert.assertNotNull(imports3);
    Assert.assertEquals(1, imports3.size());
    Assert.assertTrue(imports3.contains(OntologyConstant.testImportVersionUri4));

    final Set<URI> imports2 = importsMap.get(OntologyConstant.testImportVersionUri2);
    Assert.assertNotNull(imports2);
    Assert.assertEquals(2, imports2.size());
    Assert.assertTrue(imports2.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(imports2.contains(OntologyConstant.testImportVersionUri4));

    final Set<URI> imports1 = importsMap.get(OntologyConstant.testImportVersionUri1);
    Assert.assertNotNull(imports1);
    Assert.assertEquals(3, imports1.size());
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri4));

    final Set<URI> importsRoot = importsMap.get(OntologyConstant.testVersionUri1);
    Assert.assertNotNull(importsRoot);
    Assert.assertEquals(4, importsRoot.size());
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri1));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri4));
}
From source file:io.druid.client.cache.MemcachedCache.java
public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }
            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());
        // always use compression
        transcoder.setCompressionThreshold(0);
        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }
        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All",
                    "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)",
                    "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure",
                    "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };
        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();
                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);
                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();
                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);
                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();
                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };
        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000)
                .setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
                .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT)
                .setDaemon(true)
                .setFailureMode(FailureMode.Cancel)
                .setTranscoder(transcoder)
                .setShouldOptimize(true)
                .setOpQueueMaxBlockTime(config.getTimeout())
                .setOpTimeout(config.getTimeout())
                .setReadBufferSize(config.getReadBufferSize())
                .setOpQueueFactory(opQueueFactory)
                .setMetricCollector(metricCollector)
                .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();
        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());
        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;
        if (config.getNumConnections() > 1) {
            clientSupplier = new LoadBalancingPool<MemcachedClientIF>(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers.<ResourceHolder<MemcachedClientIF>>ofInstance(
                    StupidResourceHolder.<MemcachedClientIF>create(new MemcachedClient(connectionFactory, hosts)));
        }
        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
From source file:org.apache.druid.client.cache.MemcachedCache.java
public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }
            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());
        // always use compression
        transcoder.setCompressionThreshold(0);
        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }
        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All",
                    "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)",
                    "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure",
                    "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };
        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();
                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);
                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();
                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);
                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();
                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };
        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000)
                .setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol.valueOf(StringUtils.toUpperCase(config.getProtocol())))
                .setLocatorType(ConnectionFactoryBuilder.Locator.valueOf(StringUtils.toUpperCase(config.getLocator())))
                .setDaemon(true)
                .setFailureMode(FailureMode.Cancel)
                .setTranscoder(transcoder)
                .setShouldOptimize(true)
                .setOpQueueMaxBlockTime(config.getTimeout())
                .setOpTimeout(config.getTimeout())
                .setReadBufferSize(config.getReadBufferSize())
                .setOpQueueFactory(opQueueFactory)
                .setMetricCollector(metricCollector)
                .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();
        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());
        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;
        if (config.getNumConnections() > 1) {
            clientSupplier = new MemcacheClientPool(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers
                    .ofInstance(StupidResourceHolder.create(new MemcachedClient(connectionFactory, hosts)));
        }
        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
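Both Druid variants above initialize counters with a pre-Java-8 idiom: get, then putIfAbsent on a miss, then get again to pick up whichever AtomicLong won the race. Since Java 8, ConcurrentMap.computeIfAbsent gives the same guarantee in a single call; the sketch below (with an invented metric name) is an alternative expression of that idiom, not the Druid code itself.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class CounterIdiomExample {
    private static final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();

    static void incrementCounter(String name) {
        // Atomically creates the counter on first use; all callers see the same instance.
        counters.computeIfAbsent(name, k -> new AtomicLong(0L)).incrementAndGet();
    }

    public static void main(String[] args) {
        incrementCounter("requests"); // hypothetical metric name
        incrementCounter("requests");
        System.out.println(counters.get("requests")); // prints 2
    }
}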
From source file:com.github.podd.example.ExamplePoddClient.java
public Map<Path, String> uploadToStorage(final List<Path> bagsToUpload, final String sshServerFingerprint,
        final String sshHost, final int portNo, final String username, final Path pathToPublicKey,
        final Path localRootPath, final Path remoteRootPath, final PasswordFinder keyExtractor)
        throws PoddClientException, NoSuchAlgorithmException, IOException {
    final Map<Path, String> results = new ConcurrentHashMap<>();
    final ConcurrentMap<Path, ConcurrentMap<PoddDigestUtils.Algorithm, String>> digests = PoddDigestUtils
            .getDigests(bagsToUpload);
    try (SSHClient sshClient = new SSHClient(ExamplePoddClient.DEFAULT_CONFIG);) {
        sshClient.useCompression();
        sshClient.addHostKeyVerifier(sshServerFingerprint);
        sshClient.connect(sshHost, portNo);
        if (!Files.exists(pathToPublicKey)) {
            throw new PoddClientException("Could not find public key: " + pathToPublicKey);
        }
        if (!SecurityUtils.isBouncyCastleRegistered()) {
            throw new PoddClientException("Bouncy castle needed");
        }
        final FileKeyProvider rsa = new PKCS8KeyFile();
        rsa.init(pathToPublicKey.toFile(), keyExtractor);
        sshClient.authPublickey(username, rsa);
        // Session session = sshClient.startSession();
        try (SFTPClient sftp = sshClient.newSFTPClient();) {
            for (final Path nextBag : bagsToUpload) {
                // Check to make sure that the bag was under the local root path
                final Path localPath = nextBag.toAbsolutePath();
                if (!localPath.startsWith(localRootPath)) {
                    this.log.error("Local bag path was not a direct descendant of the local root path: {} {} {}",
                            localRootPath, nextBag, localPath);
                    throw new PoddClientException(
                            "Local bag path was not a direct descendant of the local root path: " + localPath
                                    + " " + localRootPath);
                }
                // Take the local root path out to get the subpath to use on the remote
                final Path remoteSubPath = localPath.subpath(localRootPath.getNameCount(),
                        nextBag.getNameCount() - 1);
                this.log.info("Remote sub path: {}", remoteSubPath);
                final Path remoteDirPath = remoteRootPath.resolve(remoteSubPath);
                this.log.info("Remote dir path: {}", remoteDirPath);
                final Path remoteBagPath = remoteDirPath.resolve(nextBag.getFileName());
                this.log.info("Remote bag path: {}", remoteBagPath);
                boolean fileFound = false;
                boolean sizeCorrect = false;
                try {
                    // check details of a remote bag
                    final FileAttributes attribs = sftp.lstat(remoteBagPath.toAbsolutePath().toString());
                    final long localSize = Files.size(nextBag);
                    final long remoteSize = attribs.getSize();
                    if (localSize <= 0) {
                        this.log.error("Local bag was empty: {}", nextBag);
                        sizeCorrect = false;
                        fileFound = false;
                    } else if (remoteSize <= 0) {
                        this.log.warn("Remote bag was empty: {} {}", nextBag, attribs);
                        sizeCorrect = false;
                        fileFound = false;
                    } else if (localSize == remoteSize) {
                        this.log.info("Found file on remote already with same size as local: {} {}", nextBag,
                                remoteBagPath);
                        sizeCorrect = true;
                        fileFound = true;
                    } else {
                        sizeCorrect = false;
                        fileFound = true;
                        // We always assume that a non-zero local file is correct
                        // The bags contain time-stamps that will be modified when they are
                        // regenerated, likely changing the file-size, and hopefully changing
                        // the digest checksums
                        // throw new PoddClientException(
                        // "Could not automatically compare file sizes (need manual intervention to delete one) : "
                        // + nextBag + " " + remoteBagPath + " localSize=" + localSize
                        // + " remoteSize=" + remoteSize);
                    }
                } catch (final IOException e) {
                    // lstat() throws an IOException if the file does not exist
                    // Ignore
                    sizeCorrect = false;
                    fileFound = false;
                }
                final ConcurrentMap<Algorithm, String> bagDigests = digests.get(nextBag);
                if (bagDigests.isEmpty()) {
                    this.log.error("No bag digests were generated for bag: {}", nextBag);
                }
                for (final Entry<Algorithm, String> entry : bagDigests.entrySet()) {
                    final Path localDigestPath = localPath
                            .resolveSibling(localPath.getFileName() + entry.getKey().getExtension());
                    // Create the local digest file
                    Files.copy(new ReaderInputStream(new StringReader(entry.getValue()), StandardCharsets.UTF_8),
                            localDigestPath);
                    final Path remoteDigestPath = remoteBagPath
                            .resolveSibling(remoteBagPath.getFileName() + entry.getKey().getExtension());
                    boolean nextDigestFileFound = false;
                    boolean nextDigestCorrect = false;
                    try {
                        final Path tempFile = Files.createTempFile("podd-digest-",
                                entry.getKey().getExtension());
                        final SFTPFileTransfer sftpFileTransfer = new SFTPFileTransfer(sftp.getSFTPEngine());
                        sftpFileTransfer.download(remoteBagPath.toAbsolutePath().toString(),
                                tempFile.toAbsolutePath().toString());
                        nextDigestFileFound = true;
                        final List<String> allLines = Files.readAllLines(tempFile, StandardCharsets.UTF_8);
                        if (allLines.isEmpty()) {
                            nextDigestCorrect = false;
                        } else if (allLines.size() > 1) {
                            nextDigestCorrect = false;
                        }
                        // Check if the digests match exactly
                        else if (allLines.get(0).equals(entry.getValue())) {
                            nextDigestCorrect = true;
                        } else {
                            nextDigestCorrect = false;
                        }
                    } catch (final IOException e) {
                        nextDigestFileFound = false;
                        nextDigestCorrect = false;
                    }
                    if (nextDigestFileFound && nextDigestCorrect) {
                        this.log.info(
                                "Not copying digest to remote as it exists and contains the same content as the local digest");
                    } else if (nextDigestFileFound && !nextDigestCorrect) {
                        this.log.error("Found remote digest but content was not correct: {} {}", localDigestPath,
                                remoteDigestPath);
                        sftp.rm(remoteDigestPath.toString());
                        this.log.info("Copying digest to remote: {}", remoteDigestPath);
                        sftp.put(new FileSystemFile(localDigestPath.toString()), remoteDigestPath.toString());
                    } else if (!nextDigestFileFound) {
                        this.log.info("About to make directories on remote: {}", remoteDirPath);
                        sftp.mkdirs(remoteDirPath.toString());
                        this.log.info("Copying digest to remote: {}", remoteDigestPath);
                        sftp.put(new FileSystemFile(localDigestPath.toString()), remoteDigestPath.toString());
                    }
                }
                if (fileFound && sizeCorrect) {
                    this.log.info("Not copying bag to remote as it exists and is the same size as local bag");
                } else if (fileFound && !sizeCorrect) {
                    this.log.error("Found remote bag but size was not correct: {} {}", nextBag, remoteBagPath);
                    sftp.rm(remoteBagPath.toString());
                    this.log.info("Copying bag to remote: {}", remoteBagPath);
                    sftp.put(new FileSystemFile(localPath.toString()), remoteBagPath.toString());
                } else if (!fileFound) {
                    this.log.info("About to make directories on remote: {}", remoteDirPath);
                    sftp.mkdirs(remoteDirPath.toString());
                    this.log.info("Copying bag to remote: {}", remoteBagPath);
                    sftp.put(new FileSystemFile(localPath.toString()), remoteBagPath.toString());
                }
            }
        }
    } catch (final IOException e) {
        throw new PoddClientException("Could not copy a bag to the remote location", e);
    }
    return results;
}
From source file:com.github.podd.example.ExamplePoddClient.java
/**
 * Generates the RDF triples necessary for the given TrayScan parameters and adds the details to
 * the relevant model in the upload queue.
 *
 * @param projectUriMap
 *            A map of relevant project URIs and their artifact identifiers using their
 *            standardised labels.
 * @param experimentUriMap
 *            A map of relevant experiment URIs and their project URIs using their standardised
 *            labels.
 * @param trayUriMap
 *            A map of relevant tray URIs and their experiment URIs using their standardised
 *            labels.
 * @param potUriMap
 *            A map of relevant pot URIs and their tray URIs using their standardised labels.
 * @param materialUriMap
 *            A map of relevant material URIs and their models, keyed by pot URI.
 * @param genotypeUriMap
 *            A map of relevant genotype URIs and their models, keyed by project URI.
 * @param uploadQueue
 *            The upload queue containing all of the models to be uploaded.
 * @param nextLine
 *            The parsed CSV line containing the TrayScan parameters for the next pot.
 * @throws PoddClientException
 *             If there is a PODD Client exception.
 * @throws GraphUtilException
 *             If there was an ill-formed graph.
 */
public void generateTrayRDF(
        final ConcurrentMap<String, ConcurrentMap<URI, InferredOWLOntologyID>> projectUriMap,
        final ConcurrentMap<String, ConcurrentMap<URI, URI>> experimentUriMap,
        final ConcurrentMap<String, ConcurrentMap<URI, URI>> trayUriMap,
        final ConcurrentMap<String, ConcurrentMap<URI, URI>> potUriMap,
        final ConcurrentMap<URI, ConcurrentMap<URI, Model>> materialUriMap,
        final ConcurrentMap<URI, ConcurrentMap<URI, Model>> genotypeUriMap,
        final ConcurrentMap<InferredOWLOntologyID, Model> uploadQueue, final ExampleCSVLine nextLine)
        throws PoddClientException, GraphUtilException {
    Objects.requireNonNull(nextLine, "Line was null");
    Objects.requireNonNull(nextLine.projectID, "ProjectID in line was null");
    final Map<URI, InferredOWLOntologyID> projectDetails = this.getProjectDetails(projectUriMap,
            nextLine.projectID);
    final URI nextProjectUri = projectDetails.keySet().iterator().next();
    final InferredOWLOntologyID nextProjectID = projectDetails.get(nextProjectUri);
    this.log.debug("Found PODD Project name to URI mapping: {} {}", nextLine.projectID, projectDetails);
    final Map<URI, URI> experimentDetails = this.getExperimentDetails(nextLine.experimentID);
    final URI nextExperimentUri = experimentDetails.keySet().iterator().next();
    // Create or find an existing model for the necessary modifications to this
    // project/artifact
    Model nextResult = new LinkedHashModel();
    final Model putIfAbsent = uploadQueue.putIfAbsent(nextProjectID, nextResult);
    if (putIfAbsent != null) {
        nextResult = putIfAbsent;
    }
    final URI nextTrayUri = this.getTrayUri(trayUriMap, nextLine.trayID, nextProjectID, nextExperimentUri);
    // Check whether plantId already has an assigned URI
    final URI nextPotUri = this.getPotUri(potUriMap, nextLine.plantID, nextProjectID, nextTrayUri);
    // Check whether genus/species/plantName already has an assigned URI (and automatically
    // assign a temporary URI if it does not)
    final URI nextGenotypeUri = this.getGenotypeUri(genotypeUriMap, nextLine.genus, nextLine.species,
            nextLine.plantName, nextLine.plantLineNumber, nextLine.control, nextProjectID, nextProjectUri);
    // Check whether the material for the given genotype for the given pot already has an
    // assigned URI (and automatically assign a temporary URI if it does not)
    final URI nextMaterialUri = this.getMaterialUri(materialUriMap, nextGenotypeUri, nextProjectID, nextPotUri,
            nextLine.potNumber, nextLine.plantLineNumber, nextLine.control);
    // Add new poddScience:Container for tray
    nextResult.add(nextTrayUri, RDF.TYPE, PODD.PODD_SCIENCE_TRAY);
    // Link tray to experiment
    nextResult.add(nextExperimentUri, PODD.PODD_SCIENCE_HAS_TRAY, nextTrayUri);
    // TrayID => Add poddScience:hasBarcode to tray
    nextResult.add(nextTrayUri, PODD.PODD_SCIENCE_HAS_BARCODE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.trayID));
    // TrayNotes => Add rdfs:label to tray
    nextResult.add(nextTrayUri, RDFS.LABEL, RestletPoddClientImpl.vf.createLiteral(nextLine.trayNotes));
    // TrayTypeName => Add poddScience:hasTrayType to tray
    nextResult.add(nextTrayUri, PODD.PODD_SCIENCE_HAS_TRAY_TYPE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.trayTypeName));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_TRAY_NUMBER,
            RestletPoddClientImpl.vf.createLiteral(nextLine.trayNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_TRAY_ROW_NUMBER,
            RestletPoddClientImpl.vf.createLiteral(nextLine.trayRowNumber, XMLSchema.STRING));
    // Add new poddScience:Container for pot
    nextResult.add(nextPotUri, RDF.TYPE, PODD.PODD_SCIENCE_POT);
    // Link pot to tray
    nextResult.add(nextTrayUri, PODD.PODD_SCIENCE_HAS_POT, nextPotUri);
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_MATERIAL, nextMaterialUri);
    // PlantID => Add poddScience:hasBarcode to pot
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_BARCODE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.plantID));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_NUMBER,
            RestletPoddClientImpl.vf.createLiteral(nextLine.potNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_TYPE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.potType, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_COLUMN_NUMBER_OVERALL,
            RestletPoddClientImpl.vf.createLiteral(nextLine.columnNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_COLUMN_NUMBER_REPLICATE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.columnNumberRep, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_COLUMN_NUMBER_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.columnNumberTray, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_COLUMN_LETTER_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.columnLetter));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_POSITION_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.position));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_NUMBER_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.potNumberTray, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_NUMBER_REPLICATE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.potReplicateNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_REPLICATE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.replicateNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_ROW_NUMBER_REPLICATE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.rowNumberRep, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_ROW_NUMBER_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.rowNumberTray, XMLSchema.STRING));
    if (nextGenotypeUri.stringValue().startsWith(RestletPoddClientImpl.TEMP_UUID_PREFIX)) {
        // Add all of the statements for the genotype to the update to make sure that temporary
        // descriptions are added
        nextResult.addAll(genotypeUriMap.get(nextProjectUri).get(nextGenotypeUri));
    }
    if (nextMaterialUri.stringValue().startsWith(RestletPoddClientImpl.TEMP_UUID_PREFIX)) {
        // Add all of the statements for the material to the update to make sure that temporary
        // descriptions are added
        nextResult.addAll(materialUriMap.get(nextPotUri).get(nextMaterialUri));
    }
    String potLabel;
    // PlantNotes => Add rdfs:label to pot
    if (nextLine.plantNotes == null || nextLine.plantNotes.isEmpty()) {
        potLabel = "Pot " + nextLine.plantName;
    } else {
        potLabel = "Pot " + nextLine.plantName + " : " + nextLine.plantNotes;
    }
    nextResult.add(nextPotUri, RDFS.LABEL, RestletPoddClientImpl.vf.createLiteral(potLabel));
}
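A detail worth noting in the excerpt above: chained lookups such as genotypeUriMap.get(nextProjectUri).get(nextGenotypeUri) throw a NullPointerException if the outer key is absent, an invariant the surrounding startsWith guard is relied on to maintain. Where that invariant is not guaranteed, Map.getOrDefault (Java 8+) makes the missing-key case explicit; a small sketch with invented keys:

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class NestedGetExample {
    public static void main(String[] args) {
        final ConcurrentMap<String, Map<String, String>> outer = new ConcurrentHashMap<>();

        // getOrDefault avoids a NullPointerException when the outer key is missing
        String value = outer.getOrDefault("project-1", Collections.<String, String>emptyMap())
                .get("genotype-1"); // hypothetical keys
        System.out.println(value); // null, but no NPE
    }
}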
From source file:com.bytelightning.opensource.pokerface.PokerFace.java
/**
 * Configures all the needed components, but does not actually start the server.
 * @param config Contains all information needed to fully wire up the http, https, and httpclient components of this reverse proxy.
 * @throws Exception Yeah, a lot can go wrong here, but at least it will be caught immediately :-)
 */
public void config(HierarchicalConfiguration config) throws Exception {
    List<HierarchicalConfiguration> lconf;
    HttpAsyncRequester executor = null;
    BasicNIOConnPool connPool = null;
    ObjectPool<ByteBuffer> byteBufferPool = null;
    LinkedHashMap<String, TargetDescriptor> mappings = null;
    ConcurrentMap<String, HttpHost> hosts = null;
    handlerRegistry = new UriHttpAsyncRequestHandlerMapper();
    // Initialize the keystore (if one was specified)
    KeyStore keystore = null;
    char[] keypass = null;
    String keystoreUri = config.getString("keystore");
    if ((keystoreUri != null) && (keystoreUri.trim().length() > 0)) {
        Path keystorePath = Utils.MakePath(keystoreUri);
        if (!Files.exists(keystorePath))
            throw new ConfigurationException("Keystore does not exist.");
        if (Files.isDirectory(keystorePath))
            throw new ConfigurationException("Keystore is not a file");
        String storepass = config.getString("storepass");
        if ((storepass != null) && "null".equals(storepass))
            storepass = null;
        keystore = KeyStore.getInstance(KeyStore.getDefaultType());
        try (InputStream keyStoreStream = Files.newInputStream(keystorePath)) {
            keystore.load(keyStoreStream, storepass == null ? null : storepass.trim().toCharArray());
        } catch (IOException ex) {
            Logger.error("Unable to load https server keystore from " + keystoreUri);
            return;
        }
        keypass = config.getString("keypass").trim().toCharArray();
    }
    // Wire up the listening reactor
    lconf = config.configurationsAt("server");
    if ((lconf == null) || (lconf.size() != 1))
        throw new ConfigurationException("One (and only one) server configuration element is allowed.");
    else {
        Builder builder = IOReactorConfig.custom();
        builder.setIoThreadCount(ComputeReactorProcessors(config.getDouble("server[@cpu]", 0.667)));
        builder.setSoTimeout(config.getInt("server[@soTimeout]", 0));
        builder.setSoLinger(config.getInt("server[@soLinger]", -1));
        builder.setSoReuseAddress(true);
        builder.setTcpNoDelay(false);
        builder.setSelectInterval(100);
        IOReactorConfig rconfig = builder.build();
        Logger.info("Configuring server with options: " + rconfig.toString());
        listeningReactor = new DefaultListeningIOReactor(rconfig);

        lconf = config.configurationsAt("server.listen");
        InetSocketAddress addr;
        boolean hasNonWildcardSecure = false;
        LinkedHashMap<SocketAddress, SSLContext> addrSSLContext = new LinkedHashMap<SocketAddress, SSLContext>();
        if ((lconf == null) || (lconf.size() == 0)) {
            addr = new InetSocketAddress("127.0.0.1", 8080);
            ListenerEndpoint ep = listeningReactor.listen(addr);
            Logger.warn("Configured " + ep.getAddress());
        } else {
            TrustManager[] trustManagers = null;
            KeyManagerFactory kmf = null;
            // Create all the specified listeners.
            for (HierarchicalConfiguration hc : lconf) {
                String addrStr = hc.getString("[@address]");
                if ((addrStr == null) || (addrStr.length() == 0))
                    addrStr = "0.0.0.0";
                String alias = hc.getString("[@alias]");
                int port = hc.getInt("[@port]", alias != null ? 443 : 80);
                addr = new InetSocketAddress(addrStr, port);
                ListenerEndpoint ep = listeningReactor.listen(addr);
                String protocol = hc.containsKey("[@protocol]") ? hc.getString("[@protocol]") : null;
                Boolean secure = hc.containsKey("[@secure]") ? hc.getBoolean("[@secure]") : null;
                if ((alias != null) && (secure == null))
                    secure = true;
                if ((protocol != null) && (secure == null))
                    secure = true;
                if ((secure != null) && secure) {
                    if (protocol == null)
                        protocol = "TLS";
                    if (keystore == null)
                        throw new ConfigurationException(
                                "An https listening socket was requested, but no keystore was specified.");
                    if (kmf == null) {
                        kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
                        kmf.init(keystore, keypass);
                    }
                    // Are we going to trust all clients or just specific ones?
                    if (hc.getBoolean("[@trustAny]", true))
                        trustManagers = new TrustManager[] { new X509TrustAllManager() };
                    else {
                        TrustManagerFactory instance = TrustManagerFactory
                                .getInstance(TrustManagerFactory.getDefaultAlgorithm());
                        instance.init(keystore);
                        trustManagers = instance.getTrustManagers();
                    }
                    KeyManager[] keyManagers = kmf.getKeyManagers();
                    if (alias != null)
                        for (int i = 0; i < keyManagers.length; i++) {
                            if (keyManagers[i] instanceof X509ExtendedKeyManager)
                                keyManagers[i] = new PokerFaceKeyManager(alias,
                                        (X509ExtendedKeyManager) keyManagers[i]);
                        }
                    SSLContext sslCtx = SSLContext.getInstance(protocol);
                    sslCtx.init(keyManagers, trustManagers, new SecureRandom());
                    if (addr.getAddress().isAnyLocalAddress()) {
                        // This little optimization helps us respond faster for every connection as we don't
                        // have to extrapolate a local connection address to wild card.
                        for (Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces(); en
                                .hasMoreElements();) {
                            NetworkInterface intf = en.nextElement();
                            for (Enumeration<InetAddress> enumIpAddr = intf.getInetAddresses(); enumIpAddr
                                    .hasMoreElements();) {
                                addr = new InetSocketAddress(enumIpAddr.nextElement(), port);
                                addrSSLContext.put(addr, sslCtx);
                            }
                        }
                    } else {
                        addrSSLContext.put(addr, sslCtx);
                        hasNonWildcardSecure = true;
                    }
                }
                Logger.warn("Configured " + (alias == null ? "" : (protocol + " on")) + ep.getAddress());
            }
        }
        // We will need an HTTP protocol processor for the incoming connections
        String serverAgent = config.getString("server.serverAgent", "PokerFace/" + Utils.Version);
        HttpProcessor inhttpproc = new ImmutableHttpProcessor(new HttpResponseInterceptor[] {
                new ResponseDateInterceptor(), new ResponseServer(serverAgent), new ResponseContent(),
                new ResponseConnControl() });
        HttpAsyncService serviceHandler = new HttpAsyncService(inhttpproc, new DefaultConnectionReuseStrategy(),
                null, handlerRegistry, null) {
            public void exception(final NHttpServerConnection conn, final Exception cause) {
                Logger.warn(cause.getMessage());
                super.exception(conn, cause);
            }
        };
        if (addrSSLContext.size() > 0) {
            final SSLContext defaultCtx = addrSSLContext.values().iterator().next();
            final Map<SocketAddress, SSLContext> sslMap;
            if ((!hasNonWildcardSecure) || (addrSSLContext.size() == 1))
                sslMap = null;
            else
                sslMap = addrSSLContext;
            listeningDispatcher = new DefaultHttpServerIODispatch(serviceHandler,
                    new SSLNHttpServerConnectionFactory(defaultCtx, null, ConnectionConfig.DEFAULT) {
                        protected SSLIOSession createSSLIOSession(IOSession iosession, SSLContext sslcontext,
                                SSLSetupHandler sslHandler) {
                            SSLIOSession retVal;
                            SSLContext sktCtx = sslcontext;
                            if (sslMap != null) {
                                SocketAddress la = iosession.getLocalAddress();
                                if (la != null) {
                                    sktCtx = sslMap.get(la);
                                    if (sktCtx == null)
                                        sktCtx = sslcontext;
                                }
                                retVal = new SSLIOSession(iosession, SSLMode.SERVER, sktCtx, sslHandler);
                            } else
                                retVal = super.createSSLIOSession(iosession, sktCtx, sslHandler);
                            if (sktCtx != null)
                                retVal.setAttribute("com.bytelightning.opensource.pokerface.secure", true);
                            return retVal;
                        }
                    });
        } else
            listeningDispatcher = new DefaultHttpServerIODispatch(serviceHandler, ConnectionConfig.DEFAULT);
    }

    // Configure the httpclient reactor that will be used to do reverse proxying to the specified targets.
    lconf = config.configurationsAt("targets");
    if ((lconf != null) && (lconf.size() > 0)) {
        HierarchicalConfiguration conf = lconf.get(0);
        Builder builder = IOReactorConfig.custom();
        builder.setIoThreadCount(ComputeReactorProcessors(config.getDouble("targets[@cpu]", 0.667)));
        builder.setSoTimeout(conf.getInt("targets[@soTimeout]", 0));
        builder.setSoLinger(config.getInt("targets[@soLinger]", -1));
        builder.setConnectTimeout(conf.getInt("targets[@connectTimeout]", 0));
        builder.setSoReuseAddress(true);
        builder.setTcpNoDelay(false);
        connectingReactor = new DefaultConnectingIOReactor(builder.build());

        final int bufferSize = conf.getInt("targets[@bufferSize]", 1024) * 1024;
        byteBufferPool = new SoftReferenceObjectPool<ByteBuffer>(new BasePooledObjectFactory<ByteBuffer>() {
            @Override
            public ByteBuffer create() throws Exception {
                return ByteBuffer.allocateDirect(bufferSize);
            }

            @Override
            public PooledObject<ByteBuffer> wrap(ByteBuffer buffer) {
                return new DefaultPooledObject<ByteBuffer>(buffer);
            }
        });
        KeyManager[] keyManagers = null;
        TrustManager[] trustManagers = null;
        if (keystore != null) {
            KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
            kmf.init(keystore, keypass);
            keyManagers = kmf.getKeyManagers();
        }
        // Will the httpclients trust any remote target, or only specific ones?
        if (conf.getBoolean("targets[@trustAny]", false))
            trustManagers = new TrustManager[] { new X509TrustAllManager() };
        else if (keystore != null) {
            TrustManagerFactory instance = TrustManagerFactory
                    .getInstance(TrustManagerFactory.getDefaultAlgorithm());
            instance.init(keystore);
            trustManagers = instance.getTrustManagers();
        }
        SSLContext clientSSLContext = SSLContext.getInstance(conf.getString("targets[@protocol]", "TLS"));
        clientSSLContext.init(keyManagers, trustManagers, new SecureRandom());
        // Set up an SSL-capable connection pool for the httpclients.
        connPool = new BasicNIOConnPool(connectingReactor,
                new BasicNIOConnFactory(clientSSLContext, null, ConnectionConfig.DEFAULT),
                conf.getInt("targets[@connectTimeout]", 0));
        connPool.setMaxTotal(conf.getInt("targets[@connMaxTotal]", 1023));
        connPool.setDefaultMaxPerRoute(conf.getInt("targets[@connMaxPerRoute]", 1023));
        // Set up HTTP protocol processor for outgoing connections
        String userAgent = conf.getString("targets.userAgent", "PokerFace/" + Utils.Version);
        HttpProcessor outhttpproc = new ImmutableHttpProcessor(new HttpRequestInterceptor[] {
                new RequestContent(), new RequestTargetHost(), new RequestConnControl(),
                new RequestUserAgent(userAgent), new RequestExpectContinue(true) });
        executor = new HttpAsyncRequester(outhttpproc, new DefaultConnectionReuseStrategy());
        // Now set up all the configured targets.
        mappings = new LinkedHashMap<String, TargetDescriptor>();
        hosts = new ConcurrentHashMap<String, HttpHost>();
        String[] scheme = { null };
        String[] host = { null };
        int[] port = { 0 };
        String[] path = { null };
        int[] stripPrefixCount = { 0 };
        for (HierarchicalConfiguration targetConfig : conf.configurationsAt("target")) {
            String match = targetConfig.getString("[@pattern]");
            if ((match == null) || (match.trim().length() < 1)) {
                Logger.error("Unable to configure target; Invalid url match pattern");
                continue;
            }
            String key = RequestForTargetConsumer.UriToTargetKey(targetConfig.getString("[@url]"), scheme, host,
                    port, path, stripPrefixCount);
            if (key == null) {
                Logger.error("Unable to configure target");
                continue;
            }
            HttpHost targetHost = hosts.get(key);
            if (targetHost == null) {
                targetHost = new HttpHost(host[0], port[0], scheme[0]);
                hosts.put(key, targetHost);
            }
            TargetDescriptor desc = new TargetDescriptor(targetHost, path[0], stripPrefixCount[0]);
            mappings.put(match, desc);
        }
        connectionDispatcher = new DefaultHttpClientIODispatch(new HttpAsyncRequestExecutor(),
                ConnectionConfig.DEFAULT);
    }
    // Allocate the script map which will be populated by its own executor thread.
    if (config.containsKey("scripts.rootDirectory")) {
        Path tmp = Utils.MakePath(config.getProperty("scripts.rootDirectory"));
        if (!Files.exists(tmp))
            throw new FileNotFoundException("Scripts directory does not exist.");
        if (!Files.isDirectory(tmp))
            throw new FileNotFoundException("'scripts' path is not a directory.");
        scripts = new ConcurrentSkipListMap<String, ScriptObjectMirror>();
        boolean watch = config.getBoolean("scripts.dynamicWatch", false);
        List<Path> jsLibs;
        Object prop = config.getProperty("scripts.library");
        if (prop != null) {
            jsLibs = new ArrayList<Path>();
            if (prop instanceof Collection<?>) {
                @SuppressWarnings("unchecked")
                Collection<Object> oprop = (Collection<Object>) prop;
                for (Object obj : oprop)
                    jsLibs.add(Utils.MakePath(obj));
            } else {
                jsLibs.add(Utils.MakePath(prop));
            }
        } else
            jsLibs = null;
        lconf = config.configurationsAt("scripts.scriptConfig");
        if (lconf != null) {
            if (lconf.size() > 1)
                throw new ConfigurationException("Only one scriptConfig element is allowed.");
            if (lconf.size() == 0)
                lconf = null;
        }
        HierarchicalConfiguration scriptConfig;
        if (lconf == null)
            scriptConfig = new HierarchicalConfiguration();
        else
            scriptConfig = lconf.get(0);
        scriptConfig.setProperty("pokerface.scripts.rootDirectory", tmp.toString());
        configureScripts(jsLibs, scriptConfig, tmp, watch);
        if (watch)
            ScriptDirectoryWatcher = new DirectoryWatchService();
    }
    // Configure the static file directory (if any)
    Path staticFilesPath = null;
    if (config.containsKey("files.rootDirectory")) {
        Path tmp = Utils.MakePath(config.getProperty("files.rootDirectory"));
        if (!Files.exists(tmp))
            throw new FileNotFoundException("Files directory does not exist.");
        if (!Files.isDirectory(tmp))
            throw new FileNotFoundException("'files' path is not a directory.");
        staticFilesPath = tmp;
        List<HierarchicalConfiguration> mimeEntries = config.configurationsAt("files.mime-entry");
        if (mimeEntries != null) {
            for (HierarchicalConfiguration entry : mimeEntries) {
                entry.setDelimiterParsingDisabled(true);
                String type = entry.getString("[@type]", "").trim();
                if (type.length() == 0)
                    throw new ConfigurationException("Invalid mime type entry");
                String extensions = entry.getString("[@extensions]", "").trim();
                if (extensions.length() == 0)
                    throw new ConfigurationException("Invalid mime extensions for: " + type);
                ScriptHelperImpl.AddMimeEntry(type, extensions);
            }
        }
    }
    handlerRegistry.register("/*",
            new RequestHandler(executor, connPool, byteBufferPool, staticFilesPath, mappings,
                    scripts != null ? Collections.unmodifiableNavigableMap(scripts) : null,
                    config.getBoolean("scripts.allowScriptsToSpecifyDynamicHosts", false) ? hosts : null));
}