List of usage examples for the java.util.concurrent.ThreadPoolExecutor constructor
public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue)
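Before the examples, a minimal, self-contained sketch of this constructor in isolation (all sizes here are arbitrary illustration values, not recommendations): corePoolSize threads are kept even when idle, the pool only grows toward maximumPoolSize once workQueue is full, and threads beyond the core size are reclaimed after keepAliveTime of idleness.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorExample {
    public static void main(String[] args) throws InterruptedException {
        // 2 core threads, up to 4 total; extra threads idle out after 30s.
        // The bounded queue holds 100 pending tasks before the pool grows.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                2, 4, 30L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100));
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            executor.execute(() -> System.out.println("task " + taskId
                    + " on " + Thread.currentThread().getName()));
        }
        executor.shutdown();                       // stop accepting new tasks
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}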
From source file:org.lilyproject.indexer.batchbuild.IndexingMapper.java
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    try {
        Configuration jobConf = context.getConfiguration();

        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", jobConf.get("hbase.zookeeper.quorum"));
        conf.set("hbase.zookeeper.property.clientPort",
                jobConf.get("hbase.zookeeper.property.clientPort"));

        idGenerator = new IdGeneratorImpl();

        String zkConnectString = jobConf.get("org.lilyproject.indexer.batchbuild.zooKeeperConnectString");
        int zkSessionTimeout = getIntProp("org.lilyproject.indexer.batchbuild.zooKeeperSessionTimeout",
                null, jobConf);
        zk = ZkUtil.connect(zkConnectString, zkSessionTimeout);
        hbaseTableFactory = new HBaseTableFactoryImpl(conf, null, null);
        TypeManager typeManager = new HBaseTypeManager(idGenerator, conf, zk, hbaseTableFactory);

        BlobStoreAccessFactory blobStoreAccessFactory = LilyClient.getBlobStoreAccess(zk);

        RowLog wal = new DummyRowLog("The write ahead log should not be called from within MapReduce jobs.");
        repository = new HBaseRepository(typeManager, idGenerator, blobStoreAccessFactory, wal, conf,
                hbaseTableFactory);

        byte[] indexerConfBytes = Base64.decode(jobConf.get("org.lilyproject.indexer.batchbuild.indexerconf"));
        IndexerConf indexerConf = IndexerConfBuilder.build(new ByteArrayInputStream(indexerConfBytes),
                repository);

        Map<String, String> solrShards = new HashMap<String, String>();
        for (int i = 1; true; i++) {
            String shardName = jobConf.get("org.lilyproject.indexer.batchbuild.solrshard.name." + i);
            String shardAddress = jobConf.get("org.lilyproject.indexer.batchbuild.solrshard.address." + i);
            if (shardName == null)
                break;
            solrShards.put(shardName, shardAddress);
        }

        ShardSelector shardSelector;
        String shardingConf = jobConf.get("org.lilyproject.indexer.batchbuild.shardingconf");
        if (shardingConf != null) {
            byte[] shardingConfBytes = Base64.decode(shardingConf);
            shardSelector = JsonShardSelectorBuilder.build(shardingConfBytes);
        } else {
            shardSelector = DefaultShardSelectorBuilder.createDefaultSelector(solrShards);
        }

        connectionManager = new MultiThreadedHttpConnectionManager();
        connectionManager.getParams().setDefaultMaxConnectionsPerHost(5);
        connectionManager.getParams().setMaxTotalConnections(50);
        HttpClient httpClient = new HttpClient(connectionManager);

        SolrServers solrServers = new SolrServers(solrShards, shardSelector, httpClient);

        indexLocker = new IndexLocker(zk);
        indexer = new Indexer(indexerConf, repository, solrServers, indexLocker, new IndexerMetrics("dummy"));

        int workers = getIntProp("org.lilyproject.indexer.batchbuild.threads", 5, jobConf);

        executor = new ThreadPoolExecutor(workers, workers, 10, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(1000));
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
    } catch (Exception e) {
        throw new IOException("Error in index build map task setup.", e);
    }
}
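The CallerRunsPolicy set above is a common back-pressure trick: when all workers are busy and the 1000-slot queue is full, the submitting thread runs the rejected task itself, which naturally throttles submission. A standalone sketch of the effect (pool and queue sizes shrunk to make rejections easy to trigger):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CallerRunsDemo {
    public static void main(String[] args) throws InterruptedException {
        // One worker and a two-slot queue so rejections are easy to trigger.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 1, 10L, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(2));
        pool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        // Each task prints the thread it runs on; rejected tasks run on "main".
        for (int i = 0; i < 6; i++) {
            pool.execute(() -> {
                System.out.println(Thread.currentThread().getName());
                try { Thread.sleep(100); } catch (InterruptedException ignored) {}
            });
        }
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}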
From source file:org.geoserver.GeoserverInitStartupListener.java
public void contextInitialized(ServletContextEvent sce) {
    // start up tctool - remove it before committing!!!!
    // new tilecachetool.TCTool().setVisible(true);

    // make sure we remember if GeoServer controls logging or not
    String strValue = GeoServerExtensions.getProperty(LoggingUtils.RELINQUISH_LOG4J_CONTROL,
            sce.getServletContext());
    relinquishLoggingControl = Boolean.valueOf(strValue);

    // If the server admin did not set it up otherwise, force X/Y axis ordering.
    // This is a good place because we need to initialize this property before
    // any other operation can trigger the initialization of the CRS subsystem.
    if (System.getProperty("org.geotools.referencing.forceXY") == null) {
        System.setProperty("org.geotools.referencing.forceXY", "true");
    }
    if (Boolean.TRUE.equals(Hints.getSystemDefault(Hints.FORCE_LONGITUDE_FIRST_AXIS_ORDER))) {
        Hints.putSystemDefault(Hints.FORCE_AXIS_ORDER_HONORING, "http");
    }
    Hints.putSystemDefault(Hints.LENIENT_DATUM_SHIFT, true);

    // Set up the referencing tolerance to make it more tolerant to tiny differences
    // between projections (increases the chance of matching a random prj file content
    // to an actual EPSG code).
    Hints.putSystemDefault(Hints.COMPARISON_TOLERANCE, 1e-9);

    // Don't allow the connection to the EPSG database to time out. This is a server app;
    // we can afford keeping the EPSG db always on.
    System.setProperty("org.geotools.epsg.factory.timeout", "-1");

    // HACK: java.util.prefs are awful. See
    // http://www.allaboutbalance.com/disableprefs. When the site comes
    // back up we should implement their better way of fixing the problem.
    System.setProperty("java.util.prefs.syncInterval", "5000000");

    // Fix issue with Tomcat and JreMemoryLeakPreventionListener causing issues with
    // IIORegistry leading to imageio plugins not being properly initialized.
    ImageIO.scanForPlugins();

    // HACK: under JDK 1.4.2 the native java image i/o stuff is failing
    // in all containers besides Tomcat. If running under JDK 1.4.2 we
    // disable the native codecs, unless the user forced the setting already.
    if (System.getProperty("java.version").startsWith("1.4")
            && (System.getProperty("com.sun.media.imageio.disableCodecLib") == null)) {
        LOGGER.warning("Disabling mediaLib acceleration since this is a "
                + "java 1.4 VM.\n If you want to force its enabling, "
                + "set -Dcom.sun.media.imageio.disableCodecLib=true "
                + "in your virtual machine");
        System.setProperty("com.sun.media.imageio.disableCodecLib", "true");
    } else {
        // In any case, the native png reader is worse than the pure java ones, so
        // let's disable it (the native png writer is, on the other hand, faster).
        ImageIOExt.allowNativeCodec("png", ImageReaderSpi.class, false);
        ImageIOExt.allowNativeCodec("png", ImageWriterSpi.class, true);
    }

    // Initialize GeoTools factories so that we don't make an SPI lookup every time a factory is needed.
    Hints.putSystemDefault(Hints.FILTER_FACTORY, CommonFactoryFinder.getFilterFactory2(null));
    Hints.putSystemDefault(Hints.STYLE_FACTORY, CommonFactoryFinder.getStyleFactory(null));
    Hints.putSystemDefault(Hints.FEATURE_FACTORY, CommonFactoryFinder.getFeatureFactory(null));

    // Initialize the default executor service.
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(CoverageAccessInfoImpl.DEFAULT_CorePoolSize,
            CoverageAccessInfoImpl.DEFAULT_MaxPoolSize, CoverageAccessInfoImpl.DEFAULT_KeepAliveTime,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    Hints.putSystemDefault(Hints.EXECUTOR_SERVICE, executor);
}
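A note on the pool configuration above: with an unbounded LinkedBlockingQueue, the queue never reports itself full, so the pool never grows past corePoolSize and the maximumPoolSize argument is effectively unused. A small sketch demonstrating this (the sizes are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class UnboundedQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 8, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 100; i++) {
            pool.execute(() -> {
                try { Thread.sleep(50); } catch (InterruptedException ignored) {}
            });
        }
        // Prints 2: the queue never rejects an offer, so the pool
        // never grows beyond its core size of 2.
        System.out.println("pool size = " + pool.getPoolSize());
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}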
From source file:org.apache.cassandra.hadoop.AbstractColumnFamilyInputFormat.java
public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = HadoopCompat.getConfiguration(context);

    validateConfiguration(conf);

    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);

    keyspace = ConfigHelper.getInputKeyspace(conf);
    cfName = ConfigHelper.getInputColumnFamily(conf);
    partitioner = ConfigHelper.getInputPartitioner(conf);
    logger.debug("partitioner is " + partitioner);

    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
        List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>();
        KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null) {
            if (jobKeyRange.start_key != null) {
                if (!partitioner.preservesOrder())
                    throw new UnsupportedOperationException(
                            "KeyRange based on keys can only be used with an order-preserving partitioner");
                if (jobKeyRange.start_token != null)
                    throw new IllegalArgumentException("only start_key supported");
                if (jobKeyRange.end_token != null)
                    throw new IllegalArgumentException("only start_key supported");
                jobRange = new Range<>(partitioner.getToken(jobKeyRange.start_key),
                        partitioner.getToken(jobKeyRange.end_key), partitioner);
            } else if (jobKeyRange.start_token != null) {
                jobRange = new Range<>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token),
                        partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
            } else {
                logger.warn("ignoring jobKeyRange specified without start_key or start_token");
            }
        }

        for (TokenRange range : masterRangeNodes) {
            if (jobRange == null) {
                // for each range, pick a live owner and ask it to compute bite-sized splits
                splitfutures.add(executor.submit(new SplitCallable(range, conf)));
            } else {
                Range<Token> dhtRange = new Range<Token>(
                        partitioner.getTokenFactory().fromString(range.start_token),
                        partitioner.getTokenFactory().fromString(range.end_token), partitioner);

                if (dhtRange.intersects(jobRange)) {
                    for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
                        range.start_token = partitioner.getTokenFactory().toString(intersection.left);
                        range.end_token = partitioner.getTokenFactory().toString(intersection.right);
                        // for each range, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                    }
                }
            }
        }

        // wait until we have all the results back
        for (Future<List<InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }

    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}
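This pool combines corePoolSize = 0 with an unbounded queue, which deserves a caveat: execute() only adds a worker beyond the core size when the queue rejects a task, plus one worker when the pool is empty, so on modern JDKs this configuration runs at most one thread at a time and never approaches the 128 maximum. A hedged sketch of that behavior:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ZeroCoreDemo {
    public static void main(String[] args) throws InterruptedException {
        // Same shape as the pool above: zero core threads, unbounded queue.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                0, 128, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 20; i++) {
            pool.execute(() -> {
                try { Thread.sleep(20); } catch (InterruptedException ignored) {}
            });
        }
        // On JDK 7+ this prints 1: the unbounded queue never fills, and a
        // worker is only added when the pool is empty, so the 128 maximum
        // is never reached.
        System.out.println("pool size = " + pool.getPoolSize());
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}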
From source file:org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.java
/**
 * Creates a replication manager and sets the watch on all the other registered region servers
 * @param replicationQueues the interface for manipulating replication queues
 * @param replicationPeers the interface for manipulating replication peers
 * @param replicationTracker the tracker of replication state changes
 * @param conf the configuration to use
 * @param stopper the stopper object for this region server
 * @param fs the file system to use
 * @param logDir the directory that contains all hlog directories of live RSs
 * @param oldLogDir the directory where old logs are archived
 * @param clusterId the UUID of this cluster
 */
public ReplicationSourceManager(final ReplicationQueues replicationQueues,
        final ReplicationPeers replicationPeers, final ReplicationTracker replicationTracker,
        final Configuration conf, final Stoppable stopper, final FileSystem fs, final Path logDir,
        final Path oldLogDir, final UUID clusterId) {
    this.sources = new ArrayList<ReplicationSourceInterface>();
    this.replicationQueues = replicationQueues;
    this.replicationPeers = replicationPeers;
    this.replicationTracker = replicationTracker;
    this.stopper = stopper;
    this.hlogsById = new HashMap<String, SortedSet<String>>();
    this.oldsources = new ArrayList<ReplicationSourceInterface>();
    this.conf = conf;
    this.fs = fs;
    this.logDir = logDir;
    this.oldLogDir = oldLogDir;
    this.sleepBeforeFailover = conf.getLong("replication.sleep.before.failover", 2000);
    this.clusterId = clusterId;
    this.replicationTracker.registerListener(this);
    this.replicationPeers.getAllPeerIds();
    // It's preferable to fail over 1 RS at a time, but with good zk servers
    // more could be processed at the same time.
    int nbWorkers = conf.getInt("replication.executor.workers", 1);
    // Use a short 100ms keep-alive since this could be done inline with a RS startup;
    // even if we fail, other region servers can take care of it.
    this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, 100, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>());
    ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
    tfb.setNameFormat("ReplicationExecutor-%d");
    this.executor.setThreadFactory(tfb.build());
    this.rand = new Random();
}
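ThreadFactoryBuilder here is Guava; if that dependency is unwanted, the same thread-naming effect takes only a few lines of plain JDK. A sketch (the NamedThreadFactory class is hypothetical, not part of HBase):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical plain-JDK stand-in for Guava's ThreadFactoryBuilder.
class NamedThreadFactory implements ThreadFactory {
    private final AtomicInteger counter = new AtomicInteger();
    private final String prefix;

    NamedThreadFactory(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
        // Produces names like "ReplicationExecutor-0", "ReplicationExecutor-1", ...
        return new Thread(r, prefix + "-" + counter.getAndIncrement());
    }
}

class NamedPoolExample {
    static final ThreadPoolExecutor EXECUTOR = new ThreadPoolExecutor(1, 1, 100,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
            new NamedThreadFactory("ReplicationExecutor"));
}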
From source file:org.wso2.carbon.event.output.adaptor.http.HTTPEventAdaptor.java
@Override
protected void init() {
    this.populateAdapterMessageProps();
    this.supportOutputMessageTypes = new ArrayList<String>();
    this.supportOutputMessageTypes.add(MessageType.XML);
    this.supportOutputMessageTypes.add(MessageType.JSON);
    this.supportOutputMessageTypes.add(MessageType.TEXT);

    this.executorService = new ThreadPoolExecutor(HTTPEventAdaptorConstants.ADAPTER_MIN_THREAD_POOL_SIZE,
            HTTPEventAdaptorConstants.ADAPTER_MAX_THREAD_POOL_SIZE,
            HTTPEventAdaptorConstants.DEFAULT_KEEP_ALIVE_TIME, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(HTTPEventAdaptorConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE));
}
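Because this pool uses a bounded queue with no explicit RejectedExecutionHandler, it falls back to the default AbortPolicy: once the maximum number of threads is busy and the queue is full, execute() throws RejectedExecutionException. A minimal sketch of that failure mode (sizes shrunk for illustration):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class AbortPolicyDemo {
    public static void main(String[] args) {
        // Bounded queue with the default AbortPolicy: once the single worker
        // is busy and the one queue slot is taken, execute() throws.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 1, 0L, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(1));
        Runnable slow = () -> {
            try { Thread.sleep(200); } catch (InterruptedException ignored) {}
        };
        try {
            pool.execute(slow); // taken by the worker
            pool.execute(slow); // parked in the queue
            pool.execute(slow); // rejected
        } catch (RejectedExecutionException e) {
            System.out.println("rejected: " + e.getMessage());
        } finally {
            pool.shutdown();
        }
    }
}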
From source file:com.pinterest.rocksplicator.controller.DispatcherTest.java
@Test
public void testChainedTask() throws Exception {
    TaskBase task = new SleepIncrementTask(100).andThen(new SleepIncrementTask(150))
            .andThen(new SleepIncrementTask(200)).getEntity();

    final CountDownLatch latch = new CountDownLatch(3);
    FIFOTaskQueue tq = new FIFOTaskQueue(10) {
        @Override
        public boolean finishTask(final long id, final String output) {
            latch.countDown();
            return super.finishTask(id, output);
        }

        @Override
        public long finishTaskAndEnqueueRunningTask(final long id, final String output,
                final TaskBase newTask, final String worker) {
            latch.countDown();
            return super.finishTaskAndEnqueueRunningTask(id, output, newTask, worker);
        }
    };
    tq.enqueueTask(task, Integer.toString(++nameCounter), 0);

    Semaphore idleWorkersSemaphore = new Semaphore(2);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(2, 2, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(2));
    WorkerPool workerPool = new WorkerPool(threadPoolExecutor, idleWorkersSemaphore, tq);
    TaskDispatcher dispatcher = new TaskDispatcher(2, idleWorkersSemaphore, workerPool, tq);
    dispatcher.start();

    Assert.assertTrue(latch.await(30, TimeUnit.SECONDS));
    Assert.assertEquals(SleepIncrementTask.executionCounter.intValue(), 3);
    Assert.assertEquals(tq.getResult(0), "0");
    Assert.assertEquals(tq.getResult(1), "1");
    Assert.assertEquals(tq.getResult(2), "2");
    dispatcher.stop();
}
From source file:org.apache.cxf.systest.jaxrs.AbstractJAXRSContinuationsTest.java
protected void doTestContinuation(String pathSegment) throws Exception {
    final String port = getPort();
    ThreadPoolExecutor executor = new ThreadPoolExecutor(5, 5, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(10));
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(1);
    List<BookWorker> workers = new ArrayList<>(5);
    for (int x = 1; x < 6; x++) {
        workers.add(new BookWorker("http://localhost:" + port + getBaseAddress() + pathSegment + "/" + x,
                Integer.toString(x), "CXF in Action" + x, startSignal, doneSignal));
    }
    for (BookWorker w : workers) {
        executor.execute(w);
    }

    startSignal.countDown();
    doneSignal.await(60, TimeUnit.SECONDS);
    executor.shutdownNow();
    assertEquals("Not all invocations have completed", 0, doneSignal.getCount());
    for (BookWorker w : workers) {
        w.checkError();
    }
}
From source file:com.mobicomkit.api.attachment.AttachmentManager.java
/**
 * Constructs the work queues and thread pools used to download and decode images.
 */
private AttachmentManager() {
    attachmentInProgress = new ArrayList<String>();
    attachmentTaskList = new ArrayList<AttachmentTask>();

    /*
     * Creates a work queue for the pool of Thread objects used for downloading, using a linked
     * list queue that blocks when the queue is empty.
     */
    mDownloadWorkQueue = new LinkedBlockingQueue<Runnable>();

    /*
     * Creates a work queue for the pool of Thread objects used for decoding, using a linked
     * list queue that blocks when the queue is empty.
     */
    mDecodeWorkQueue = new LinkedBlockingQueue<Runnable>();

    /*
     * Creates a work queue for the set of task objects that control downloading and
     * decoding, using a linked list queue that blocks when the queue is empty.
     */
    mPhotoTaskWorkQueue = new LinkedBlockingQueue<AttachmentTask>();

    // Creates a new pool of Thread objects for the download work queue.
    mDownloadThreadPool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, mDownloadWorkQueue);

    // Creates a new pool of Thread objects for the decoding work queue.
    mDecodeThreadPool = new ThreadPoolExecutor(NUMBER_OF_CORES, NUMBER_OF_CORES, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, mDecodeWorkQueue);

    // Instantiates a new cache based on the cache size estimate.
    mPhotoCache = new LruCache<String, Bitmap>(IMAGE_CACHE_SIZE) {
        // /*
        //  * This overrides the default sizeOf() implementation to return the
        //  * correct size of each cache entry.
        //  */
        // @Override
        // protected int sizeOf(String paramURL, Bitmap image) {
        //     return image.getByteCount();
        // }
    };

    /*
     * Instantiates a new anonymous Handler object and defines its
     * handleMessage() method. The Handler *must* run on the UI thread, because it moves photo
     * Bitmaps from the PhotoTask object to the View object.
     * To force the Handler to run on the UI thread, it's defined as part of the PhotoManager
     * constructor. The constructor is invoked when the class is first referenced, and that
     * happens when the View invokes startDownload. Since the View runs on the UI Thread, so
     * does the constructor and the Handler.
     */
    mHandler = new Handler(Looper.getMainLooper()) {

        /*
         * handleMessage() defines the operations to perform when the
         * Handler receives a new Message to process.
         */
        @Override
        public void handleMessage(Message inputMessage) {
            // Gets the image task from the incoming Message object.
            AttachmentTask attachmentTask = (AttachmentTask) inputMessage.obj;

            // Gets the AttachmentView that holds a weak reference to the input ImageView.
            AttachmentView localView = attachmentTask.getPhotoView();

            // If this input view isn't null
            if (localView != null) {
                /*
                 * Gets the URL of the *weak reference* to the input
                 * ImageView. The weak reference won't have changed, even if
                 * the input ImageView has.
                 */
                // URL localURL = localView.getLocation();

                /*
                 * Compares the URL of the input ImageView to the URL of the
                 * weak reference. Only updates the bitmap in the ImageView
                 * if this particular Thread is supposed to be serving the
                 * ImageView.
                 */
                // if (attachmentTask.getImageURL() == localURL)

                // Chooses the action to take, based on the incoming message.
                // TODO: show the status properly based on message status ...
                switch (inputMessage.what) {
                    case DOWNLOAD_STARTED:
                        localView.getMessage().setAttDownloadInProgress(true);
                        localView.getProressBar().setVisibility(View.VISIBLE);
                        break;

                    case DOWNLOAD_COMPLETE:
                        localView.getProressBar().setProgress(70);
                        localView.getMessage().setAttDownloadInProgress(false);
                        break;

                    case DECODE_STARTED:
                        localView.getProressBar().setVisibility(View.VISIBLE);
                        localView.getProressBar().setProgress(90);
                        break;

                    /*
                     * The decoding is done, so this sets the ImageView's bitmap
                     * to the bitmap in the incoming message.
                     */
                    case TASK_COMPLETE:
                        if (localView.getDownloadProgressLayout() != null
                                && !localView.getMessage().isAttachmentUploadInProgress()) {
                            localView.getDownloadProgressLayout().setVisibility(View.GONE);
                        } else if (localView.getProressBar() != null) {
                            localView.getProressBar().setVisibility(View.GONE);
                        }
                        localView.setImageBitmap(attachmentTask.getImage());
                        recycleTask(attachmentTask);
                        break;

                    // The download failed: reset the progress UI and notify the user.
                    case DOWNLOAD_FAILED:
                        // localView.setStatusResource(R.drawable.imagedownloadfailed);
                        localView.getProressBar().setProgress(0);
                        localView.getMessage().setAttDownloadInProgress(false);
                        localView.getDownloadProgressLayout().setVisibility(View.GONE);
                        localView.setVisibility(View.INVISIBLE);
                        Toast.makeText(localView.getContext(), "Download failed.", Toast.LENGTH_SHORT).show();

                        // Attempts to re-use the Task object.
                        recycleTask(attachmentTask);
                        break;

                    default:
                        // Otherwise, calls the super method.
                        super.handleMessage(inputMessage);
                }
            }
        }
    };
}
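Two separate pools are used above: a download pool for IO-bound work and a decode pool capped at NUMBER_OF_CORES for CPU-bound work. Since both are fed unbounded LinkedBlockingQueues, neither pool will grow beyond its core size, so a MAXIMUM_POOL_SIZE above CORE_POOL_SIZE has no effect here. A sketch of the constants this code presumably relies on (the values are hypothetical, except the availableProcessors() idiom, which is the conventional source of NUMBER_OF_CORES):

import java.util.concurrent.TimeUnit;

class PoolSizing {
    // Conventional way to size a CPU-bound pool; assumed, not taken from the source above.
    static final int NUMBER_OF_CORES = Runtime.getRuntime().availableProcessors();

    // Hypothetical values for the IO-bound download pool.
    static final int CORE_POOL_SIZE = 8;
    static final int MAXIMUM_POOL_SIZE = 8;
    static final int KEEP_ALIVE_TIME = 1;
    static final TimeUnit KEEP_ALIVE_TIME_UNIT = TimeUnit.SECONDS;
}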
From source file:de.tu_dortmund.ub.data.util.TPUUtil.java
public static String executeInit(final String initResourceFile, final String serviceName,
        final Integer engineThreads, final Properties config, final int cnt) throws Exception {
    // create the job
    final Callable<String> initTask = new Init(initResourceFile, config, cnt);

    // work on jobs
    final ThreadPoolExecutor pool = new ThreadPoolExecutor(engineThreads, engineThreads, 0L,
            TimeUnit.SECONDS, new LinkedBlockingQueue<>());

    try {
        final List<Callable<String>> tasks = new LinkedList<>();
        tasks.add(initTask);

        final List<Future<String>> futureList = pool.invokeAll(tasks);
        final Iterator<Future<String>> iterator = futureList.iterator();

        if (iterator.hasNext()) {
            final Future<String> f = iterator.next();
            final String initResult = f.get();
            final String message1 = String.format("[%s][%d] initResult = '%s'", serviceName, cnt, initResult);
            LOG.info(message1);
            return initResult;
        }
    } catch (final Exception e) {
        LOG.error("[{}][{}] something went wrong at init part execution", serviceName, cnt, e);
        throw e;
    } finally {
        pool.shutdown();
    }

    return null;
}
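Since only one Callable is ever submitted here, the invokeAll() machinery is heavier than needed; a single submit() plus a bounded get() behaves the same and also allows a timeout. A minimal sketch of that alternative (the 30-second timeout is an arbitrary illustration):

import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SingleTaskDemo {
    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 1, 0L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        try {
            // For a single Callable, submit().get() is the lighter equivalent
            // of wrapping it in a list and calling invokeAll().
            Future<String> future = pool.submit(() -> "init done");
            System.out.println(future.get(30, TimeUnit.SECONDS));
        } finally {
            pool.shutdown();
        }
    }
}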
From source file:com.applozic.mobicomkit.api.attachment.AttachmentManager.java
/**
 * Constructs the work queues and thread pools used to download and decode images.
 */
private AttachmentManager() {
    attachmentInProgress = new ArrayList<String>();
    attachmentTaskList = new ArrayList<AttachmentTask>();

    /*
     * Creates a work queue for the pool of Thread objects used for downloading, using a linked
     * list queue that blocks when the queue is empty.
     */
    mDownloadWorkQueue = new LinkedBlockingQueue<Runnable>();

    /*
     * Creates a work queue for the pool of Thread objects used for decoding, using a linked
     * list queue that blocks when the queue is empty.
     */
    mDecodeWorkQueue = new LinkedBlockingQueue<Runnable>();

    /*
     * Creates a work queue for the set of task objects that control downloading and
     * decoding, using a linked list queue that blocks when the queue is empty.
     */
    mPhotoTaskWorkQueue = new LinkedBlockingQueue<AttachmentTask>();

    // Creates a new pool of Thread objects for the download work queue.
    mDownloadThreadPool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, mDownloadWorkQueue);

    // Creates a new pool of Thread objects for the decoding work queue.
    mDecodeThreadPool = new ThreadPoolExecutor(NUMBER_OF_CORES, NUMBER_OF_CORES, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, mDecodeWorkQueue);

    // Instantiates a new cache based on the cache size estimate.
    mPhotoCache = new LruCache<String, Bitmap>(IMAGE_CACHE_SIZE) {
        // /*
        //  * This overrides the default sizeOf() implementation to return the
        //  * correct size of each cache entry.
        //  */
        // @Override
        // protected int sizeOf(String paramURL, Bitmap image) {
        //     return image.getByteCount();
        // }
    };

    /*
     * Instantiates a new anonymous Handler object and defines its
     * handleMessage() method. The Handler *must* run on the UI thread, because it moves photo
     * Bitmaps from the PhotoTask object to the View object.
     * To force the Handler to run on the UI thread, it's defined as part of the PhotoManager
     * constructor. The constructor is invoked when the class is first referenced, and that
     * happens when the View invokes startDownload. Since the View runs on the UI Thread, so
     * does the constructor and the Handler.
     */
    mHandler = new Handler(Looper.getMainLooper()) {

        /*
         * handleMessage() defines the operations to perform when the
         * Handler receives a new Message to process.
         */
        @Override
        public void handleMessage(Message inputMessage) {
            // Gets the image task from the incoming Message object.
            AttachmentTask attachmentTask = (AttachmentTask) inputMessage.obj;

            // Gets the AttachmentView that holds a weak reference to the input ImageView.
            AttachmentView localView = attachmentTask.getPhotoView();

            // If this input view isn't null
            if (localView != null) {
                /*
                 * Gets the URL of the *weak reference* to the input
                 * ImageView. The weak reference won't have changed, even if
                 * the input ImageView has.
                 */
                // URL localURL = localView.getLocation();

                /*
                 * Compares the URL of the input ImageView to the URL of the
                 * weak reference. Only updates the bitmap in the ImageView
                 * if this particular Thread is supposed to be serving the
                 * ImageView.
                 */
                // if (attachmentTask.getImageURL() == localURL)

                // Chooses the action to take, based on the incoming message.
                // TODO: show the status properly based on message status ...
                switch (inputMessage.what) {
                    case DOWNLOAD_STARTED:
                        localView.getMessage().setAttDownloadInProgress(true);
                        localView.getProressBar().setVisibility(View.VISIBLE);
                        break;

                    case DOWNLOAD_COMPLETE:
                        localView.getProressBar().setProgress(70);
                        localView.getMessage().setAttDownloadInProgress(false);
                        break;

                    case DECODE_STARTED:
                        localView.getProressBar().setVisibility(View.VISIBLE);
                        localView.getProressBar().setProgress(90);
                        break;

                    /*
                     * The decoding is done, so this sets the ImageView's bitmap
                     * to the bitmap in the incoming message.
                     */
                    case TASK_COMPLETE:
                        if (localView.getDownloadProgressLayout() != null
                                && !localView.getMessage().isAttachmentUploadInProgress()) {
                            localView.getDownloadProgressLayout().setVisibility(View.GONE);
                        } else if (localView.getProressBar() != null) {
                            localView.getProressBar().setVisibility(View.GONE);
                        }
                        BroadcastService.sendMessageUpdateBroadcast(localView.getContext(),
                                BroadcastService.INTENT_ACTIONS.MESSAGE_ATTACHMENT_DOWNLOAD_DONE.toString(),
                                localView.getMessage());
                        localView.setImageBitmap(attachmentTask.getImage());
                        recycleTask(attachmentTask);
                        break;

                    // The download failed: reset the progress UI, cancel the
                    // download, and notify the user.
                    case DOWNLOAD_FAILED:
                        // localView.setStatusResource(R.drawable.imagedownloadfailed);
                        localView.getProressBar().setProgress(0);
                        localView.getMessage().setAttDownloadInProgress(false);
                        localView.getDownloadProgressLayout().setVisibility(View.GONE);
                        localView.setVisibility(View.INVISIBLE);
                        localView.cancelDownload();
                        BroadcastService.sendMessageUpdateBroadcast(localView.getContext(),
                                BroadcastService.INTENT_ACTIONS.MESSAGE_ATTACHMENT_DOWNLOAD_FAILD.toString(),
                                localView.getMessage());
                        Toast.makeText(localView.getContext(), "Download failed.", Toast.LENGTH_SHORT).show();

                        // Attempts to re-use the Task object.
                        recycleTask(attachmentTask);
                        break;

                    default:
                        // Otherwise, calls the super method.
                        super.handleMessage(inputMessage);
                }
            }
        }
    };
}