// Java tutorial
/**
 * This file is part of PackRat, an Android app for managing media collections.
 * Copyright (C) 2009-2012 Jens Finkhaeuser <jens@finkhaeuser.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package de.unwesen.packrat.api;

import android.net.Uri;
import android.content.Context;
import android.os.Handler;
import android.os.Message;

import java.net.URI;
import java.io.InputStream;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URLEncoder;

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.HttpResponse;

import org.json.JSONObject;
import org.json.JSONArray;
import org.json.JSONException;

import java.util.LinkedList;
import java.util.HashMap;
import java.util.List;
import java.util.Collections;
import java.util.Comparator;

import de.unwesen.web.google.base.MediaParser;

import de.unwesen.packrat.app.R;
import de.unwesen.packrat.metadata.Media;
import de.unwesen.packrat.Error;
import de.unwesen.packrat.Environment;

import de.unwesen.util.RegexMap;
import java.util.regex.Pattern;

import android.util.Log;

/**
 * The FeedReader class reads feeds of Media entries from Google Base
 **/
public class FeedReader extends APIBase
{
  /***************************************************************************
   * Public constants
   **/
  // Result codes delivered to client Handlers in Message.what. FR_SUCCESS
  // messages carry a LinkedList<Media> payload in Message.obj; the others
  // signal distinct failure modes.
  public static final int FR_SUCCESS            = Error.FEED_READER_BASE;
  public static final int FR_INVALID_FEED_URI   = 1 + Error.FEED_READER_BASE;
  public static final int FR_EMPTY_FEED         = 2 + Error.FEED_READER_BASE;
  public static final int FR_FEED_PARSE_FAILURE = 3 + Error.FEED_READER_BASE;
  public static final int FR_NETWORK_ERROR      = 4 + Error.FEED_READER_BASE;
  public static final int FR_UNKNOWN_ERROR      = 5 + Error.FEED_READER_BASE;

  // Search types accepted by findBy(). UPC/ISBN/TITLE/FREEFORM index into
  // SEARCH_MASKS below; SEARCH_TYPE_WEB is dispatched to webSearch() instead.
  public static final int SEARCH_TYPE_UPC       = 0;
  public static final int SEARCH_TYPE_ISBN      = 1;
  public static final int SEARCH_TYPE_TITLE     = 2;
  public static final int SEARCH_TYPE_FREEFORM  = 3;
  public static final int SEARCH_TYPE_WEB       = 4;

  /***************************************************************************
   * Private constants
   **/
  // Log ID
  private static final String LTAG = "FeedReader";

  // Fetcher startup delay. Avoids high load at startup that could impact UX.
  private static final int FETCHER_STARTUP_DELAY = 5 * 1000;

  // Fetcher thread wakes up at this interval (msec).
  // NOTE(review): not referenced anywhere in this file — possibly dead.
  private static final int FETCHER_SLEEP_TIME = 60 * 1000;

  // Google Base product-snippet feed; the bq= query value is appended.
  private static final String PRODUCT_BASE_URI =
      "http://www.google.com/base/feeds/snippets/-/products?bq=";
  // Google AJAX web search; takes the API key and the URL-encoded query term.
  private static final String WEB_BASE_URI =
      "http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&key=%s&q=%s";

  // Search masks - indices are SEARCH_TYPE_* above
  private static final String[] SEARCH_MASKS = {
    "[upc(text):\"%s\"]",
    "[isbn(text):\"%s\"]",
    "[title(text):\"%s\"]",
    "%s",
  };

  // Minimum word length for words to be counted in the web search results.
  private static final int MIN_WORD_LENGTH = 4;

  // Search API key for PackRat (associated with REFERER_URL)
  private static final String WEB_SEARCH_API_KEY = "ABQIAAAA5HIniMYIZUSELwoiGKgcPxQZSR71HxAZ4Mx49yK0yuhu94q_bxRj3lRVofWY-NRrfvwBI5OEDl87PQ";

  /***************************************************************************
   * The Fetcher class fetches and parses Feeds, then notifies a Handler of
   * the results.
**/
  private class Fetcher extends Thread
  {
    // Shutdown flag: cleared by findBy() from the caller's thread while run()
    // polls it on this thread. BUGFIX: declared volatile so the cross-thread
    // write is guaranteed to be visible; without it the fetcher thread may
    // never observe the shutdown request.
    public volatile boolean keepRunning = true;

    private final Uri mUri;
    private final Handler mHandler;

    /**
     * @param uri feed URI to fetch
     * @param handler receives exactly one FR_* message with the outcome
     **/
    public Fetcher(Uri uri, Handler handler)
    {
      super();
      mUri = uri;
      mHandler = handler;
    }

    @Override
    public void run()
    {
      // Delay before starting to fetch stuff.
      try {
        sleep(FETCHER_STARTUP_DELAY);
      } catch (java.lang.InterruptedException ex) {
        // Interrupt is our shutdown signal; fall through and let the loop
        // guard below decide whether to proceed.
      }

      // The loop construct prevents updateFeed from being executed if an
      // external interrupt occurred with a request to shut down the thread.
      while (keepRunning) {
        updateFeed(mUri, mHandler);
        keepRunning = false;
      }
    }
  }

  /***************************************************************************
   * WebSearchMachine iterates over a list of words, generating search terms
   * to use for product searches.
   **/
  private class WebSearchMachine implements Handler.Callback
  {
    // Word/count entries, sorted by descending occurrence count.
    private List<HashMap.Entry> mWordList;
    // Current cut-off: only words occurring at least this often are used.
    private int mCountLimit;
    // Highest count present; once mCountLimit exceeds this, we give up.
    private int mMaxCountLimit;
    private Handler mUpchainHandler;

    /**
     * @param wordList entries sorted by descending count; must be non-empty
     * @param upchain receives FR_SUCCESS (with results) or FR_EMPTY_FEED
     **/
    public WebSearchMachine(List<HashMap.Entry> wordList, Handler upchain)
    {
      mWordList = wordList;
      mCountLimit = 2; // lowest possible count available
      mMaxCountLimit = (Integer) wordList.get(0).getValue();
      mUpchainHandler = upchain;
    }

    /**
     * Issues a freeform product search built from all words whose count is at
     * least mCountLimit. Reports FR_EMPTY_FEED upchain if no word qualifies.
     **/
    public void nextTerm()
    {
      // StringBuilder avoids repeated String concatenation in the loop;
      // resulting term (including trailing space) is unchanged.
      StringBuilder term = new StringBuilder();
      for (HashMap.Entry e : mWordList) {
        if ((Integer) e.getValue() < mCountLimit) {
          continue;
        }
        term.append(e.getKey()).append(" ");
      }

      if (0 == term.length()) {
        mUpchainHandler.obtainMessage(FR_EMPTY_FEED).sendToTarget();
        return;
      }

      // Log.d(LTAG, "Term: " + term);
      findBy(SEARCH_TYPE_FREEFORM, term.toString(), new Handler(this));
    }

    /**
     * On success, forwards the result upchain. On any failure, raises the
     * count limit (narrowing the term to more frequent words) and retries
     * until the limit is exhausted.
     **/
    public boolean handleMessage(Message msg)
    {
      if (FR_SUCCESS == msg.what) {
        mUpchainHandler.obtainMessage(msg.what, msg.obj).sendToTarget();
        return true;
      }

      ++mCountLimit;
      if (mCountLimit > mMaxCountLimit) {
        mUpchainHandler.obtainMessage(FR_EMPTY_FEED).sendToTarget();
      }
      else {
        nextTerm();
      }
      return true;
    }
  }

  /***************************************************************************
   * Static data
   **/
  // Maps compiled patterns over format/category strings to Media type
  // constants; populated once in the static initializer below.
  private static RegexMap<Integer>
sMediaTypes;

  static {
    sMediaTypes = new RegexMap<Integer>();
    int flags = Pattern.CASE_INSENSITIVE | Pattern.UNICODE_CASE;

    // Keyword patterns are collected first, then compiled below with a
    // .*…* wrapper so they match anywhere in the haystack string.
    HashMap<String, Integer> patternMap = new HashMap<String, Integer>();
    // Videos
    patternMap.put("(hd[- ]dvd.*)?blu-ray", Media.MEDIA_SUBTYPE_BLU_RAY);
    patternMap.put("hd[- ]dvd", Media.MEDIA_SUBTYPE_HD_DVD);
    patternMap.put("blue-ray", Media.MEDIA_SUBTYPE_BLU_RAY);
    patternMap.put("blu[-e]?ray", Media.MEDIA_SUBTYPE_BLU_RAY);
    patternMap.put("vhs", Media.MEDIA_SUBTYPE_VHS);
    patternMap.put("dvds?", Media.MEDIA_SUBTYPE_DVD);
    patternMap.put("laserdiscs?", Media.MEDIA_SUBTYPE_LASERDISC);
    patternMap.put("videos?", Media.MEDIA_TYPE_VIDEO);
    patternMap.put("movies?", Media.MEDIA_TYPE_VIDEO);
    patternMap.put("television", Media.MEDIA_TYPE_VIDEO);
    patternMap.put("tv( *series)?", Media.MEDIA_TYPE_VIDEO);

    // CDs
    patternMap.put("cds?", Media.MEDIA_SUBTYPE_CD);
    patternMap.put("vinyl", Media.MEDIA_SUBTYPE_VINYL);
    patternMap.put("records?", Media.MEDIA_SUBTYPE_VINYL);
    patternMap.put("mini[ -]?discs?", Media.MEDIA_SUBTYPE_MINI_DISC);
    patternMap.put("md", Media.MEDIA_SUBTYPE_MINI_DISC);
    patternMap.put("music", Media.MEDIA_TYPE_AUDIO);
    patternMap.put("audiobooks?", Media.MEDIA_TYPE_AUDIO);

    // Books
    patternMap.put("paperbacks?", Media.MEDIA_SUBTYPE_PAPERBACK);
    patternMap.put("hardcover", Media.MEDIA_SUBTYPE_HARDCOVER);
    patternMap.put("comics?", Media.MEDIA_SUBTYPE_COMIC);
    patternMap.put("comic *books?", Media.MEDIA_SUBTYPE_COMIC);
    patternMap.put("graphic *novels?", Media.MEDIA_SUBTYPE_COMIC);
    patternMap.put("magazines?", Media.MEDIA_SUBTYPE_MAGAZINE);
    patternMap.put("(non)?fiction", Media.MEDIA_TYPE_TEXT);
    patternMap.put("literature", Media.MEDIA_TYPE_TEXT);
    patternMap.put("books?", Media.MEDIA_TYPE_TEXT);

    // Games
    patternMap.put("games? *pc", Media.MEDIA_SUBTYPE_PC);
    patternMap.put("pc .*games?", Media.MEDIA_SUBTYPE_PC);
    patternMap.put("pc", Media.MEDIA_SUBTYPE_PC);
    patternMap.put("computer *games?", Media.MEDIA_SUBTYPE_PC);
    patternMap.put("ps2.*games?", Media.MEDIA_SUBTYPE_PS2);
    patternMap.put("playstation *2.*games?", Media.MEDIA_SUBTYPE_PS2);
    patternMap.put("games?.*ps2", Media.MEDIA_SUBTYPE_PS2);
    patternMap.put("games?.*playstation *2", Media.MEDIA_SUBTYPE_PS2);
    // BUGFIX: this entry previously mapped to MEDIA_SUBTYPE_PS3; every other
    // "playstation 2" pattern maps to PS2, so that was a copy/paste error.
    patternMap.put("playstation *2", Media.MEDIA_SUBTYPE_PS2);
    patternMap.put("ps1?.*games?", Media.MEDIA_SUBTYPE_PS1);
    patternMap.put("playstation *1?.*games?", Media.MEDIA_SUBTYPE_PS1);
    patternMap.put("games?.*ps1?", Media.MEDIA_SUBTYPE_PS1);
    patternMap.put("games?.*playstation *1?", Media.MEDIA_SUBTYPE_PS1);
    patternMap.put("playstation *1?", Media.MEDIA_SUBTYPE_PS1);
    patternMap.put("ps3.*games?", Media.MEDIA_SUBTYPE_PS3);
    patternMap.put("playstation *3.*games?", Media.MEDIA_SUBTYPE_PS3);
    patternMap.put("games?.*ps3", Media.MEDIA_SUBTYPE_PS3);
    patternMap.put("games?.*playstation *3", Media.MEDIA_SUBTYPE_PS3);
    patternMap.put("playstation *3", Media.MEDIA_SUBTYPE_PS3);
    patternMap.put("wii", Media.MEDIA_SUBTYPE_WII);
    patternMap.put("(nintendo)? *wii *(games?)?", Media.MEDIA_SUBTYPE_WII);
    patternMap.put("gamecube", Media.MEDIA_SUBTYPE_GAMECUBE);
    patternMap.put("nintendo *ds", Media.MEDIA_SUBTYPE_DS);
    patternMap.put("xbox ?360", Media.MEDIA_SUBTYPE_XBOX360);
    patternMap.put("xbox", Media.MEDIA_SUBTYPE_XBOX);
    patternMap.put("games?", Media.MEDIA_TYPE_GAME);
    patternMap.put("gaming", Media.MEDIA_TYPE_GAME);
    patternMap.put("video games?", Media.MEDIA_TYPE_GAME);
    patternMap.put("video games?:games?", Media.MEDIA_TYPE_GAME);
    patternMap.put("video game software", Media.MEDIA_TYPE_GAME);
    patternMap.put("video game cartridges?", Media.MEDIA_TYPE_GAME);
    patternMap.put("software", Media.MEDIA_TYPE_GAME);
    patternMap.put("applications?", Media.MEDIA_TYPE_GAME);

    // TODO Add more patterns as they become apparent

    // Compile each keyword so it matches anywhere in the input. Iterating
    // entrySet() avoids a redundant get() per key.
    for (HashMap.Entry<String, Integer> e : patternMap.entrySet()) {
      String pattern = String.format(".*%s.*", e.getKey());
      sMediaTypes.put(Pattern.compile(pattern, flags), e.getValue());
    }
  }

  /***************************************************************************
   * Data members
   **/
  // Currently-running product search thread; stopped and replaced by findBy().
  private Fetcher mFetcher;

  /***************************************************************************
   * Implementation
   **/
  public FeedReader(Context context)
  {
    super(context);
    // XXX Disable in production builds.
    // testPatterns();
  }

  /**
   * Disabled in production builds.
   **/
  // private void testPatterns()
  // {
  //   try {
  //     InputStream is = mContext.getResources().openRawResource(R.raw.patterns);
  //     java.io.BufferedReader br = new java.io.BufferedReader(
  //         new java.io.InputStreamReader(is));
  //
  //     int[] types = mContext.getResources().getIntArray(R.array.media_types);
  //     String[] names = mContext.getResources().getStringArray(R.array.media_type_names);
  //
  //     String strLine = null;
  //     while (null != (strLine = br.readLine())) {
  //       Integer type = sMediaTypes.get(strLine);
  //       if (null == type) {
  //         // This word is a media type keyword, so we'll ignore it.
// Log.w(LTAG, String.format("Unrecognized: %s", strLine)); // continue; // } // // String tn = null; // for (int i = 0 ; i < types.length ; ++i) { // if (type == types[i]) { // tn = names[i]; // break; // } // } // // Log.d(LTAG, String.format("%s\t\t\t%s (%d)", strLine, tn, type)); // } // } catch (Exception ex) { // Log.w(LTAG, "Error: " + ex); // } // } public void findBy(int searchType, String searchTerm, Handler handler) { if (null == searchTerm || 0 == searchTerm.length()) { Log.e(LTAG, "Empty search term!"); } if (SEARCH_TYPE_WEB == searchType) { webSearch(searchTerm, handler); return; } // TODO use APIBase's Requester/requestUri, etc. Uri uri = Uri.parse(PRODUCT_BASE_URI + Uri.encode(String.format(SEARCH_MASKS[searchType], searchTerm))); //Log.d(LTAG, "Search URI: " + uri); if (null != mFetcher) { mFetcher.keepRunning = false; mFetcher.interrupt(); } mFetcher = new Fetcher(uri, handler); mFetcher.start(); } private void webSearch(String term, final Handler handler) { Uri uri = Uri.parse(String.format(WEB_BASE_URI, WEB_SEARCH_API_KEY, Uri.encode(term))); //Log.d(LTAG, "Search URI: " + uri); requestUri(uri, new Handler(new Handler.Callback() { public boolean handleMessage(Message msg) { if (ERR_SUCCESS == msg.what) { String data = (String) msg.obj; handleWebSearchResults(data, handler); } else { handler.obtainMessage(msg.what).sendToTarget(); } return true; } })); } private void handleWebSearchResults(String data, final Handler handler) { // Log.d(LTAG, "Result: " + data); try { // First check response status. If that is != 200, we may have an error // message to log, and definitely can bail out early. 
JSONObject result = new JSONObject(data);
      int status = result.getInt("responseStatus");
      if (200 != status) {
        Log.e(LTAG, "Server error: " + result.getString("responseDetails"));
        handler.obtainMessage(ERR_SERVER).sendToTarget();
        return;
      }

      JSONObject d = result.getJSONObject("responseData");
      JSONArray res = d.getJSONArray("results");

      // Count the occurrences of various words across all returned titles.
      // If a word is known to designate media type, we'll ignore it. We'll
      // also ignore words shorter than MIN_WORD_LENGTH.
      HashMap<String, Integer> wordCount = new HashMap<String, Integer>();
      for (int i = 0; i < res.length(); ++i) {
        JSONObject entry = res.getJSONObject(i);
        String title = entry.getString("titleNoFormatting");
        String[] words = title.split(" ");
        for (String word : words) {
          if (MIN_WORD_LENGTH > word.length()) {
            // Too short
            continue;
          }

          Integer type = sMediaTypes.get(word);
          if (null != type) {
            // This word is a media type keyword, so we'll ignore it.
            continue;
          }

          // Count case-insensitively; the keyword lookup above is already
          // case-insensitive via the compiled pattern flags.
          word = word.toLowerCase();
          Integer count = wordCount.get(word);
          if (null == count) {
            wordCount.put(word, 1);
          }
          else {
            wordCount.put(word, count + 1);
          }
        }
      }

      // Now that we've counted words, first filter out all words that contain
      // non-letters. Those are likely not good candidates for further searching.
      // We ignore them by putting their count to zero.
      // The tricky part here is that trailing non-letters are likely fine, we
      // just can't use them for searches.
      HashMap<String, Integer> filteredWordCount = new HashMap<String, Integer>();
      for (String word : wordCount.keySet()) {
        // Log.d(LTAG, "Word: " + word + " -> " + wordCount.get(word));
        // Track the index of the last letter/digit and the last non-letter
        // seen; their relative positions classify the word afterwards.
        int lastLetter = -1;
        int lastNonLetter = -1;
        for (int i = 0; i < word.length(); ++i) {
          int codePoint = word.codePointAt(i);
          if (Character.isLetter(codePoint) || Character.isDigit(codePoint)) {
            lastLetter = i;
            if (lastNonLetter > 0) {
              // Due to the sequential nature of our iteration, we know that
              // at(i) is now a letter following a non-letter, so we can
              // safely ignore this word.
              break;
            }
          }
          else {
            lastNonLetter = i;
            if (-1 == lastLetter) {
              // We have non-letters preceeding letters, that word should
              // likely be discarded.
              break;
            }
          }
        }

        if (-1 == lastNonLetter) {
          // Word is pure letters, keep it.
          filteredWordCount.put(word, wordCount.get(word));
        }
        else if (-1 == lastLetter) {
          // Word is pure non-letters, discard it.
        }
        else if (lastNonLetter > lastLetter) {
          // Word has trailing non-letters, cut it.
          Integer count = wordCount.get(word);
          word = word.substring(0, lastLetter + 1);
          filteredWordCount.put(word, count);
        }
        else {
          // Word has non-letters in the middle; drop it.
        }
      }

      // Next filter step is optional: if we had more than one title to go
      // through, then chances are that words with only one count should be
      // ignored. If we had only one title, that's not an optimization we can
      // safely make.
      if (1 < res.length()) {
        wordCount = filteredWordCount;
        filteredWordCount = new HashMap<String, Integer>();
        for (String word : wordCount.keySet()) {
          int count = wordCount.get(word);
          if (count > 1) {
            filteredWordCount.put(word, count);
          }
        }
      }

      // If we're left with no results, give up right here.
      if (0 == filteredWordCount.size()) {
        handler.obtainMessage(ERR_EMPTY_RESPONSE).sendToTarget();
        return;
      }

      // If we've got results, sort them.
      // Sort entries by their value (occurrence count), descending.
      List<HashMap.Entry> wordList =
          new LinkedList<HashMap.Entry>(filteredWordCount.entrySet());
      Collections.sort(wordList, new Comparator() {
        public int compare(Object o1, Object o2)
        {
          return -1 * ((Comparable) ((HashMap.Entry) (o1)).getValue())
              .compareTo(((HashMap.Entry) (o2)).getValue());
        }
      });

      // With the resulting wordList, we'll generate search terms, preferring
      // more words over fewer words, and words with a higher count over words
      // with a lower count.
      WebSearchMachine machine = new WebSearchMachine(wordList, handler);
      machine.nextTerm();

    } catch (JSONException ex) {
      handler.obtainMessage(ERR_SERIALIZATION).sendToTarget();
    }
  }

  /**
   * Best-effort guess of the Media subtype for a feed entry, by matching its
   * format/product-type strings against sMediaTypes. Returns 0 when nothing
   * matches; unknown format strings are also reported to the analytics
   * tracker.
   **/
  private int guessMediaType(MediaParser.AtomFeedEntry entry)
  {
    // This is the easiest possibility: if any of the keys below is specified,
    // we can use our regex mapping to map formats to media types.
    final String keys[] = {
      MediaParser.XML_G_FORMAT,
      MediaParser.XML_G_PRODUCT_TYPE,
    };
    for (int i = 0; i < keys.length; ++i) {
      String key = keys[i];
      if (entry.mExtra.containsKey(key)) {
        String format = entry.mExtra.get(key).toString();
        Integer type = sMediaTypes.get(format);
        if (null != type) {
          return type;
        }
        else {
          Log.w(LTAG, "Could not interpret format '" + format + "'!");
          // By tracking 1 here, we're allowing GA to count each occurrence of
          // the format string. We need to replace whitespaces, though.
          format = URLEncoder.encode(format);
          Environment.instance().mTracker.trackEvent(Environment.CAT_ERROR,
              Environment.TA_ERR_CATEGORY, format, 1);
          Environment.instance().mTracker.dispatch();
        }
      }
    }

    // TODO might need more.
return 0;
  }

  /**
   * Converts a parsed Atom feed into a list of Media objects. Entries missing
   * an id, title or updated timestamp are skipped. Returns null when the feed
   * has no entries at all; otherwise a (possibly empty) list.
   **/
  private LinkedList<Media> parseFeed(MediaParser.AtomFeed feed)
  {
    if (null == feed.mEntries || 0 == feed.mEntries.size()) {
      return null;
    }

    LinkedList<Media> result = new LinkedList<Media>();

    for (MediaParser.AtomFeedEntry entry : feed.mEntries) {
      // Log.d(LTAG, "Entry: " + entry.toString());
      Media media = new Media();

      // Mandatory fields; entries lacking any of them are skipped entirely.
      if (null == entry.mId) {
        continue;
      }
      media.mId = entry.mId.toString();

      if (null == entry.mTitle) {
        continue;
      }
      media.mTitle = entry.mTitle.toString();

      if (null == entry.mUpdated) {
        continue;
      }
      media.mUpdated = entry.mUpdated.getDate();

      // Guess media type. The main type is derived by truncating the subtype
      // to a multiple of 100.
      media.mMediaSubType = guessMediaType(entry);
      media.mMediaType = ((int) (media.mMediaSubType / 100)) * 100;

      if (null != entry.mPublished) {
        media.mPublished = entry.mPublished.getDate();
      }

      // media.mSource = FIXME
      // media.mWebLink =

      // Attributes for all products
      if (entry.mExtra.containsKey(MediaParser.XML_G_UPC)) {
        media.mUPC = entry.mExtra.get(MediaParser.XML_G_UPC).toString();
      }
      if (entry.mExtra.containsKey(MediaParser.XML_G_ISBN)) {
        media.mISBN = entry.mExtra.get(MediaParser.XML_G_ISBN).toString();
      }
      if (entry.mExtra.containsKey(MediaParser.XML_G_IMAGE_LINK)) {
        String url = entry.mExtra.get(MediaParser.XML_G_IMAGE_LINK).toString();
        media.mImageLink = Uri.parse(url);
      }

      // Attributes for all media types
      if (entry.mExtra.containsKey(MediaParser.XML_G_FORMAT)) {
        media.mFormat = entry.mExtra.get(MediaParser.XML_G_FORMAT).toString();
      }
      if (entry.mExtra.containsKey(MediaParser.XML_G_GENRE)) {
        media.mGenre = entry.mExtra.get(MediaParser.XML_G_GENRE).toString();
      }
      if (entry.mExtra.containsKey(MediaParser.XML_G_RATING)) {
        media.mRating = entry.mExtra.get(MediaParser.XML_G_RATING).toString();
      }
      if (entry.mExtra.containsKey(MediaParser.XML_G_AGE_RANGE)) {
        media.mAgeRange = entry.mExtra.get(MediaParser.XML_G_AGE_RANGE).toString();
      }
      if (entry.mExtra.containsKey(MediaParser.XML_G_EDITION)) {
        media.mEdition = entry.mExtra.get(MediaParser.XML_G_EDITION).toString();
      }
      if (entry.mExtra.containsKey(MediaParser.XML_G_BRAND)) {
        media.mBrand = entry.mExtra.get(MediaParser.XML_G_BRAND).toString();
      }
      if (entry.mExtra.containsKey(MediaParser.XML_G_YEAR)) {
        media.mYear = ((MediaParser.X_AtomNumberNode) entry.mExtra.get(MediaParser.XML_G_YEAR))
            .getIntValue();
      }

      // Type-specific attributes, keyed off the guessed main type.
      if (Media.MEDIA_TYPE_VIDEO == media.mMediaType) {
        // Video media
        if (entry.mExtra.containsKey(MediaParser.XML_G_ACTOR)) {
          media.mActor = entry.mExtra.get(MediaParser.XML_G_ACTOR).toString();
        }
        if (entry.mExtra.containsKey(MediaParser.XML_G_DIRECTOR)) {
          media.mDirector = entry.mExtra.get(MediaParser.XML_G_DIRECTOR).toString();
        }
        if (entry.mExtra.containsKey(MediaParser.XML_G_ASPECT_RATIO)) {
          media.mAspectRatio = entry.mExtra.get(MediaParser.XML_G_ASPECT_RATIO).toString();
        }
      }
      else if (Media.MEDIA_TYPE_AUDIO == media.mMediaType) {
        // Audio media
        if (entry.mExtra.containsKey(MediaParser.XML_G_ARTIST)) {
          media.mArtist = entry.mExtra.get(MediaParser.XML_G_ARTIST).toString();
        }
      }
      else if (Media.MEDIA_TYPE_TEXT == media.mMediaType) {
        // Text media
        if (entry.mExtra.containsKey(MediaParser.XML_G_PUBLISHER)) {
          media.mPublisher = entry.mExtra.get(MediaParser.XML_G_PUBLISHER).toString();
        }
        if (entry.mExtra.containsKey(MediaParser.XML_G_AUTHOR)) {
          media.mAuthor = entry.mExtra.get(MediaParser.XML_G_AUTHOR).toString();
        }
        if (entry.mExtra.containsKey(MediaParser.XML_G_PAGES)) {
          media.mPages = ((MediaParser.X_AtomNumberNode) entry.mExtra.get(MediaParser.XML_G_PAGES))
              .getIntValue();
        }
      }
      else if (Media.MEDIA_TYPE_GAME == media.mMediaType) {
        // Game media
        // TODO
      }

      // Log.d(LTAG, "Media: " + media.toString());
      result.add(media);
    }

    return result;
  }

  /**
   * Synchronously fetches the feed at uri, parses it, and reports the outcome
   * to handler: FR_SUCCESS with a LinkedList<Media> payload, or one of the
   * FR_* error codes. Runs on the Fetcher background thread.
   **/
  private void updateFeed(Uri uri, Handler handler)
  {
    // Log.d(LTAG, "Fetching feed: " + uri);

    // Convert Uri to URI... yes, it sucks.
URI feed_uri = null;
    try {
      feed_uri = new URI(uri.toString());
    } catch (URISyntaxException ex) {
      Log.e(LTAG, "Invalid feed URI: " + uri);
      Message m = handler.obtainMessage(FR_INVALID_FEED_URI);
      m.sendToTarget();
      return;
    }

    // Construct request. The Referer header is sent because the search API
    // key is associated with REFERER_URL (see WEB_SEARCH_API_KEY above).
    HttpGet request = new HttpGet(feed_uri);
    request.addHeader("Referer", REFERER_URL);

    HttpResponse response;
    try {
      response = sClient.execute(request);

      // Read response
      HttpEntity entity = response.getEntity();
      if (null == entity) {
        Log.e(LTAG, "Feed is empty: " + uri);
        Message m = handler.obtainMessage(FR_EMPTY_FEED);
        m.sendToTarget();
        return;
      }

      // NOTE(review): the entity's content stream is handed to the parser and
      // never explicitly closed/consumed here — verify the parser (or the
      // client's connection manager) takes care of releasing it.
      MediaParser parser = new MediaParser();
      MediaParser.AtomFeed feed = parser.parse(entity.getContent());
      if (null == feed) {
        Log.e(LTAG, "Unable to parse feed, exiting: " + uri);
        Message m = handler.obtainMessage(FR_FEED_PARSE_FAILURE);
        m.sendToTarget();
        return;
      }

      // A well-formed but entry-less feed is reported as FR_EMPTY_FEED.
      LinkedList<Media> result = parseFeed(feed);
      if (null != result && result.size() > 0) {
        Message m = handler.obtainMessage(FR_SUCCESS, result);
        m.sendToTarget();
      }
      else {
        Message m = handler.obtainMessage(FR_EMPTY_FEED);
        m.sendToTarget();
      }

    } catch (IOException ex) {
      Log.w(LTAG, "IO exception: " + ex);
      Message m = handler.obtainMessage(FR_NETWORK_ERROR);
      m.sendToTarget();
    } catch (Exception ex) {
      // Catch-all boundary so the caller always receives exactly one FR_*
      // message, whatever goes wrong during fetch or parse.
      Log.e(LTAG, "An exception occurred when reading the feed: " + ex);
      Message m = handler.obtainMessage(FR_UNKNOWN_ERROR);
      m.sendToTarget();
    }
  }
}