Usage examples for org.springframework.transaction.TransactionDefinition.PROPAGATION_REQUIRED

Field declaration (constants on the TransactionDefinition interface are implicitly public, static, and final):

int PROPAGATION_REQUIRED = 0;
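PROPAGATION_REQUIRED is the default propagation behavior: support the current transaction if one is active, otherwise create a new one. A minimal, self-contained sketch of programmatic use (the injected PlatformTransactionManager and the doWork callback are assumptions for illustration, not code from the examples below):

import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.DefaultTransactionDefinition;

public class RequiredPropagationSketch {

    private final PlatformTransactionManager txManager; // assumed to be injected

    public RequiredPropagationSketch(PlatformTransactionManager txManager) {
        this.txManager = txManager;
    }

    public void runInTransaction(Runnable doWork) {
        // Join the caller's transaction if one is active, otherwise start a new one.
        DefaultTransactionDefinition def = new DefaultTransactionDefinition();
        def.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRED);
        TransactionStatus status = txManager.getTransaction(def);
        try {
            doWork.run();
            txManager.commit(status);
        } catch (RuntimeException e) {
            txManager.rollback(status);
            throw e;
        }
    }
}

The examples below show the same constant used in unit tests, in manually managed batch loops, and when configuring JMS listener containers.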
From source file:jp.terasoluna.fw.batch.util.BatchUtilTest.java
/**
 * testGetTransactionDefinition02
 * @throws Exception
 */
@Test
public void testGetTransactionDefinition02() throws Exception {
    // run
    TransactionDefinition result = BatchUtil.getTransactionDefinition(
            TransactionDefinition.PROPAGATION_REQUIRED,
            TransactionDefinition.ISOLATION_DEFAULT,
            TransactionDefinition.TIMEOUT_DEFAULT, false);
    // verify
    assertNotNull(result);
}
From source file:jp.terasoluna.fw.batch.util.BatchUtilTest.java
/**
 * testStartTransaction04
 * @throws Exception
 */
@Test
public void testStartTransaction04() throws Exception {
    // set up
    PlatformTransactionManager tran = new PlatformTransactionManagerStub();
    // run
    TransactionStatus result = BatchUtil.startTransaction(tran,
            TransactionDefinition.PROPAGATION_REQUIRED,
            TransactionDefinition.ISOLATION_DEFAULT,
            TransactionDefinition.TIMEOUT_DEFAULT, false);
    // verify
    assertNotNull(result);
}
From source file:jp.terasoluna.fw.batch.util.BatchUtilTest.java
/**
 * testStartTransaction05
 * @throws Exception
 */
@Test
public void testStartTransaction05() throws Exception {
    // set up
    PlatformTransactionManager tran = new PlatformTransactionManagerStub();
    // run
    TransactionStatus result = BatchUtil.startTransaction(tran,
            TransactionDefinition.PROPAGATION_REQUIRED,
            TransactionDefinition.ISOLATION_DEFAULT,
            TransactionDefinition.TIMEOUT_DEFAULT, false, log);
    // verify
    assertNotNull(result);
}
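The tests above only assert that BatchUtil returns non-null values; the BatchUtil source itself is not shown on this page. A plausible sketch of such helpers, under the assumption that they simply populate a DefaultTransactionDefinition and hand it to the transaction manager:

import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.DefaultTransactionDefinition;

// Hypothetical reconstruction, not the actual TERASOLUNA code.
public final class BatchUtilSketch {

    public static TransactionDefinition getTransactionDefinition(int propagationBehavior,
            int isolationLevel, int timeout, boolean readOnly) {
        DefaultTransactionDefinition definition = new DefaultTransactionDefinition();
        definition.setPropagationBehavior(propagationBehavior); // e.g. PROPAGATION_REQUIRED
        definition.setIsolationLevel(isolationLevel);           // e.g. ISOLATION_DEFAULT
        definition.setTimeout(timeout);                         // e.g. TIMEOUT_DEFAULT (no limit)
        definition.setReadOnly(readOnly);
        return definition;
    }

    public static TransactionStatus startTransaction(PlatformTransactionManager transactionManager,
            int propagationBehavior, int isolationLevel, int timeout, boolean readOnly) {
        // Begins (or joins) a transaction described by the definition above.
        return transactionManager.getTransaction(
                getTransactionDefinition(propagationBehavior, isolationLevel, timeout, readOnly));
    }
}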
From source file:net.longfalcon.newsj.Backfill.java
private void backfillGroup(NewsClient nntpClient, Group group) {
    System.out.println("Processing " + group.getName());
    try {
        long startLoop = System.currentTimeMillis();

        NewsgroupInfo newsgroupInfo = new NewsgroupInfo();
        boolean exists = nntpClient.selectNewsgroup(group.getName(), newsgroupInfo);
        if (!exists) {
            System.out.println("Could not select group (bad name?): " + group.getName());
            return;
        }

        int backfillTarget = group.getBackfillTarget();
        long targetPost = dayToPost(nntpClient, group, backfillTarget, true);
        long localFirstRecord = group.getFirstRecord();
        long localLastRecord = group.getLastRecord();
        if (localFirstRecord == 0 || localLastRecord == 0) {
            _log.warn("Group " + group.getName()
                    + " has invalid numbers. Have you run update on it? Have you set the backfill days amount?");
            return;
        }

        Period daysServerHasPeriod = new Period(
                postDate(nntpClient, newsgroupInfo.getFirstArticleLong(), false),
                postDate(nntpClient, newsgroupInfo.getLastArticleLong(), false));
        Period localDaysPeriod = new Period(postDate(nntpClient, localFirstRecord, false), new DateTime());
        _log.info(String.format(
                "Group %s: server has %s - %s, or %s.\nLocal first = %s (%s). Backfill target of %s days is post %s",
                newsgroupInfo.getNewsgroup(), newsgroupInfo.getFirstArticleLong(),
                newsgroupInfo.getLastArticleLong(), _periodFormatter.print(daysServerHasPeriod),
                localFirstRecord, _periodFormatter.print(localDaysPeriod), backfillTarget, targetPost));

        if (targetPost >= localFirstRecord) {
            // if our estimate comes back with stuff we already have, finish
            _log.info("Nothing to do, we already have the target post.");
            return;
        }

        // get first and last part numbers from newsgroup
        if (targetPost < newsgroupInfo.getFirstArticleLong()) {
            _log.warn("WARNING: Backfill came back as before server's first. Setting targetpost to server first.");
            targetPost = newsgroupInfo.getFirstArticleLong();
        }

        // calculate total number of parts
        long total = localFirstRecord - targetPost;
        boolean done = false;

        // set first and last, moving the window by MESSAGE_BUFFER messages
        long last = localFirstRecord - 1L;
        long first = last - FetchBinaries.MESSAGE_BUFFER + 1L;

        // set initial "chunk"
        if (targetPost > first) {
            first = targetPost;
        }
        while (!done) {
            TransactionStatus transaction = transactionManager.getTransaction(
                    new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
            _log.info(String.format("Getting %s parts (%s in queue)", last - first + 1, first - targetPost));
            fetchBinaries.scan(nntpClient, group, first, last, "backfill", false); // TODO add support for compressed headers
            group.setFirstRecord(first);
            group.setLastUpdated(new Date());
            groupDAO.update(group);

            if (first == targetPost) {
                done = true;
            } else {
                // Keep going: set new last, new first, check for last chunk.
                last = first - 1;
                first = last - FetchBinaries.MESSAGE_BUFFER + 1;
                if (targetPost > first) {
                    first = targetPost;
                }
            }
            transactionManager.commit(transaction);
        }

        DateTime firstRecordPostDate = postDate(nntpClient, first, false);
        Date firstRecordPostDateDate = null;
        if (firstRecordPostDate != null) {
            firstRecordPostDateDate = firstRecordPostDate.toDate();
        }
        group.setFirstRecordPostdate(firstRecordPostDateDate);
        group.setLastUpdated(new Date());
        groupDAO.update(group);

        Period groupTime = new Period(startLoop, System.currentTimeMillis());
        _log.info("Group processed in " + _periodFormatter.print(groupTime));
    } catch (Exception e) {
        _log.error(e, e);
    }
}
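The loop above is a commit-per-chunk pattern: each window of articles is fetched and checkpointed inside its own PROPAGATION_REQUIRED transaction, so a crash loses at most one window. A condensed sketch of the pattern (processChunk and advanceCheckpoint are hypothetical stand-ins for the fetch and bookkeeping code):

import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.DefaultTransactionDefinition;

public class ChunkedBackfillSketch {

    private final PlatformTransactionManager transactionManager;

    public ChunkedBackfillSketch(PlatformTransactionManager transactionManager) {
        this.transactionManager = transactionManager;
    }

    public void processInChunks() {
        boolean done = false;
        while (!done) {
            TransactionStatus tx = transactionManager.getTransaction(
                    new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
            try {
                processChunk();                // fetch and persist one window of work
                done = advanceCheckpoint();    // record progress; report whether the target was reached
                transactionManager.commit(tx); // one short-lived transaction per window
            } catch (RuntimeException e) {
                transactionManager.rollback(tx); // the original loop omits rollback; added for safety
                throw e;
            }
        }
    }

    private void processChunk() { /* hypothetical: scan one article window */ }

    private boolean advanceCheckpoint() { /* hypothetical: move the window */ return true; }
}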
From source file:net.longfalcon.newsj.FetchBinaries.java
@Transactional(propagation = Propagation.REQUIRED, isolation = Isolation.READ_COMMITTED)
public long scan(NewsClient nntpClient, Group group, long firstArticle, long lastArticle, String type,
        boolean compressedHeaders) throws IOException {
    // this is a hack - tx is not working ATM
    TransactionStatus transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));

    long startHeadersTime = System.currentTimeMillis();
    long maxNum = 0;
    Map<String, Message> messages = new LinkedHashMap<>(MESSAGE_BUFFER + 1);
    Iterable<NewsArticle> articlesIterable = null;
    try {
        if (compressedHeaders) {
            _log.warn("Compressed Headers setting not currently functional");
            articlesIterable = nntpClient.iterateArticleInfo(firstArticle, lastArticle);
        } else {
            articlesIterable = nntpClient.iterateArticleInfo(firstArticle, lastArticle);
        }
    } catch (IOException e) {
        _log.error(e.toString());
        if (nntpClient.getReplyCode() == 400) {
            _log.info("NNTP connection timed out. Reconnecting...");
            nntpClient = nntpConnectionFactory.getNntpClient();
            nntpClient.selectNewsgroup(group.getName());
            articlesIterable = nntpClient.iterateArticleInfo(firstArticle, lastArticle);
        }
    }
    Period headersTime = new Period(startHeadersTime, System.currentTimeMillis());

    Set<Long> rangeRequested = ArrayUtil.rangeSet(firstArticle, lastArticle);
    Set<Long> messagesReceived = new HashSet<>();
    Set<Long> messagesBlacklisted = new HashSet<>();
    Set<Long> messagesIgnored = new HashSet<>();
    Set<Long> messagesInserted = new HashSet<>();
    Set<Long> messagesNotInserted = new HashSet<>();
    // check error codes?

    long startUpdateTime = System.currentTimeMillis();
    if (articlesIterable != null) {
        for (NewsArticle article : articlesIterable) {
            long articleNumber = article.getArticleNumberLong();
            if (articleNumber == 0) {
                continue;
            }
            messagesReceived.add(articleNumber);
            Pattern pattern = Defaults.PARTS_SUBJECT_REGEX;
            String subject = article.getSubject();
            Matcher matcher = pattern.matcher(subject);
            if (ValidatorUtil.isNull(subject) || !matcher.find()) {
                // most likely not a binary post; skip it
                messagesIgnored.add(articleNumber);
                if (_log.isDebugEnabled()) {
                    _log.debug(String.format("Skipping message no# %s : %s", articleNumber, subject));
                }
                continue;
            }
            // Filter binaries based on black/white list
            if (isBlacklisted(article, group)) {
                messagesBlacklisted.add(articleNumber);
                continue;
            }
            String group1 = matcher.group(1);
            String group2 = matcher.group(2);
            if (ValidatorUtil.isNumeric(group1) && ValidatorUtil.isNumeric(group2)) {
                int currentPart = Integer.parseInt(group1);
                int maxParts = Integer.parseInt(group2);
                subject = (matcher.replaceAll("")).trim();
                if (!messages.containsKey(subject)) {
                    messages.put(subject, new Message(article, currentPart, maxParts));
                } else if (currentPart > 0) {
                    Message message = messages.get(subject);
                    String articleId = article.getArticleId();
                    String messageId = articleId.substring(1, articleId.length() - 1);
                    int size = article.getSize();
                    message.addPart(currentPart, messageId, articleNumber, size);
                    messages.put(subject, message);
                }
            }
        }

        long count = 0;
        long updateCount = 0;
        long partCount = 0;
        maxNum = lastArticle;

        // add all the requested article numbers, then remove the ones we did receive
        Set<Long> rangeNotReceived = new HashSet<>();
        rangeNotReceived.addAll(rangeRequested);
        rangeNotReceived.removeAll(messagesReceived);

        if (!type.equals("partrepair")) {
            _log.info(String.format("Received %d articles of %d requested, %d blacklisted, %d not binaries",
                    messagesReceived.size(), lastArticle - firstArticle + 1, messagesBlacklisted.size(),
                    messagesIgnored.size()));
        }

        if (rangeNotReceived.size() > 0) {
            switch (type) {
            case "backfill":
                // don't add missing articles
                break;
            case "partrepair":
            case "update":
            default:
                addMissingParts(rangeNotReceived, group);
                break;
            }
            _log.info("Server did not return article numbers " + ArrayUtil.stringify(rangeNotReceived));
        }

        if (!messages.isEmpty()) {
            long dbUpdateTime = 0;
            maxNum = firstArticle;
            // insert binaries and parts into the database; when a binary already exists, only insert new parts
            for (Map.Entry<String, Message> entry : messages.entrySet()) {
                String subject = entry.getKey();
                Message message = entry.getValue();
                Map<Integer, MessagePart> partsMap = message.getPartsMap();
                if (!ValidatorUtil.isNull(subject) && !partsMap.isEmpty()) {
                    String binaryHash = EncodingUtil
                            .md5Hash(subject + message.getFrom() + String.valueOf(group.getId()));
                    Binary binary = binaryDAO.findByBinaryHash(binaryHash);
                    if (binary == null) {
                        long startDbUpdateTime = System.currentTimeMillis();
                        binary = new Binary();
                        binary.setName(subject);
                        binary.setFromName(message.getFrom());
                        binary.setDate(message.getDate().toDate());
                        binary.setXref(message.getxRef());
                        binary.setTotalParts(message.getMaxParts());
                        binary.setGroupId(group.getId());
                        binary.setBinaryHash(binaryHash);
                        binary.setDateAdded(new Date());
                        binaryDAO.updateBinary(binary);
                        dbUpdateTime += (System.currentTimeMillis() - startDbUpdateTime);
                        count++;
                        if (count % 500 == 0) {
                            _log.info(String.format("%s bin adds...", count));
                        }
                    } else {
                        updateCount++;
                        if (updateCount % 500 == 0) {
                            _log.info(String.format("%s bin updates...", updateCount));
                        }
                    }
                    long binaryId = binary.getId();
                    if (binaryId == 0) {
                        throw new RuntimeException("ID for binary wasn't set.");
                    }
                    for (MessagePart messagePart : message.getPartsMap().values()) {
                        long articleNumber = messagePart.getArticleNumber();
                        maxNum = (articleNumber > maxNum) ? articleNumber : maxNum;
                        partCount++;
                        // create part - it's possible some bugs are happening here
                        Part part = new Part();
                        part.setBinaryId(binaryId);
                        part.setMessageId(messagePart.getMessageId());
                        part.setNumber(messagePart.getArticleNumber());
                        part.setPartNumber(messagePart.getPartNumber());
                        part.setSize(messagePart.getSize());
                        part.setDateAdded(new Date());
                        try {
                            long startDbUpdateTime = System.currentTimeMillis();
                            partDAO.updatePart(part);
                            dbUpdateTime += (System.currentTimeMillis() - startDbUpdateTime);
                            messagesInserted.add(messagePart.getArticleNumber());
                        } catch (Exception e) {
                            _log.error(e.toString());
                            messagesNotInserted.add(messagePart.getArticleNumber());
                        }
                    }
                }
            }
            // TODO: determine whether to add to missing articles if insert failed
            if (messagesNotInserted.size() > 0) {
                _log.warn("WARNING: Parts failed to insert");
                addMissingParts(messagesNotInserted, group);
            }
            Period dbUpdatePeriod = new Period(dbUpdateTime);
            _log.info("Spent " + _periodFormatter.print(dbUpdatePeriod) + " updating the db");
        }

        Period updateTime = new Period(startUpdateTime, System.currentTimeMillis());
        if (!type.equals("partrepair")) {
            _log.info(count + " new, " + updateCount + " updated, " + partCount + " parts.");
            _log.info(" " + _periodFormatter.print(headersTime) + " headers, "
                    + _periodFormatter.print(updateTime) + " update.");
        }
        transactionManager.commit(transaction);
        return maxNum;
    } else {
        _log.error("Error: Can't get parts from server (msgs not array)\n Skipping group");
        return 0;
    }
}
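Note that scan() carries @Transactional(propagation = Propagation.REQUIRED) yet still opens a transaction by hand ("this is a hack - tx is not working ATM"), a symptom of the declarative proxy not being applied, for example through self-invocation or a non-proxied bean. The idiomatic programmatic alternative to hand-rolled getTransaction()/commit() is TransactionTemplate, sketched here (scanArticles is a hypothetical stand-in for the method body):

import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.support.TransactionTemplate;

public class ScanSketch {

    private final TransactionTemplate txTemplate;

    public ScanSketch(PlatformTransactionManager transactionManager) {
        txTemplate = new TransactionTemplate(transactionManager);
        txTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRED);
    }

    public long scan(long firstArticle, long lastArticle) {
        // execute() begins the transaction, commits on normal return,
        // and rolls back automatically if the callback throws.
        return txTemplate.execute(status -> scanArticles(firstArticle, lastArticle));
    }

    private long scanArticles(long firstArticle, long lastArticle) {
        /* hypothetical: header fetching and persistence would go here */
        return lastArticle;
    }
}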
From source file:net.longfalcon.newsj.Releases.java
public void processReleases() {
    String startDateString = DateUtil.displayDateFormatter.print(System.currentTimeMillis());
    _log.info(String.format("Starting release update process (%s)", startDateString));

    // get site config TODO: use config service
    Site site = siteDAO.getDefaultSite();
    int retcount = 0;
    Directory nzbBaseDir = fileSystemService.getDirectory("/nzbs");
    checkRegexesUptoDate(site.getLatestRegexUrl(), site.getLatestRegexRevision());

    // Stage 0
    // this is a hack - tx is not working ATM
    TransactionStatus transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));

    // Get all regexes for all groups which are to be applied to new binaries,
    // in the order in which they should be applied.
    List<ReleaseRegex> releaseRegexList = releaseRegexDAO.getRegexes(true, "-1", false);
    for (ReleaseRegex releaseRegex : releaseRegexList) {
        String releaseRegexGroupName = releaseRegex.getGroupName();
        _log.info(String.format("Applying regex %d for group %s", releaseRegex.getId(),
                ValidatorUtil.isNull(releaseRegexGroupName) ? "all" : releaseRegexGroupName));

        // compile the regex early, to test them
        String regex = releaseRegex.getRegex();
        Pattern pattern = Pattern.compile(fixRegex(regex), Pattern.CASE_INSENSITIVE); // remove '/' and '/i'

        HashSet<Long> groupMatch = new LinkedHashSet<>();

        // Groups ending in * need to be like-matched when getting out binaries for groups and children
        Matcher matcher = _wildcardPattern.matcher(releaseRegexGroupName);
        if (matcher.matches()) {
            releaseRegexGroupName = releaseRegexGroupName.substring(0, releaseRegexGroupName.length() - 1);
            List<Group> groups = groupDAO.findGroupsByName(releaseRegexGroupName);
            for (Group group : groups) {
                groupMatch.add(group.getId());
            }
        } else if (!ValidatorUtil.isNull(releaseRegexGroupName)) {
            Group group = groupDAO.getGroupByName(releaseRegexGroupName);
            if (group != null) {
                groupMatch.add(group.getId());
            }
        }

        List<Binary> binaries = new ArrayList<>();
        if (groupMatch.size() > 0) {
            // Get out all binaries of STAGE0 for current group
            binaries = binaryDAO.findByGroupIdsAndProcStat(groupMatch, Defaults.PROCSTAT_NEW);
        }

        Map<String, String> arrNoPartBinaries = new LinkedHashMap<>();
        DateTime fiveHoursAgo = DateTime.now().minusHours(5);

        // this for loop should probably be a single transaction
        for (Binary binary : binaries) {
            String testMessage = "Test run - Binary Name " + binary.getName();

            Matcher groupRegexMatcher = pattern.matcher(binary.getName());
            if (groupRegexMatcher.find()) {
                String reqIdGroup = null;
                try {
                    reqIdGroup = groupRegexMatcher.group("reqid");
                } catch (IllegalArgumentException e) {
                    _log.debug(e.toString());
                }
                String partsGroup = null;
                try {
                    partsGroup = groupRegexMatcher.group("parts");
                } catch (IllegalArgumentException e) {
                    _log.debug(e.toString());
                }
                String nameGroup = null;
                try {
                    nameGroup = groupRegexMatcher.group("name");
                } catch (Exception e) {
                    _log.debug(e.toString());
                }
                _log.debug(testMessage + " matches with: \n reqId = " + reqIdGroup + " parts = " + partsGroup
                        + " and name = " + nameGroup);

                if ((ValidatorUtil.isNotNull(reqIdGroup) && ValidatorUtil.isNumeric(reqIdGroup))
                        && ValidatorUtil.isNull(nameGroup)) {
                    nameGroup = reqIdGroup;
                }

                if (ValidatorUtil.isNull(nameGroup)) {
                    _log.warn(String.format(
                            "regex applied which didn't return right number of capture groups - %s", regex));
                    _log.warn(String.format("regex matched: reqId = %s parts = %s and name = %s", reqIdGroup,
                            partsGroup, nameGroup));
                    continue;
                }

                // If there's no number-of-files data in the subject, put it into a release
                // if it was posted to usenet longer than five hours ago.
                if ((ValidatorUtil.isNull(partsGroup) && fiveHoursAgo.isAfter(binary.getDate().getTime()))) {
                    // Take a copy of the name of this no-part release found. This can be used
                    // next time round the loop to find parts of this set, but which have not yet reached 3 hours.
                    arrNoPartBinaries.put(nameGroup, "1");
                    partsGroup = "01/01";
                }

                if (ValidatorUtil.isNotNull(nameGroup) && ValidatorUtil.isNotNull(partsGroup)) {
                    if (partsGroup.indexOf('/') == -1) {
                        // replace weird parts delimiters
                        partsGroup = partsGroup.replaceFirst("(-)|(~)|(\\sof\\s)", "/");
                    }

                    Integer regexCategoryId = releaseRegex.getCategoryId();
                    Integer reqId = null;
                    if (ValidatorUtil.isNotNull(reqIdGroup) && ValidatorUtil.isNumeric(reqIdGroup)) {
                        reqId = Integer.parseInt(reqIdGroup);
                    }

                    // check if post is repost
                    Pattern repostPattern = Pattern.compile("(repost\\d?|re\\-?up)", Pattern.CASE_INSENSITIVE);
                    Matcher binaryNameRepostMatcher = repostPattern.matcher(binary.getName());
                    if (binaryNameRepostMatcher.find()
                            && !nameGroup.toLowerCase().matches("^[\\s\\S]+(repost\\d?|re\\-?up)")) {
                        nameGroup = nameGroup + (" " + binaryNameRepostMatcher.group(1));
                    }

                    String[] partsStrings = partsGroup.split("/");
                    int relpart = Integer.parseInt(partsStrings[0]);
                    int relTotalPart = Integer.parseInt(partsStrings[1]);

                    binary.setRelName(nameGroup.replace("_", " "));
                    binary.setRelPart(relpart);
                    binary.setRelTotalPart(relTotalPart);
                    binary.setProcStat(Defaults.PROCSTAT_TITLEMATCHED);
                    binary.setCategoryId(regexCategoryId);
                    binary.setRegexId(releaseRegex.getId());
                    binary.setReqId(reqId);
                    binaryDAO.updateBinary(binary);
                }
            }
        }
    }
    transactionManager.commit(transaction);

    // this is a hack - tx is not working ATM
    transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));

    // Move all binaries from releases which have the correct number of files on to the next stage.
    _log.info("Stage 2");
    List<MatchedReleaseQuery> matchedReleaseQueries = binaryDAO
            .findBinariesByProcStatAndTotalParts(Defaults.PROCSTAT_TITLEMATCHED);
    matchedReleaseQueries = combineMatchedQueries(matchedReleaseQueries);
    int siteMinFilestoFormRelease = site.getMinFilesToFormRelease();
    for (MatchedReleaseQuery matchedReleaseQuery : matchedReleaseQueries) {
        retcount++;

        // Fewer files than the site's permitted minimum for a release: don't discard it,
        // as it may be part of a set still being uploaded.
        int minFiles = siteMinFilestoFormRelease;
        String releaseName = matchedReleaseQuery.getReleaseName();
        long matchedReleaseQueryGroup = matchedReleaseQuery.getGroup();
        Long matchedReleaseQueryNumberOfBinaries = matchedReleaseQuery.getNumberOfBinaries();
        int matchedReleaseTotalParts = matchedReleaseQuery.getReleaseTotalParts();
        String fromName = matchedReleaseQuery.getFromName();
        Integer reqId = matchedReleaseQuery.getReqId();

        Group group = groupDAO.findGroupByGroupId(matchedReleaseQueryGroup);
        if (group != null && group.getMinFilesToFormRelease() != null) {
            minFiles = group.getMinFilesToFormRelease();
        }

        if (matchedReleaseQueryNumberOfBinaries < minFiles) {
            _log.warn(String.format("Number of files in release %s less than site/group setting (%s/%s)",
                    releaseName, matchedReleaseQueryNumberOfBinaries, minFiles));
            binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                    matchedReleaseQueryGroup, fromName);
        } else if (matchedReleaseQueryNumberOfBinaries >= matchedReleaseTotalParts) {
            // Check that the binary is complete
            List<Binary> releaseBinaryList = binaryDAO.findBinariesByReleaseNameProcStatGroupIdFromName(
                    releaseName, Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
            boolean incomplete = false;
            for (Binary binary : releaseBinaryList) {
                long partsCount = partDAO.countPartsByBinaryId(binary.getId());
                if (partsCount < binary.getTotalParts()) {
                    float percentComplete = ((float) partsCount / (float) binary.getTotalParts()) * 100;
                    _log.warn(String.format("binary %s from %s has missing parts = %s/%s (%s%% complete)",
                            binary.getId(), releaseName, partsCount, binary.getTotalParts(), percentComplete));

                    // Allow the binary to release if it was posted to usenet longer than
                    // four hours ago and we still don't have all the parts
                    DateTime fourHoursAgo = DateTime.now().minusHours(4);
                    if (fourHoursAgo.isAfter(new DateTime(binary.getDate()))) {
                        _log.info("allowing incomplete binary " + binary.getId());
                    } else {
                        incomplete = true;
                    }
                }
            }

            if (incomplete) {
                _log.warn(String.format("Incorrect number of parts %s-%s-%s", releaseName,
                        matchedReleaseQueryNumberOfBinaries, matchedReleaseTotalParts));
                binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                        matchedReleaseQueryGroup, fromName);
            }
            // Right number of files, but see if the binary is an allfilled/reqid post,
            // in which case it needs its name looked up. TODO: Does this even work anymore?
            else if (ValidatorUtil.isNotNull(site.getReqIdUrl()) && ValidatorUtil.isNotNull(reqId)) {
                // Try and get the name using the group
                _log.info("Looking up " + reqId + " in " + group.getName() + "...");
                String newTitle = getReleaseNameForReqId(site.getReqIdUrl(), group, reqId, true);

                // if the feed/group wasn't supported by the scraper, then just use the release name as the title
                if (ValidatorUtil.isNull(newTitle) || newTitle.equals("no feed")) {
                    newTitle = releaseName;
                    _log.warn("Group not supported");
                }

                // Valid release with the right number of files and a title now, so move it on
                if (ValidatorUtil.isNotNull(newTitle)) {
                    binaryDAO.updateBinaryNameAndStatus(newTitle, Defaults.PROCSTAT_READYTORELEASE,
                            releaseName, Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                } else {
                    // Item not found; if the binary was added to the index ages ago, then give up.
                    Timestamp timestamp = binaryDAO.findMaxDateAddedBinaryByReleaseNameProcStatGroupIdFromName(
                            releaseName, Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                    DateTime maxAddedDate = new DateTime(timestamp);
                    DateTime twoDaysAgo = DateTime.now().minusDays(2);
                    if (maxAddedDate.isBefore(twoDaysAgo)) {
                        binaryDAO.updateBinaryNameAndStatus(releaseName,
                                Defaults.PROCSTAT_NOREQIDNAMELOOKUPFOUND, releaseName,
                                Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                        _log.warn("Not found in 48 hours");
                    }
                }
            } else {
                binaryDAO.updateBinaryNameAndStatus(releaseName, Defaults.PROCSTAT_READYTORELEASE, releaseName,
                        Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
            }
        } else {
            // There are fewer than the expected number of files, so update the attempts and move on.
            _log.info(String.format("Incorrect number of files for %s (%d/%d)", releaseName,
                    matchedReleaseQueryNumberOfBinaries, matchedReleaseTotalParts));
            binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                    matchedReleaseQueryGroup, fromName);
        }

        if (retcount % 10 == 0) {
            _log.info(String.format("-processed %d binaries stage two", retcount));
        }
    }
    transactionManager.commit(transaction);

    retcount = 0;
    int nfoCount = 0;

    // this is a hack - tx is not working ATM
    transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));

    // Get out all distinct relname, group from binaries of STAGE2
    _log.info("Stage 3");
    List<MatchedReleaseQuery> readyReleaseQueries = binaryDAO
            .findBinariesByProcStatAndTotalParts(Defaults.PROCSTAT_READYTORELEASE);
    readyReleaseQueries = combineMatchedQueries(readyReleaseQueries);
    for (MatchedReleaseQuery readyReleaseQuery : readyReleaseQueries) {
        retcount++;
        String releaseName = readyReleaseQuery.getReleaseName();
        int numParts = readyReleaseQuery.getReleaseTotalParts();
        long binaryCount = readyReleaseQuery.getNumberOfBinaries();
        long groupId = readyReleaseQuery.getGroup();

        // Get the last post date and the poster name from the binary
        String fromName = readyReleaseQuery.getFromName();
        Timestamp timestamp = binaryDAO.findMaxDateAddedBinaryByReleaseNameProcStatGroupIdFromName(releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);
        DateTime addedDate = new DateTime(timestamp);

        // Get all releases with the same name with a usenet posted date in a +1/-1 day range
        Date oneDayBefore = addedDate.minusDays(1).toDate();
        Date oneDayAfter = addedDate.plusDays(1).toDate();
        List<Release> relDupes = releaseDAO.findReleasesByNameAndDateRange(releaseName, oneDayBefore,
                oneDayAfter);
        if (!relDupes.isEmpty()) {
            binaryDAO.updateBinaryNameAndStatus(releaseName, Defaults.PROCSTAT_DUPLICATE, releaseName,
                    Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);
            continue;
        }

        // Get the total size of this release. Done in a big OR statement, not an IN,
        // as the mysql binaryID index on the parts table was not being used.
        // SM: TODO this should be revisited, using hb mappings
        long totalSize = 0;
        int regexAppliedCategoryId = 0;
        long regexIdUsed = 0;
        int reqIdUsed = 0;
        int relTotalParts = 0;
        float relCompletion;
        List<Binary> binariesForSize = binaryDAO.findBinariesByReleaseNameProcStatGroupIdFromName(releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);
        long relParts = 0;
        for (Binary binary : binariesForSize) {
            if (ValidatorUtil.isNotNull(binary.getCategoryId()) && regexAppliedCategoryId == 0) {
                regexAppliedCategoryId = binary.getCategoryId();
            }
            if (ValidatorUtil.isNotNull(binary.getRegexId()) && regexIdUsed == 0) {
                regexIdUsed = binary.getRegexId();
            }
            if (ValidatorUtil.isNotNull(binary.getReqId()) && reqIdUsed == 0) {
                reqIdUsed = binary.getReqId();
            }
            relTotalParts += binary.getTotalParts();
            relParts += partDAO.countPartsByBinaryId(binary.getId());
            totalSize += partDAO.sumPartsSizeByBinaryId(binary.getId());
        }
        relCompletion = ((float) relParts / (float) relTotalParts) * 100f;

        // Insert the release
        String releaseGuid = UUID.randomUUID().toString();
        int categoryId;
        Category category = null;
        Long regexId;
        Integer reqId;
        if (regexAppliedCategoryId == 0) {
            categoryId = categoryService.determineCategory(groupId, releaseName);
        } else {
            categoryId = regexAppliedCategoryId;
        }
        if (categoryId > 0) {
            category = categoryService.getCategory(categoryId);
        }
        if (regexIdUsed == 0) {
            regexId = null;
        } else {
            regexId = regexIdUsed;
        }
        if (reqIdUsed == 0) {
            reqId = null;
        } else {
            reqId = reqIdUsed;
        }

        // Clean the release name of '#', '@', '$', '%', '^', and other special characters
        String cleanReleaseName = releaseName.replaceAll("[^A-Za-z0-9-_\\ \\.]+", "");
        Release release = new Release();
        release.setName(cleanReleaseName);
        release.setSearchName(cleanReleaseName);
        release.setTotalpart(numParts);
        release.setGroupId(groupId);
        release.setAddDate(new Date());
        release.setGuid(releaseGuid);
        release.setCategory(category);
        release.setRegexId(regexId);
        release.setRageId((long) -1);
        release.setPostDate(addedDate.toDate());
        release.setFromName(fromName);
        release.setSize(totalSize);
        release.setReqId(reqId);
        release.setPasswordStatus(site.getCheckPasswordedRar() == 1 ? -1 : 0); // magic constants
        release.setCompletion(relCompletion);
        releaseDAO.updateRelease(release);
        long releaseId = release.getId();
        _log.info("Added release " + cleanReleaseName);

        // Tag every binary for this release with its parent release id,
        // and remove the release name from the binary as it's no longer required
        binaryDAO.updateBinaryNameStatusReleaseID("", Defaults.PROCSTAT_RELEASED, releaseId, releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);

        // Find an .nfo in the release
        ReleaseNfo releaseNfo = nfo.determineReleaseNfo(release);
        if (releaseNfo != null) {
            nfo.addReleaseNfo(releaseNfo);
            nfoCount++;
        }

        // Write the nzb to disk
        nzb.writeNZBforReleaseId(release, nzbBaseDir, true);

        if (retcount % 5 == 0) {
            _log.info("-processed " + retcount + " releases stage three");
        }
    }
    _log.info("Found " + nfoCount + " nfos in " + retcount + " releases");

    // Process nfo files
    if (site.getLookupNfo() != 1) {
        _log.info("Site config (site.lookupnfo) prevented retrieving nfos");
    } else {
        nfo.processNfoFiles(site.getLookupImdb(), site.getLookupTvRage());
    }

    // Lookup imdb if enabled
    if (site.getLookupImdb() == 1) {
        movieService.processMovieReleases();
    }

    // Lookup music if enabled
    if (site.getLookupMusic() == 1) {
        musicService.processMusicReleases();
    }

    // Lookup games if enabled
    if (site.getLookupGames() == 1) {
        gameService.processConsoleReleases();
    }

    // Check for passworded releases
    if (site.getCheckPasswordedRar() != 1) {
        _log.info("Site config (site.checkpasswordedrar) prevented checking releases are passworded");
    } else {
        processPasswordedReleases(true);
    }

    // Process all TV related releases which will assign their series/episode/rage data
    tvRageService.processTvReleases(site.getLookupTvRage() == 1);

    // Get the current datetime again, as using now() in the housekeeping queries prevents the index being used.
    DateTime now = new DateTime();

    // Tidy away any binaries which have been attempted to be grouped into
    // a release more than x times (SM: or is it days?)
    int attemptGroupBinDays = site.getAttemtpGroupBinDays();
    _log.info(String.format("Tidying away binaries which can't be grouped after %s days",
            attemptGroupBinDays));
    DateTime maxGroupBinDays = now.minusDays(attemptGroupBinDays);
    binaryDAO.updateProcStatByProcStatAndDate(Defaults.PROCSTAT_WRONGPARTS, Defaults.PROCSTAT_NEW,
            maxGroupBinDays.toDate());

    // Delete any parts and binaries which are older than the site's retention days
    int maxRetentionDays = site.getRawRetentionDays();
    DateTime maxRetentionDate = now.minusDays(maxRetentionDays);
    _log.info(String.format("Deleting parts which are older than %d days", maxRetentionDays));
    partDAO.deletePartByDate(maxRetentionDate.toDate());
    _log.info(String.format("Deleting binaries which are older than %d days", maxRetentionDays));
    binaryDAO.deleteBinaryByDate(maxRetentionDate.toDate());

    // Delete any releases which are older than the site's release retention days
    int releaseretentiondays = site.getReleaseRetentionDays();
    if (releaseretentiondays != 0) {
        _log.info("Determining any releases past retention to be deleted.");
        DateTime maxReleaseRetentionDate = DateTime.now().minusDays(releaseretentiondays);
        List<Release> releasesToDelete = releaseDAO.findReleasesBeforeDate(maxReleaseRetentionDate.toDate());
        for (Iterator<Release> iterator = releasesToDelete.iterator(); iterator.hasNext();) {
            Release release = iterator.next();
            releaseDAO.deleteRelease(release);
        }
    }
    transaction.flush(); // may be unneeded
    transactionManager.commit(transaction);
    _log.info(String.format("Processed %d releases", retcount));
    if (!transaction.isCompleted()) {
        throw new IllegalStateException("Transaction is not completed or rolled back.");
    }
    //return retcount;
}
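processReleases() drives each stage through a manually committed PROPAGATION_REQUIRED transaction and never rolls back when a stage throws. A declarative counterpart would give each stage its own REQUIRED transaction with automatic rollback on runtime exceptions, provided the stage methods are invoked through the Spring proxy (self-invocation bypasses the proxy, which may be why the manual "hack" was needed). A sketch with hypothetical stage methods:

import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

public class ReleaseStagesSketch {

    // Each stage joins the caller's transaction or starts its own;
    // an uncaught RuntimeException rolls the stage back automatically.
    @Transactional(propagation = Propagation.REQUIRED)
    public void applyRegexesStage() { /* hypothetical: stage 0/1, match binaries to regexes */ }

    @Transactional(propagation = Propagation.REQUIRED)
    public void promoteCompleteBinariesStage() { /* hypothetical: stage 2, move complete sets forward */ }

    @Transactional(propagation = Propagation.REQUIRED)
    public void formReleasesStage() { /* hypothetical: stage 3, create releases, nzbs, nfos */ }
}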
From source file:nl.nn.adapterframework.core.PipeLine.java
public void setTransacted(boolean transacted) {
    // this.transacted = transacted;
    ConfigurationWarnings configWarnings = ConfigurationWarnings.getInstance();
    if (transacted) {
        String msg = "Pipeline of [" + owner.getName()
                + "] implementing setting of transacted=true as transactionAttribute=Required";
        configWarnings.add(log, msg);
        setTransactionAttributeNum(TransactionDefinition.PROPAGATION_REQUIRED);
    } else {
        String msg = "Pipeline of [" + owner.getName()
                + "] implementing setting of transacted=false as transactionAttribute=Supports";
        configWarnings.add(log, msg);
        setTransactionAttributeNum(TransactionDefinition.PROPAGATION_SUPPORTS);
    }
}
From source file:nl.nn.adapterframework.core.PipeLine.java
public boolean isTransacted() {
    // return transacted;
    int txAtt = getTransactionAttributeNum();
    return txAtt == TransactionDefinition.PROPAGATION_REQUIRED
            || txAtt == TransactionDefinition.PROPAGATION_REQUIRES_NEW
            || txAtt == TransactionDefinition.PROPAGATION_MANDATORY;
}
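The check treats REQUIRED, REQUIRES_NEW, and MANDATORY as "transacted" because all three guarantee the pipeline runs inside a transaction: the first two create one on demand, while MANDATORY throws if the caller has none. The same test extracted as a standalone predicate:

import org.springframework.transaction.TransactionDefinition;

public final class PropagationCheckSketch {

    // True for the propagation modes that guarantee a surrounding transaction.
    static boolean guaranteesTransaction(int propagationBehavior) {
        return propagationBehavior == TransactionDefinition.PROPAGATION_REQUIRED
                || propagationBehavior == TransactionDefinition.PROPAGATION_REQUIRES_NEW
                || propagationBehavior == TransactionDefinition.PROPAGATION_MANDATORY;
    }
}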
From source file:nl.nn.adapterframework.receivers.ReceiverBase.java
/**
 * Controls the use of XA-transactions.
 */
public void setTransacted(boolean transacted) {
    // this.transacted = transacted;
    ConfigurationWarnings configWarnings = ConfigurationWarnings.getInstance();
    if (transacted) {
        String msg = getLogPrefix() + "implementing setting of transacted=true as transactionAttribute=Required";
        configWarnings.add(log, msg);
        setTransactionAttributeNum(TransactionDefinition.PROPAGATION_REQUIRED);
    } else {
        String msg = getLogPrefix() + "implementing setting of transacted=false as transactionAttribute=Supports";
        configWarnings.add(log, msg);
        setTransactionAttributeNum(TransactionDefinition.PROPAGATION_SUPPORTS);
    }
}
From source file:nl.nn.adapterframework.unmanaged.SpringJmsConnector.java
public void configureEndpointConnection(final IPortConnectedListener jmsListener,
        ConnectionFactory connectionFactory, Destination destination,
        IbisExceptionListener exceptionListener, String cacheMode, int acknowledgeMode,
        boolean sessionTransacted, String messageSelector) throws ConfigurationException {
    super.configureEndpointConnection(jmsListener, connectionFactory, destination, exceptionListener);

    // Create the Message Listener Container manually.
    // This is needed, because otherwise the Spring Factory will
    // call afterPropertiesSet() on the object which will validate
    // that all required properties are set before we get a chance
    // to insert our dynamic values from the config file.
    this.jmsContainer = createMessageListenerContainer();

    if (getReceiver().isTransacted()) {
        log.debug(getLogPrefix() + "setting transaction manager to [" + txManager + "]");
        jmsContainer.setTransactionManager(txManager);
        if (getReceiver().getTransactionTimeout() > 0) {
            jmsContainer.setTransactionTimeout(getReceiver().getTransactionTimeout());
        }
        TX = new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED);
    } else {
        log.debug(getLogPrefix() + "setting no transaction manager");
    }
    if (sessionTransacted) {
        jmsContainer.setSessionTransacted(sessionTransacted);
    }
    if (StringUtils.isNotEmpty(messageSelector)) {
        jmsContainer.setMessageSelector(messageSelector);
    }

    // Initialize with a number of dynamic properties which come from the configuration file
    jmsContainer.setConnectionFactory(getConnectionFactory());
    jmsContainer.setDestination(getDestination());
    jmsContainer.setExceptionListener(this);
    // the following is not required; the timeout set is the time waited to start a new poll attempt
    //this.jmsContainer.setReceiveTimeout(getJmsListener().getTimeOut());

    if (getReceiver().getNumThreads() > 0) {
        jmsContainer.setMaxConcurrentConsumers(getReceiver().getNumThreads());
    } else {
        jmsContainer.setMaxConcurrentConsumers(1);
    }
    jmsContainer.setIdleTaskExecutionLimit(IDLE_TASK_EXECUTION_LIMIT);

    if (StringUtils.isNotEmpty(cacheMode)) {
        jmsContainer.setCacheLevelName(cacheMode);
    } else {
        if (getReceiver().isTransacted()) {
            jmsContainer.setCacheLevel(DEFAULT_CACHE_LEVEL_TRANSACTED);
        } else {
            jmsContainer.setCacheLevel(DEFAULT_CACHE_LEVEL_NON_TRANSACTED);
        }
    }
    if (acknowledgeMode >= 0) {
        jmsContainer.setSessionAcknowledgeMode(acknowledgeMode);
    }
    jmsContainer.setMessageListener(this);

    // Use Spring BeanFactory to complete the auto-wiring of the JMS Listener Container,
    // and run the bean lifecycle methods.
    try {
        ((AutowireCapableBeanFactory) this.beanFactory).configureBean(this.jmsContainer, "proto-jmsContainer");
    } catch (BeansException e) {
        throw new ConfigurationException(getLogPrefix()
                + "Out of luck wiring up and configuring Default JMS Message Listener Container for JMS Listener ["
                + (getListener().getName() + "]"), e);
    }

    // Finally, set the bean name to something we can make sense of
    if (getListener().getName() != null) {
        jmsContainer.setBeanName(getListener().getName());
    } else {
        jmsContainer.setBeanName(getReceiver().getName());
    }
}
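For transacted receivers, the connector hands the container a transaction manager so that each message delivery (the receive plus the listener's work) is wrapped in one transaction; the PROPAGATION_REQUIRED definition stored in TX is kept for the connector's own use. A minimal, standalone sketch of the same container wiring (the queue name and parameter names are assumptions for illustration):

import javax.jms.ConnectionFactory;
import javax.jms.MessageListener;
import org.springframework.jms.listener.DefaultMessageListenerContainer;
import org.springframework.transaction.PlatformTransactionManager;

public class JmsContainerSketch {

    DefaultMessageListenerContainer buildContainer(ConnectionFactory connectionFactory,
            PlatformTransactionManager txManager, MessageListener listener) {
        DefaultMessageListenerContainer container = new DefaultMessageListenerContainer();
        container.setConnectionFactory(connectionFactory);
        container.setDestinationName("inbound.queue"); // assumption: queue name for illustration
        container.setTransactionManager(txManager);    // receive + listener work commit or roll back together
        container.setTransactionTimeout(30);           // seconds; mirrors setTransactionTimeout above
        container.setMaxConcurrentConsumers(1);
        container.setMessageListener(listener);
        return container;
    }
}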