Usage examples for java.util.Map.clear()
void clear();
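Before the real-world examples below, here is a minimal self-contained sketch of clear()'s behavior (the class and variable names are illustrative only, not taken from any source file below). clear() removes every mapping from the map in place; afterwards size() returns 0 and isEmpty() returns true. Note that clear() is an optional operation: unmodifiable maps (for example, those wrapped with Collections.unmodifiableMap) throw UnsupportedOperationException. A recurring pattern in the examples below is reusing a single map across loop iterations by clearing it, rather than allocating a new map each time.

import java.util.HashMap;
import java.util.Map;

public class MapClearDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.put("a", 1);
        counts.put("b", 2);

        counts.clear(); // removes every mapping in place

        System.out.println(counts.size());    // prints 0
        System.out.println(counts.isEmpty()); // prints true

        // Common pattern from the examples below: reuse one map across
        // iterations instead of allocating a new one per iteration.
        for (int batch = 0; batch < 3; batch++) {
            counts.put("batch", batch);
            // ... process this batch's entries ...
            counts.clear(); // reset for the next iteration
        }
    }
}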
From source file: edu.msu.cme.rdp.seqmatch.cli.SeqmatchCheckRevSeq.java
/**
 * Do sequence match without checking the reverse string.
 *
 * @param inFileName
 * @param outFileName
 * @param traineeFile
 * @param numOfResults
 * @param format
 * @throws IOException
 */
public void doUserLibMatch(String inFileName, String traineeFile, String outFileName, int numOfResults,
        String format, String traineeDesc) throws IOException {
    SeqMatch match = CreateMultiMatchFromFile.getMultiMatch(new File(traineeFile));
    if (traineeDesc == null) {
        traineeDesc = traineeFile;
    }
    BufferedWriter writer = new BufferedWriter(new FileWriter(outFileName));
    Map<String, SeqMatchResultSet> resultMap = new HashMap<>();
    Sequence seq;
    SequenceReader reader = new SequenceReader(new File(inFileName));
    while ((seq = reader.readNextSequence()) != null) {
        SeqMatchResultSet resultSet = match.match(seq, numOfResults);
        resultMap.put(seq.getSeqName(), resultSet);
        printResult(resultMap, format, writer, traineeDesc);
        // reuse the same map for the next sequence instead of allocating a new one
        resultMap.clear();
    }
    reader.close();
    writer.close();
}
From source file: DvdLibraryDaoTest.java
@Test
public void findDvds() throws ParseException {
    Dvd d = new Dvd();
    d.setTitle("Whiplash");
    d.setReleaseDate(sdf.parse("10/15/2014"));
    d.setMpaaRating("R");
    d.setDirectorName("Damien Chazelle");
    d.setStudio("Sony Pictures");
    d.setUserNote("Provocative and emotional");
    dao.addDVD(d);

    Dvd d2 = new Dvd();
    d2.setTitle("Jurassic World");
    d2.setReleaseDate(sdf.parse("06/10/2015"));
    d2.setMpaaRating("PG-13");
    d2.setDirectorName("Colin Trevorrow");
    d2.setStudio("Amblin Entertainment");
    d2.setUserNote("Exciting 3D action adventure movie");
    dao.addDVD(d2);

    Dvd d3 = new Dvd();
    d3.setTitle("Children of Men");
    d3.setReleaseDate(sdf.parse("01/05/2007"));
    d3.setMpaaRating("R");
    d3.setDirectorName("Alfonso Cuaron");
    d3.setStudio("Universal Pictures");
    d3.setUserNote("A good mix of thriller, drama and action");
    dao.addDVD(d3);

    Dvd d4 = new Dvd();
    d4.setTitle("Y Tu Mama Tambien");
    d4.setReleaseDate(sdf.parse("04/26/2002"));
    d4.setMpaaRating("R");
    d4.setDirectorName("Alfonso Cuaron");
    d4.setStudio("20th Century Fox");
    d4.setUserNote("Haven't seen it yet");
    dao.addDVD(d4);

    Map<SearchTerm, String> criteria = new HashMap<>();
    criteria.put(SearchTerm.DIRECTOR_NAME, "Alfonso Cuaron");
    List<Dvd> cList = dao.searchDvds(criteria);
    Assert.assertEquals(2, cList.size());
    Assert.assertEquals(d3, cList.get(0));

    criteria.put(SearchTerm.DIRECTOR_NAME, "George Lucas");
    cList = dao.searchDvds(criteria);
    Assert.assertEquals(0, cList.size());

    // reuse the criteria map for the next search
    criteria.clear();
    criteria.put(SearchTerm.MPAA_RATING, "R");
    cList = dao.searchDvds(criteria);
    Assert.assertEquals(3, cList.size());
    Assert.assertEquals(d, cList.get(0));

    criteria.clear();
    criteria.put(SearchTerm.TITLE, "Jurassic World");
    cList = dao.searchDvds(criteria);
    Assert.assertEquals(1, cList.size());
    Assert.assertEquals(d2, cList.get(0));
    Assert.assertEquals("Amblin Entertainment", cList.get(0).getStudio());
}
From source file: com.flexive.war.javascript.tree.ContentTreeWriter.java
private void writeContentNode(FxEnvironment environment, TreeNodeWriter writer, FxTreeNode node,
        Map<String, Object> properties, List<String> actionsDisabled, boolean pathMode) throws IOException {
    final boolean liveTreeEnabled;
    try {
        liveTreeEnabled = EJBLookup.getConfigurationEngine().get(SystemParameters.TREE_LIVE_ENABLED);
    } catch (FxApplicationException e) {
        throw e.asRuntimeException();
    }
    // the caller's maps are reused for every node, so reset them first
    properties.clear();
    actionsDisabled.clear();
    properties.put("objectId", node.getId());
    properties.put("widgetId", "node_" + node.getId());
    properties.put("isDirty", liveTreeEnabled && node.isDirty());
    properties.put("mayEdit", node.isMayEdit());
    final boolean locked = isLocked(node);
    properties.put("locked", locked);
    if (node.hasReference()) {
        properties.put("referenceId", node.getReference().getId());
        properties.put("referenceTypeId", node.getReferenceTypeId());
    }
    setAllowedActions(actionsDisabled, node, liveTreeEnabled);
    if (actionsDisabled.size() > 0) {
        properties.put("actionsDisabled", actionsDisabled);
    }
    final String docType;
    if (locked) {
        docType = node.getLock().getUserId() == FxContext.getUserTicket().getUserId() ? DOCTYPE_LOCKED_OWN
                : DOCTYPE_LOCKED;
    } else if (node.getReferenceTypeId() != -1) {
        docType = DOCTYPE_CONTENT + node.getReferenceTypeId();
    } else {
        docType = DOCTYPE_NODE;
    }
    final String label = pathMode ? node.getName() : node.getLabel().getBestTranslation();
    properties.put("nodeText", label);
    if (node.isLeaf()) {
        writer.writeNode(new Node(String.valueOf(node.getId()), label, docType, properties));
    } else {
        if (node.getChildren().size() == 0 && node.getDirectChildCount() > 0) {
            properties.put("isFolder", true);
        }
        if (environment.getType(node.getReferenceTypeId()).isDerivedFrom(FxType.FOLDER)) {
            properties.put("isFolderType", true);
        }
        writer.startNode(new Node(String.valueOf(node.getId()),
                label + " [" + node.getDirectChildCount() + "]", docType, properties));
        writer.startChildren();
        int count = 0;
        for (FxTreeNode child : node.getChildren()) {
            writeContentNode(environment, writer, child, properties, actionsDisabled, pathMode);
            if (node.getId() != FxTreeNode.ROOT_NODE && ++count > MAX_CHILD_NODES) {
                // render placeholder, skip rest of nodes
                writer.writeNode(new Node("-1", "...", DOCTYPE_CONTENT, new HashMap<String, Object>(0)));
                break;
            }
        }
        writer.closeChildren();
        writer.closeNode();
    }
}
From source file: com.mycompany.sparkrentals.client.RentalSolrClientTest.java
/**
 * Test of searchRentals method, of class RentalSolrClient.
 */
@Test
public void testSearchRentalByExactFilter() throws IOException, SolrServerException {
    Map<String, Object> data = new HashMap<>();

    // test city constraint
    data.put("city", "city1");
    List<Rental> rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.getCity(), "city1");
    }

    // test province constraint
    data.clear();
    data.put("province", "province2");
    rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.getProvince(), "province2");
    }

    // test country constraint
    data.clear();
    data.put("country", "country9");
    rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.getCountry(), "country9");
    }

    // test zip code constraint
    data.clear();
    data.put("zipCode", "fjkso");
    rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.getZipCode(), "fjkso");
    }

    // test type constraint
    data.clear();
    data.put("type", "Villa");
    rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.getType(), "Villa");
    }

    // test has air condition constraint
    data.clear();
    data.put("hasAirCondition", "Yes");
    rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.isHasAirCondition(), true);
    }

    // test has pool
    data.clear();
    data.put("hasPool", "Yes");
    rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.isHasPool(), true);
    }

    // test has garden
    data.clear();
    data.put("hasGarden", "Yes");
    rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.isHasGarden(), true);
    }

    // test is close to beach
    data.clear();
    data.put("isCloseToBeach", "Yes");
    rentals = client.searchRentals(data, 20).getBeans(Rental.class);
    assertTrue(rentals.size() > 0);
    for (Rental rental : rentals) {
        assertEquals(rental.isIsCloseToBeach(), true);
    }
}
From source file: com.redhat.rhtracking.core.services.InvoiceServiceHandler.java
@Override
public Map<String, Object> updateInvoice(Map<String, Object> event) {
    if (event.containsKey("invoiceNumber")) {
        event.put("status", InvoiceStatus.CONFIRMED.toString());
        Map<String, Object> request = new HashMap<>();
        request.put("invoiceOrderId", (long) event.get("id"));
        Map<String, Object> opportunity = opportunityPersistanceService.findBy(request);
        Map<String, Object> opportunityHours = opportunityPersistanceService.listOpportunityHours(opportunity);
        List<Map<String, Object>> listHours = (List<Map<String, Object>>) opportunityHours.get("list");
        request.clear();
        request.put("id", event.get("id"));
        Map<String, Object> invoice = invoicePersistanceService.findInvoiceById(event);
        for (Map<String, Object> ihour : (List<Map<String, Object>>) invoice.get("listHours")) {
            HourType invoicetype = HourType.valueOf((String) ihour.get("type"));
            for (Map<String, Object> ohour : listHours) {
                HourType opportunitytype = HourType.valueOf((String) ohour.get("hourType"));
                if (invoicetype == opportunitytype) {
                    int billedhours = (int) ohour.get("billedHours") + (int) ihour.get("quantity");
                    ohour.put("billedHours", billedhours);
                    opportunityPersistanceService.updateOpportunityHours(ohour);
                    break;
                }
            }
        }
        // update invoice status
        invoice.put("invoiceStatus", InvoiceStatus.CONFIRMED.toString());
        invoice.put("invoiceNumber", event.get("invoiceNumber"));
        return invoicePersistanceService.updateInvoice(invoice);
    }
    throw new UnsupportedOperationException("not supported yet");
}
From source file: edu.uci.ics.hyracks.api.rewriter.ActivityClusterGraphRewriter.java
/**
 * Rewrite an activity cluster internally.
 *
 * @param ac
 *            the activity cluster to be rewritten
 */
private void rewriteIntraActivityCluster(ActivityCluster ac,
        Map<IActivity, SuperActivity> invertedActivitySuperActivityMap) {
    Map<ActivityId, IActivity> activities = ac.getActivityMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityInputMap = ac.getActivityInputMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityOutputMap = ac.getActivityOutputMap();
    Map<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> connectorActivityMap = ac
            .getConnectorActivityMap();
    ActivityClusterGraph acg = ac.getActivityClusterGraph();
    Map<ActivityId, IActivity> startActivities = new HashMap<ActivityId, IActivity>();
    Map<ActivityId, SuperActivity> superActivities = new HashMap<ActivityId, SuperActivity>();
    Map<ActivityId, Queue<IActivity>> toBeExpendedMap = new HashMap<ActivityId, Queue<IActivity>>();

    /**
     * Build the initial super activities
     */
    for (Entry<ActivityId, IActivity> entry : activities.entrySet()) {
        ActivityId activityId = entry.getKey();
        IActivity activity = entry.getValue();
        if (activityInputMap.get(activityId) == null) {
            startActivities.put(activityId, activity);
            /**
             * use the start activity's id as the id of the super activity
             */
            createNewSuperActivity(ac, superActivities, toBeExpendedMap, invertedActivitySuperActivityMap,
                    activityId, activity);
        }
    }

    /**
     * expand one-to-one connected activity clusters in BFS order.
     * after the while-loop, the original activities are partitioned
     * into equivalence classes, one per super activity.
     */
    Map<ActivityId, SuperActivity> clonedSuperActivities = new HashMap<ActivityId, SuperActivity>();
    while (toBeExpendedMap.size() > 0) {
        clonedSuperActivities.clear();
        clonedSuperActivities.putAll(superActivities);
        for (Entry<ActivityId, SuperActivity> entry : clonedSuperActivities.entrySet()) {
            ActivityId superActivityId = entry.getKey();
            SuperActivity superActivity = entry.getValue();

            /**
             * for the case where the super activity has already been swallowed
             */
            if (superActivities.get(superActivityId) == null) {
                continue;
            }

            /**
             * expand the super activity
             */
            Queue<IActivity> toBeExpended = toBeExpendedMap.get(superActivityId);
            if (toBeExpended == null) {
                /**
                 * nothing to expand
                 */
                continue;
            }
            IActivity expendingActivity = toBeExpended.poll();
            List<IConnectorDescriptor> outputConnectors = activityOutputMap
                    .get(expendingActivity.getActivityId());
            if (outputConnectors != null) {
                for (IConnectorDescriptor outputConn : outputConnectors) {
                    Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = connectorActivityMap
                            .get(outputConn.getConnectorId());
                    IActivity newActivity = endPoints.getRight().getLeft();
                    SuperActivity existingSuperActivity = invertedActivitySuperActivityMap.get(newActivity);
                    if (outputConn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
                        /**
                         * expand the super activity cluster on a one-to-one outbound connection
                         */
                        if (existingSuperActivity == null) {
                            superActivity.addActivity(newActivity);
                            toBeExpended.add(newActivity);
                            invertedActivitySuperActivityMap.put(newActivity, superActivity);
                        } else {
                            /**
                             * the two activities are already in the same super activity
                             */
                            if (existingSuperActivity == superActivity) {
                                continue;
                            }
                            /**
                             * swallow an existing super activity
                             */
                            swallowExistingSuperActivity(superActivities, toBeExpendedMap,
                                    invertedActivitySuperActivityMap, superActivity, superActivityId,
                                    existingSuperActivity);
                        }
                    } else {
                        if (existingSuperActivity == null) {
                            /**
                             * create a new super activity
                             */
                            createNewSuperActivity(ac, superActivities, toBeExpendedMap,
                                    invertedActivitySuperActivityMap, newActivity.getActivityId(), newActivity);
                        }
                    }
                }
            }

            /**
             * remove the to-be-expended queue if it is empty
             */
            if (toBeExpended.size() == 0) {
                toBeExpendedMap.remove(superActivityId);
            }
        }
    }

    Map<ConnectorDescriptorId, IConnectorDescriptor> connMap = ac.getConnectorMap();
    Map<ConnectorDescriptorId, RecordDescriptor> connRecordDesc = ac.getConnectorRecordDescriptorMap();
    Map<SuperActivity, Integer> superActivityProducerPort = new HashMap<SuperActivity, Integer>();
    Map<SuperActivity, Integer> superActivityConsumerPort = new HashMap<SuperActivity, Integer>();
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        superActivityProducerPort.put(entry.getValue(), 0);
        superActivityConsumerPort.put(entry.getValue(), 0);
    }

    /**
     * create a new activity cluster to replace the old activity cluster
     */
    ActivityCluster newActivityCluster = new ActivityCluster(acg, ac.getId());
    newActivityCluster.setConnectorPolicyAssignmentPolicy(ac.getConnectorPolicyAssignmentPolicy());
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        newActivityCluster.addActivity(entry.getValue());
        acg.getActivityMap().put(entry.getKey(), newActivityCluster);
    }

    /**
     * Setup connectors: either inside a super activity or among super activities
     */
    for (Entry<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> entry : connectorActivityMap
            .entrySet()) {
        ConnectorDescriptorId connectorId = entry.getKey();
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = entry.getValue();
        IActivity producerActivity = endPoints.getLeft().getLeft();
        IActivity consumerActivity = endPoints.getRight().getLeft();
        int producerPort = endPoints.getLeft().getRight();
        int consumerPort = endPoints.getRight().getRight();
        RecordDescriptor recordDescriptor = connRecordDesc.get(connectorId);
        IConnectorDescriptor conn = connMap.get(connectorId);
        if (conn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
            /**
             * connection edge between inner activities
             */
            SuperActivity residingSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            residingSuperActivity.connect(conn, producerActivity, producerPort, consumerActivity, consumerPort,
                    recordDescriptor);
        } else {
            /**
             * connection edge between super activities
             */
            SuperActivity producerSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            SuperActivity consumerSuperActivity = invertedActivitySuperActivityMap.get(consumerActivity);
            int producerSAPort = superActivityProducerPort.get(producerSuperActivity);
            int consumerSAPort = superActivityConsumerPort.get(consumerSuperActivity);
            newActivityCluster.addConnector(conn);
            newActivityCluster.connect(conn, producerSuperActivity, producerSAPort, consumerSuperActivity,
                    consumerSAPort, recordDescriptor);

            /**
             * bridge the port
             */
            producerSuperActivity.setClusterOutputIndex(producerSAPort, producerActivity.getActivityId(),
                    producerPort);
            consumerSuperActivity.setClusterInputIndex(consumerSAPort, consumerActivity.getActivityId(),
                    consumerPort);
            acg.getConnectorMap().put(connectorId, newActivityCluster);

            /**
             * increase the port numbers for the producer and consumer
             */
            superActivityProducerPort.put(producerSuperActivity, ++producerSAPort);
            superActivityConsumerPort.put(consumerSuperActivity, ++consumerSAPort);
        }
    }

    /**
     * Set up the roots of the new activity cluster
     */
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        List<IConnectorDescriptor> connIds = newActivityCluster.getActivityOutputMap().get(entry.getKey());
        if (connIds == null || connIds.size() == 0) {
            newActivityCluster.addRoot(entry.getValue());
        }
    }

    /**
     * set up the blocked2Blocker mapping, which will be updated in the rewriteInterActivityCluster call
     */
    newActivityCluster.getBlocked2BlockerMap().putAll(ac.getBlocked2BlockerMap());

    /**
     * replace the old activity cluster with the new activity cluster
     */
    acg.getActivityClusterMap().put(ac.getId(), newActivityCluster);
}
From source file: co.cask.cdap.internal.app.services.http.handlers.DashboardHttpHandlerTest.java
@Test
public void testProperties() throws Exception {
    Map<String, Integer> intMap = Maps.newHashMap();
    intMap.put("k1", 123);
    intMap.put("k2", 324);
    String dash = createDashboard("newspace", GSON.toJson(intMap), 200);
    JsonObject jsonObject = getContents("newspace", dash, 200).getAsJsonObject().get("config")
            .getAsJsonObject();
    Assert.assertEquals(2, jsonObject.entrySet().size());
    Assert.assertEquals(123, jsonObject.get("k1").getAsInt());
    Assert.assertEquals(324, jsonObject.get("k2").getAsInt());

    Map<String, String> propMap = Maps.newHashMap();
    propMap.put("k2", "value2");
    propMap.put("k1", "value1");
    addProperty("newspace", dash, propMap, 200);
    jsonObject = getContents("newspace", dash, 200).getAsJsonObject().get("config").getAsJsonObject();
    Assert.assertEquals(2, jsonObject.entrySet().size());
    Assert.assertEquals("value1", jsonObject.get("k1").getAsString());
    Assert.assertEquals("value2", jsonObject.get("k2").getAsString());

    // reuse the same map for the next dashboard's properties
    propMap.clear();
    propMap.put("m1", "n1");
    String anotherDash = createDashboard("newspace", GSON.toJson(propMap), 200);
    jsonObject = getContents("newspace", anotherDash, 200).getAsJsonObject().get("config").getAsJsonObject();
    Assert.assertEquals(1, jsonObject.entrySet().size());
    Assert.assertEquals("n1", jsonObject.get("m1").getAsString());

    addProperty("newspace", anotherDash, new HashMap<String, String>(), 200);
    jsonObject = getContents("newspace", anotherDash, 200).getAsJsonObject().get("config").getAsJsonObject();
    Assert.assertEquals(0, jsonObject.entrySet().size());

    String str = "some123 random string!@#";
    createDashboard("space", str, 400);

    deleteDashboard("newspace", dash, 200);
    deleteDashboard("newspace", dash, 404);
    deleteDashboard("newspace", anotherDash, 200);
}
From source file: com.ibm.jaggr.core.impl.AbstractAggregatorImplTest.java
/**
 * Ensure that an exception is thrown when attempting to add alias paths that overlap
 * (i.e. the path being added is a child or parent of an existing path).
 *
 * @throws Exception
 */
@Test
public void testAddAlias_overlappingPathsValidation() throws Exception {
    TestAggregatorImpl aggregator = EasyMock.createMockBuilder(TestAggregatorImpl.class).createMock();
    Map<String, URI> map = new HashMap<String, URI>();
    URI res = new URI("/test/resource");
    aggregator.addAlias("/test", res, "", map);
    try {
        aggregator.addAlias("/test/sub", res, "", map);
        Assert.fail("Expected exception");
    } catch (IllegalArgumentException e) {
    }
    // start over with an empty alias map
    map.clear();
    aggregator.addAlias("/test/sub1", res, "", map);
    aggregator.addAlias("/test/sub2", res, "", map);
    try {
        aggregator.addAlias("/test", res, "", map);
        Assert.fail("Expected exception");
    } catch (IllegalArgumentException e) {
    }
    try {
        aggregator.addAlias("/test/sub1/foo", res, "", map);
        Assert.fail("Expected exception");
    } catch (IllegalArgumentException e) {
    }
}
From source file: org.ehoffman.aopalliance.extensions.scope.SpringTestScope.java
private void callAllDestroyCallbacks(final Map<String, Runnable> destroyCallbackMap) {
    try {
        synchronized (destroyCallbackMap) {
            for (final String name : new ArrayList<String>(destroyCallbackMap.keySet())) {
                try {
                    remove(name);
                } catch (final Throwable t) {
                    LOGGER.error("Exception thrown while destroying bean", t);
                    t.printStackTrace();
                }
            }
            destroyCallbackMap.clear();
            destroyCallbackMaps.remove(destroyCallbackMap);
        }
    } catch (final Throwable t) {
        t.printStackTrace();
        throw new RuntimeException(t);
    }
}
From source file: com.icesoft.faces.webapp.xmlhttp.PersistentFacesState.java
/**
 * Execute the view associated with this <code>PersistentFacesState</code>.
 * This is typically followed immediately by a call to
 * {@link PersistentFacesState#render}.
 * <p/>
 * This method obtains and releases the monitor on the FacesContext object.
 * If starting a JSF lifecycle causes 3rd party frameworks to perform locking
 * of their resources, releasing this monitor between the call to this method
 * and the call to {@link PersistentFacesState#render} can allow deadlocks
 * to occur. Use {@link PersistentFacesState#executeAndRender} instead.
 *
 * @deprecated this method should not be exposed
 */
public void execute() throws RenderingException {
    failIfDisposed();
    BridgeFacesContext facesContext = null;
    try {
        view.acquireLifecycleLock();
        view.installThreadLocals();
        facesContext = view.getFacesContext();
        // For JSF 1.1, with the inputFile, we need the execute phases to
        // actually happen, which wasn't the case when the following code
        // only ran for JSF 1.2. These are the options we have for JSF 1.1:
        // A. facesContext.renderResponse() skips the phases, so the
        //    FileUploadPhaseListener can't work. We used to do this.
        // B. Doing nothing means that the old values that
        //    FileUploadPhaseListener puts in the RequestParameterMap stick
        //    around for subsequent file upload lifecycles (no problem) and
        //    server pushes (might cause duplicate inputFile.actionListener
        //    calls). As well as the values from the last user interaction,
        //    which might cause problems too.
        // C. Clearing the RequestParameterMap leads to skipping the execute,
        //    so that's not sufficient.
        // D. Just doing the same thing for JSF 1.1 as JSF 1.2 seems to work.
        if (true) { // if (ImplementationUtil.isJSF12()) {
            // facesContext.renderResponse() skips phase listeners in JSF 1.2,
            // so do a full execute with no stale input instead
            Map requestParameterMap = facesContext.getExternalContext().getRequestParameterMap();
            requestParameterMap.clear();
            if (SeamUtilities.isSeamEnvironment()) {
                // ICE-2990/JBSEAM-3426 must have empty requestAttributes for push to work with Seam
                ((BridgeExternalContext) facesContext.getExternalContext()).removeSeamAttributes();
            }
            // Seam appears to need ViewState set during push, see below
            // requestParameterMap.put("javax.faces.ViewState", "ajaxpush");

            // If state saving is turned on, we need to insert the saved state
            // restoration key for this user into the request map for JSF.
            // Even if no state saving, inserting this key will at least
            // cause the JSF lifecycle to run, which we want to do for
            // consistency.
            String postback;
            if (ImplementationUtil.isJSFStateSaving() && stateRestorationId != null) {
                postback = stateRestorationId;
            } else {
                postback = "not reload";
            }
            facesContext.getExternalContext().getRequestParameterMap().put(BridgeExternalContext.PostBackKey,
                    postback);
        }
        lifecycle.execute(facesContext);
    } catch (Exception e) {
        release();
        view.releaseLifecycleLock();
        String viewID = "Unknown View";
        try {
            viewID = facesContext.getViewRoot().getViewId();
        } catch (NullPointerException npe) {
        }
        log.error("Exception occurred during execute push on " + viewID, e);
        throwRenderingException(e);
    }
}