List of usage examples for java.util.Collections.synchronizedList(List)
public static <T> List<T> synchronizedList(List<T> list)
From source file:voldemort.client.rebalance.AbstractRebalanceTest.java
/**
 * Verifies that server-side-routed reads keep succeeding while a rebalance moves
 * partitions 2 and 3 from node 0 to node 1 in a two-node cluster.
 *
 * One worker thread continuously reads known keys through socket stores on node 1
 * (both the RW and RO store) until {@code rebalancingToken} flips; a second thread
 * drives the rebalance and then sets the token. Any unexpected exception from either
 * thread is collected and fails the test at the end.
 */
@Test
public void testServerSideRouting() throws Exception {
    // Two-node cluster: node 0 owns p0-p6, node 1 owns p7-p8.
    final Cluster currentCluster = ServerTestUtils.getLocalCluster(2,
            new int[][] { { 0, 1, 2, 3, 4, 5, 6 }, { 7, 8 } });
    // Target topology: partitions 2 and 3 move to node 1.
    final Cluster targetCluster = RebalanceUtils.createUpdatedCluster(currentCluster, 1,
            Lists.newArrayList(2, 3));
    final List<Integer> serverList = Arrays.asList(0, 1);
    final Cluster updatedCurrentCluster = startServers(currentCluster, storeDefFileWithReplication,
            serverList, null);
    final Cluster updatedTargetCluster = updateCluster(targetCluster);

    ExecutorService executors = Executors.newFixedThreadPool(2);
    // Flipped by the rebalance thread once rebalancing + metadata checks are done;
    // the reader thread loops until it sees the flip.
    final AtomicBoolean rebalancingToken = new AtomicBoolean(false);
    // Shared across both worker threads, hence the synchronized wrapper.
    final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());

    // populate data now.
    RebalanceClientConfig rebalanceClientConfig = new RebalanceClientConfig();
    rebalanceClientConfig.setMaxParallelRebalancing(2);
    final RebalanceController rebalanceClient = new RebalanceController(
            getBootstrapUrl(updatedCurrentCluster, 0), rebalanceClientConfig);

    // Populate the two stores (read-only and read-write variants).
    populateData(updatedCurrentCluster, roStoreDefWithReplication, rebalanceClient.getAdminClient(), true);
    populateData(updatedCurrentCluster, rwStoreDefWithReplication, rebalanceClient.getAdminClient(), false);

    // Reads go through node 1 with server-side routing enabled (last arg = true).
    Node node = updatedCurrentCluster.getNodeById(1);
    final Store<ByteArray, byte[], byte[]> serverSideRoutingStoreRW = getSocketStore(testStoreNameRW,
            node.getHost(), node.getSocketPort(), true);
    final Store<ByteArray, byte[], byte[]> serverSideRoutingStoreRO = getSocketStore(testStoreNameRO,
            node.getHost(), node.getSocketPort(), true);

    // Counted down by the reader thread when it exits its loop; the rebalance
    // thread waits on it before stopping the servers.
    final CountDownLatch latch = new CountDownLatch(1);

    // start get operation.
    executors.execute(new Runnable() {

        public void run() {
            try {
                List<String> keys = new ArrayList<String>(testEntries.keySet());
                int nRequests = 0;
                while (!rebalancingToken.get()) {
                    // should always able to get values.
                    int index = (int) (Math.random() * keys.size());
                    // should get a valid value
                    try {
                        nRequests++;
                        List<Versioned<byte[]>> values = serverSideRoutingStoreRW
                                .get(new ByteArray(ByteUtils.getBytes(keys.get(index), "UTF-8")), null);
                        assertEquals("serverSideRoutingStore should return value.", 1, values.size());
                        assertEquals("Value returned should be good",
                                new Versioned<String>(testEntries.get(keys.get(index))),
                                new Versioned<String>(
                                        ByteUtils.getString(values.get(0).getValue(), "UTF-8"),
                                        values.get(0).getVersion()));
                        values = serverSideRoutingStoreRO
                                .get(new ByteArray(ByteUtils.getBytes(keys.get(index), "UTF-8")), null);
                        assertEquals("serverSideRoutingStore should return value.", 1, values.size());
                        assertEquals("Value returned should be good",
                                new Versioned<String>(testEntries.get(keys.get(index))),
                                new Versioned<String>(
                                        ByteUtils.getString(values.get(0).getValue(), "UTF-8"),
                                        values.get(0).getVersion()));
                    } catch (UnreachableStoreException e) {
                        // ignore — transient unavailability is expected mid-rebalance
                    } catch (Exception e) {
                        exceptions.add(e);
                    }
                }
                latch.countDown();
            } catch (Exception e) {
                exceptions.add(e);
            }
        }
    });

    executors.execute(new Runnable() {

        public void run() {
            try {
                Thread.sleep(500);
                rebalanceAndCheck(updatedCurrentCluster, updatedTargetCluster, storeDefWithReplication,
                        rebalanceClient, Arrays.asList(0, 1));
                Thread.sleep(500);
                rebalancingToken.set(true);
                checkConsistentMetadata(targetCluster, serverList);
            } catch (Exception e) {
                exceptions.add(e);
            } finally {
                // stop servers as soon as the client thread has exited its loop.
                try {
                    latch.await(300, TimeUnit.SECONDS);
                    stopServer(serverList);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        }
    });

    executors.shutdown();
    executors.awaitTermination(300, TimeUnit.SECONDS);

    // check No Exception
    if (exceptions.size() > 0) {
        for (Exception e : exceptions) {
            e.printStackTrace();
        }
        fail("Should not see any exceptions !!");
    }
}
From source file:ti.modules.titanium.ui.widget.collectionview.TiCollectionView.java
/**
 * Builds the native RecyclerView-backed collection view for the given proxy.
 *
 * Wires up: a synchronized section list, the grid layout manager, an anonymous
 * RecyclerView subclass with focus/cursor-preserving onLayout behavior, the swipe
 * menu touch listener, and a scroll listener that fires Titanium scrollstart /
 * scroll / scrollend events.
 *
 * @param proxy    the owning Titanium view proxy
 * @param activity the activity used to construct native views
 */
public TiCollectionView(TiViewProxy proxy, Activity activity) {
    super(proxy);
    // initializing variables; sections is mutated from multiple places, hence synchronized
    sections = Collections.synchronizedList(new ArrayList<AbsListSectionProxy>());
    itemTypeCount = new AtomicInteger(CUSTOM_TEMPLATE_ITEM_TYPE);
    defaultTemplateBinding = defaultTemplateKey;
    defaultTemplate.setType(BUILT_IN_TEMPLATE_ITEM_TYPE);
    processTemplates(null);
    caseInsensitive = true;
    ignoreExactMatch = false;

    // handling marker: apply any marker set on the proxy before this view existed
    HashMap<String, Integer> preloadMarker = ((AbsListViewProxy) proxy).getPreloadMarker();
    if (preloadMarker != null) {
        setMarker(preloadMarker);
    } else {
        resetMarker();
    }

    final KrollProxy fProxy = proxy;
    layoutManager = new TiGridLayoutManager(activity);

    // trick to get scrollbar to be initialized!
    mRecyclerView = new FastScrollRecyclerView(activity, null,
            Resources.getSystem().getIdentifier("listViewStyle", "attr", "android")) {

        // true when setSelection() was just called from onLayout (see below)
        private boolean selectionSet = false;

        @Override
        protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
            // Starting with API 21, setSelection() triggers another layout pass,
            // so we need to end it here to prevent an infinite loop
            if (Build.VERSION.SDK_INT >= 21 && selectionSet) {
                selectionSet = false;
                return;
            }
            // Temporarily detach the focus listener of the focused descendant so the
            // layout pass does not fire spurious blur events; remember the EditText
            // cursor position so it can be restored afterwards.
            OnFocusChangeListener focusListener = null;
            View focusedView = findFocus();
            int cursorPosition = -1;
            if (focusedView != null) {
                if (focusedView instanceof EditText) {
                    cursorPosition = ((EditText) focusedView).getSelectionStart();
                }
                OnFocusChangeListener listener = focusedView.getOnFocusChangeListener();
                if (listener != null && listener instanceof TiUIView) {
                    focusedView.setOnFocusChangeListener(null);
                    focusListener = listener;
                }
            }
            if (focusedView != null) {
                setDescendantFocusability(ViewGroup.FOCUS_BLOCK_DESCENDANTS);
            }
            super.onLayout(changed, left, top, right, bottom);
            setDescendantFocusability(ViewGroup.FOCUS_AFTER_DESCENDANTS);
            if (changed) {
                TiUIHelper.firePostLayoutEvent(TiCollectionView.this);
            }
            // Layout is finished, re-enable focus events.
            if (focusedView != null) {
                // If the configuration changed, we manually fire the blur event
                if (changed) {
                    focusedView.setOnFocusChangeListener(focusListener);
                    if (focusListener != null) {
                        focusListener.onFocusChange(focusedView, false);
                    }
                } else {
                    // Ok right now focus is with listView. So set it back to the focusedView
                    focusedView.requestFocus();
                    focusedView.setOnFocusChangeListener(focusListener);
                    // Restore cursor position
                    if (cursorPosition != -1) {
                        ((EditText) focusedView).setSelection(cursorPosition);
                        selectionSet = true;
                    }
                }
            }
        }

        @Override
        protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
            super.onMeasure(widthMeasureSpec, heightMeasureSpec);
            // Non-fixed column widths depend on the measured size, so recompute here.
            if (mColumnsWidth != null && !mColumnsWidth.isUnitFixed()) {
                layoutManager.setColumnWidth(mColumnsWidth.getAsPixels(this));
            }
            layoutManager.requestColumnUpdate();
        }

        @Override
        public void dispatchSetPressed(boolean pressed) {
            if (propagateSetPressed(this, pressed)) {
                super.dispatchSetPressed(pressed);
            }
        }

        @Override
        public boolean dispatchTouchEvent(MotionEvent event) {
            if (touchPassThrough(getParentViewForChild(), event))
                return false;
            return super.dispatchTouchEvent(event);
        }

        @Override
        protected void dispatchDraw(Canvas canvas) {
            try {
                super.dispatchDraw(canvas);
            } catch (IndexOutOfBoundsException e) {
                // samsung error — swallow a known vendor-specific drawing crash
            }
        }
    };

    mAdapter = new TiBaseAdapter(mRecyclerView.getContext(), null);
    mAdapter.setDisplayHeaders(true);
    mSwipeMenuTouchListener = new SwipeMenuTouchListener(this);
    mRecyclerView.addOnItemTouchListener(mSwipeMenuTouchListener);
    mRecyclerView.setItemAnimator(new TiItemAnimator());
    mRecyclerView.setHorizontalScrollBarEnabled(false);
    mRecyclerView.setVerticalScrollBarEnabled(true);
    mRecyclerView.setHasFixedSize(true);
    layoutManager.setSmoothScrollbarEnabled(true);

    // Translate RecyclerView scroll-state transitions into Titanium
    // scrollstart / scroll / scrollend events. scrollend is debounced through a
    // 200 ms Timer so that settling does not fire it prematurely.
    mRecyclerView.addOnScrollListener(new OnScrollListener() {
        private boolean scrollTouch = false;
        private Timer endTimer = null;

        // Cancel any pending debounced scrollend.
        public void cancelEndCall() {
            if (endTimer != null) {
                endTimer.cancel();
                endTimer = null;
            }
        }

        // Schedule a scrollend to fire after 200 ms of idleness.
        public void delayEndCall() {
            cancelEndCall();
            endTimer = new Timer();
            TimerTask action = new TimerTask() {
                public void run() {
                    scrollTouch = false;
                    if (fProxy.hasListeners(TiC.EVENT_SCROLLEND, false)) {
                        fProxy.fireEvent(TiC.EVENT_SCROLLEND, dictForScrollEvent(), false, false);
                    }
                }
            };
            this.endTimer.schedule(action, 200);
        }

        @Override
        public void onScrollStateChanged(RecyclerView view, int scrollState) {
            view.requestDisallowInterceptTouchEvent(scrollState != ViewPager.SCROLL_STATE_IDLE);
            if (scrollState == RecyclerView.SCROLL_STATE_IDLE) {
                if (scrollTouch) {
                    delayEndCall();
                }
            } else if (scrollState == RecyclerView.SCROLL_STATE_SETTLING) {
                cancelEndCall();
            } else if (scrollState == RecyclerView.SCROLL_STATE_DRAGGING) {
                cancelEndCall();
                if (hideKeyboardOnScroll && hasFocus()) {
                    blur();
                }
                if (scrollTouch == false) {
                    scrollTouch = true;
                    if (fProxy.hasListeners(TiC.EVENT_SCROLLSTART, false)) {
                        fProxy.fireEvent(TiC.EVENT_SCROLLSTART, dictForScrollEvent(), false, false);
                    }
                }
            }
        }

        @Override
        public void onScrolled(RecyclerView view, int dx, int dy) {
            if (dx == 0 && dy == 0) {
                return;
            }
            if (fProxy.hasListeners(TiC.EVENT_SCROLL, false)) {
                fProxy.fireEvent(TiC.EVENT_SCROLL, dictForScrollEvent(), false, false);
            }
        }
    });

    mRecyclerView.setEnabled(true);
    getLayoutParams().autoFillsHeight = true;
    getLayoutParams().autoFillsWidth = true;
    mRecyclerView.setFocusable(true);
    mRecyclerView.setDescendantFocusability(ViewGroup.FOCUS_AFTER_DESCENDANTS);

    // needs to be fired after because getStickyHeadersHolder will be called and need nativeView
    setNativeView(mRecyclerView);
    mRecyclerView.setLayoutManager(layoutManager);
    mAdapter.setLayoutManager(layoutManager);
    mRecyclerView.setAdapter(mAdapter);
}
From source file:voldemort.client.rebalance.AbstractZonedRebalanceTest.java
/**
 * Verifies that client puts issued while a zoned rebalance is in flight are proxied
 * back to the original donor node.
 *
 * One thread waits until servers 0, 2, 3 and 5 enter REBALANCING_MASTER_SERVER state
 * and then writes "proxy_write" values for keys sampled from a moving partition; a
 * second thread runs the rebalance. Afterwards the test checks post-rebalance entries,
 * metadata consistency, and (after rolling cluster.xml back to the original topology)
 * that the proxied writes landed on donor node 1.
 */
@Test(timeout = 600000)
public void testProxyPutDuringRebalancing() throws Exception {
    logger.info("Starting testProxyPutDuringRebalancing");
    try {
        // Two zones, three nodes each; node 1 and node 4 each hold two partitions.
        Cluster currentCluster = ServerTestUtils.getLocalZonedCluster(6, 2,
                new int[] { 0, 0, 0, 1, 1, 1 },
                new int[][] { { 0 }, { 1, 6 }, { 2 }, { 3 }, { 4, 7 }, { 5 } });
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2,
                Lists.newArrayList(7));
        finalCluster = UpdateClusterUtils.createUpdatedCluster(finalCluster, 5, Lists.newArrayList(6));

        /*
         * Original partition map:
         *   [s0 : p0] [s1 : p1, p6] [s2 : p2]
         *   [s3 : p3] [s4 : p4, p7] [s5 : p5]
         *
         * Final server partition ownership:
         *   [s0 : p0] [s1 : p1] [s2 : p2, p7]
         *   [s3 : p3] [s4 : p4] [s5 : p5, p6]
         *
         * Note that rwStoreDefFileWithReplication is a "2/1/1" store def.
         *
         * Original server n-ary partition ownership:
         *   [s0 : p0, p3-7] [s1 : p0-p7] [s2 : p1-2]
         *   [s3 : p0-3, p6-7] [s4 : p0-p7] [s5 : p4-5]
         *
         * Final server n-ary partition ownership:
         *   [s0 : p0, p2-7] [s1 : p0-1] [s2 : p1-p7]
         *   [s3 : p0-3, p5-7] [s4 : p0-4, p7] [s5 : p4-6]
         */
        List<Integer> serverList = Arrays.asList(0, 1, 2, 3, 4, 5);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        final Cluster updatedCurrentCluster = startServers(currentCluster, rwStoreDefFileWithReplication,
                serverList, configProps);

        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
        // Shared between the writer and rebalance threads, hence synchronized.
        final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());

        // It is imperative that we test in a single shot since multiple batches would
        // mean the proxy bridges being torn down and established multiple times and we
        // cannot test against the source cluster topology then. getRebalanceKit uses
        // batch size of infinite, so this should be fine.
        String bootstrapUrl = getBootstrapUrl(updatedCurrentCluster, 0);
        int maxParallel = 2;
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl,
                maxParallel, finalCluster);

        populateData(currentCluster, rwStoreDefWithReplication);
        final AdminClient adminClient = rebalanceKit.controller.getAdminClient();

        // the plan would cause these partitions to move:
        // Partition : Donor -> stealer
        //
        // p2 (Z-SEC)   : s1 -> s0
        // p3-6 (Z-PRI) : s1 -> s2
        // p7 (Z-PRI)   : s0 -> s2
        //
        // p5 (Z-SEC): s4 -> s3
        // p6 (Z-PRI): s4 -> s5
        //
        // :. rebalancing will run on servers 0, 2, 3, & 5
        final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient, 1,
                rwStoreDefWithReplication.getName(), Arrays.asList(6), 20);
        assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
        final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
        final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
        // Expected end state: tuples plus the vector clocks the proxied writes should produce.
        final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
        final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
        for (String key : baselineTuples.keySet()) {
            baselineVersions.put(key, new VectorClock());
        }
        final CountDownLatch latch = new CountDownLatch(2);

        // start get operation.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                SocketStoreClientFactory factory = null;
                try {
                    // wait for the rebalancing to begin
                    List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(0),
                            serverMap.get(2), serverMap.get(3), serverMap.get(5));
                    while (!rebalancingComplete.get()) {
                        Iterator<VoldemortServer> serverIterator = serverList.iterator();
                        while (serverIterator.hasNext()) {
                            VoldemortServer server = serverIterator.next();
                            if (ByteUtils
                                    .getString(server.getMetadataStore()
                                            .get(MetadataStore.SERVER_STATE_KEY, null).get(0).getValue(),
                                            "UTF-8")
                                    .compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
                                logger.info("Server " + server.getIdentityNode().getId()
                                        + " transitioned into REBALANCING MODE");
                                serverIterator.remove();
                            }
                        }
                        if (serverList.size() == 0) {
                            rebalancingStarted.set(true);
                            break;
                        }
                    }
                    if (rebalancingStarted.get()) {
                        factory = new SocketStoreClientFactory(
                                new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 0))
                                        .setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS)
                                        .setClientZoneId(1));
                        final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(
                                testStoreNameRW, null, factory, 3);
                        // Now perform some writes and determine the end
                        // state of the changed keys. Initially, all data
                        // now with zero vector clock
                        for (ByteArray movingKey : movingKeysList) {
                            try {
                                String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
                                String valStr = "proxy_write";
                                storeClientRW.put(keyStr, valStr);
                                baselineTuples.put(keyStr, valStr);
                                // all these keys will have [5:1] vector
                                // clock is node 5 is the new pseudo master
                                baselineVersions.get(keyStr).incrementVersion(5, System.currentTimeMillis());
                                proxyWritesDone.set(true);
                                if (rebalancingComplete.get()) {
                                    break;
                                }
                            } catch (InvalidMetadataException e) {
                                // let this go
                                logger.error("Encountered an invalid metadata exception.. ", e);
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.error("Exception in proxy write thread..", e);
                    exceptions.add(e);
                } finally {
                    if (factory != null)
                        factory.close();
                    latch.countDown();
                }
            }
        });

        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    rebalanceKit.rebalance();
                } catch (Exception e) {
                    logger.error("Error in rebalancing... ", e);
                    exceptions.add(e);
                } finally {
                    // Signals the writer thread to stop, even on failure.
                    rebalancingComplete.set(true);
                    latch.countDown();
                }
            }
        });

        latch.await();
        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);

        assertEquals("Client did not see all server transition into rebalancing state",
                rebalancingStarted.get(), true);
        assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
        checkEntriesPostRebalance(updatedCurrentCluster, finalCluster,
                Lists.newArrayList(rwStoreDefWithReplication), Arrays.asList(0, 1, 2, 3, 4, 5),
                baselineTuples, baselineVersions);
        checkConsistentMetadata(finalCluster, serverList);

        // check No Exception
        if (exceptions.size() > 0) {
            for (Exception e : exceptions) {
                e.printStackTrace();
            }
            fail("Should not see any exceptions.");
        }

        // check that the proxy writes were made to the original donor, node 1.
        // Push the original cluster.xml back with a clock that dominates, so the
        // admin client reads against the pre-rebalance topology.
        List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
        for (Integer nodeid : serverList)
            clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
        VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
        for (Integer nodeid : serverList)
            adminClient.metadataMgmtOps.updateRemoteCluster(nodeid, currentCluster, clusterXmlClock);
        adminClient.setAdminClientCluster(currentCluster);
        checkForTupleEquivalence(adminClient, 1, testStoreNameRW, movingKeysList, baselineTuples,
                baselineVersions);

        // stop servers
        try {
            stopServer(serverList);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testProxyPutDuringRebalancing ", ae);
        throw ae;
    }
}
From source file:org.tinymediamanager.core.movie.MovieList.java
/** * Gets the movie set list./* w ww .ja va 2s .c o m*/ * * @return the movieSetList */ public List<MovieSet> getMovieSetList() { if (movieSetList == null) { movieSetList = ObservableCollections .observableList(Collections.synchronizedList(new ArrayList<MovieSet>())); } return movieSetList; }
From source file:voldemort.client.rebalance.ZonedRebalanceNonContiguousZonesTest.java
/**
 * Same proxy-put-during-rebalance scenario as the zoned test, but on a cluster
 * whose zone ids (1, 3) and node ids (3-5, 9-11) are non-contiguous.
 *
 * One thread waits for servers 3, 5, 9 and 11 to enter REBALANCING_MASTER_SERVER
 * state and then writes "proxy_write" values for keys sampled from a moving
 * partition; a second thread runs the rebalance. Afterwards the test verifies
 * post-rebalance entries, metadata consistency, and that the proxied writes were
 * made to the original donor, node 4.
 */
@Test(timeout = 600000)
public void testProxyPutDuringRebalancing() throws Exception {
    logger.info("Starting testProxyPutDuringRebalancing");
    try {
        // Zones 1 and 3 with nodes {3,4,5} and {9,10,11}; nodes 4 and 10 hold two partitions.
        int zoneIds[] = new int[] { 1, 3 };
        int nodesPerZone[][] = new int[][] { { 3, 4, 5 }, { 9, 10, 11 } };
        int partitionMap[][] = new int[][] { { 0 }, { 1, 6 }, { 2 }, { 3 }, { 4, 7 }, { 5 } };
        Cluster currentCluster = ServerTestUtils.getLocalNonContiguousZonedCluster(zoneIds, nodesPerZone,
                partitionMap, ClusterTestUtils.getClusterPorts());
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 5,
                Lists.newArrayList(7));
        finalCluster = UpdateClusterUtils.createUpdatedCluster(finalCluster, 11, Lists.newArrayList(6));

        /*
         * Original partition map:
         *   [s3 : p0] [s4 : p1, p6] [s5 : p2]
         *   [s9 : p3] [s10 : p4, p7] [s11 : p5]
         *
         * Final server partition ownership:
         *   [s3 : p0] [s4 : p1] [s5 : p2, p7]
         *   [s9 : p3] [s10 : p4] [s11 : p5, p6]
         *
         * Note that rwStoreDefFileWithReplication is a "2/1/1" store def.
         *
         * Original server n-ary partition ownership:
         *   [s3 : p0, p3-7] [s4 : p0-p7] [s5 : p1-2]
         *   [s9 : p0-3, p6-7] [s10 : p0-p7] [s11 : p4-5]
         *
         * Final server n-ary partition ownership:
         *   [s3 : p0, p2-7] [s4 : p0-1] [s5 : p1-p7]
         *   [s9 : p0-3, p5-7] [s10 : p0-4, p7] [s11 : p4-6]
         */
        List<Integer> serverList = Arrays.asList(3, 4, 5, 9, 10, 11);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        final Cluster updatedCurrentCluster = startServers(currentCluster, rwStoreDefFileWithReplication,
                serverList, configProps);

        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
        // Shared between the writer and rebalance threads, hence synchronized.
        final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());

        // It is imperative that we test in a single shot since multiple batches would
        // mean the proxy bridges being torn down and established multiple times and we
        // cannot test against the source cluster topology then. getRebalanceKit uses
        // batch size of infinite, so this should be fine.
        String bootstrapUrl = getBootstrapUrl(updatedCurrentCluster, 3);
        int maxParallel = 2;
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl,
                maxParallel, finalCluster);

        populateData(currentCluster, rwStoreDefWithReplication);
        final AdminClient adminClient = rebalanceKit.controller.getAdminClient();

        // the plan would cause these partitions to move:
        // Partition : Donor -> stealer
        //
        // p2 (Z-SEC)   : s4 -> s3
        // p3-6 (Z-PRI) : s4 -> s5
        // p7 (Z-PRI)   : s3 -> s5
        //
        // p5 (Z-SEC): s10 -> s9
        // p6 (Z-PRI): s10 -> s11
        //
        // Rebalancing will run on servers 3, 5, 9, & 11
        final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient, 4,
                rwStoreDefWithReplication.getName(), Arrays.asList(6), 20);
        assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
        final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
        final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
        // Expected end state: tuples plus the vector clocks the proxied writes should produce.
        final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
        final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
        for (String key : baselineTuples.keySet()) {
            baselineVersions.put(key, new VectorClock());
        }
        final CountDownLatch latch = new CountDownLatch(2);

        // start get operation.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                SocketStoreClientFactory factory = null;
                try {
                    // wait for the rebalancing to begin
                    List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(3),
                            serverMap.get(5), serverMap.get(9), serverMap.get(11));
                    while (!rebalancingComplete.get()) {
                        Iterator<VoldemortServer> serverIterator = serverList.iterator();
                        while (serverIterator.hasNext()) {
                            VoldemortServer server = serverIterator.next();
                            if (ByteUtils
                                    .getString(server.getMetadataStore()
                                            .get(MetadataStore.SERVER_STATE_KEY, null).get(0).getValue(),
                                            "UTF-8")
                                    .compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
                                logger.info("Server " + server.getIdentityNode().getId()
                                        + " transitioned into REBALANCING MODE");
                                serverIterator.remove();
                            }
                        }
                        if (serverList.size() == 0) {
                            rebalancingStarted.set(true);
                            break;
                        }
                    }
                    if (rebalancingStarted.get()) {
                        factory = new SocketStoreClientFactory(
                                new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 3))
                                        .setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS)
                                        .setClientZoneId(3));
                        final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(
                                testStoreNameRW, null, factory, 3);
                        // Now perform some writes and determine the end state of the changed keys.
                        // Initially, all data now with zero vector clock
                        for (ByteArray movingKey : movingKeysList) {
                            try {
                                String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
                                String valStr = "proxy_write";
                                storeClientRW.put(keyStr, valStr);
                                baselineTuples.put(keyStr, valStr);
                                // node 11 is the new pseudo master for these keys
                                baselineVersions.get(keyStr).incrementVersion(11, System.currentTimeMillis());
                                proxyWritesDone.set(true);
                                if (rebalancingComplete.get()) {
                                    break;
                                }
                            } catch (InvalidMetadataException e) {
                                // let this go — expected while metadata is in flux
                                logger.error("Encountered an invalid metadata exception.. ", e);
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.error("Exception in proxy write thread..", e);
                    exceptions.add(e);
                } finally {
                    if (factory != null)
                        factory.close();
                    latch.countDown();
                }
            }
        });

        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    rebalanceKit.rebalance();
                } catch (Exception e) {
                    logger.error("Error in rebalancing... ", e);
                    exceptions.add(e);
                } finally {
                    // Signals the writer thread to stop, even on failure.
                    rebalancingComplete.set(true);
                    latch.countDown();
                }
            }
        });

        latch.await();
        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);

        assertEquals("Client did not see all server transition into rebalancing state",
                rebalancingStarted.get(), true);
        assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
        checkEntriesPostRebalance(updatedCurrentCluster, finalCluster,
                Lists.newArrayList(rwStoreDefWithReplication), Arrays.asList(3, 4, 5, 9, 10, 11),
                baselineTuples, baselineVersions);
        checkConsistentMetadata(finalCluster, serverList);

        // check No Exception
        if (exceptions.size() > 0) {
            for (Exception e : exceptions) {
                e.printStackTrace();
            }
            fail("Should not see any exceptions.");
        }

        // check that the proxy writes were made to the original donor, node 4.
        // Push the original cluster.xml back with a dominating clock so the admin
        // client reads against the pre-rebalance topology.
        List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
        for (Integer nodeid : serverList)
            clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
        VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
        for (Integer nodeid : serverList)
            adminClient.metadataMgmtOps.updateRemoteCluster(nodeid, currentCluster, clusterXmlClock);
        adminClient.setAdminClientCluster(currentCluster);
        checkForTupleEquivalence(adminClient, 4, testStoreNameRW, movingKeysList, baselineTuples,
                baselineVersions);

        // stop servers
        try {
            stopServer(serverList);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testProxyPutDuringRebalancing ", ae);
        throw ae;
    }
}
From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java
/**
 * Runs the spool-dir origin against {@code spoolDir} with the requested number of
 * reader threads and asserts that all 50 expected records arrive across more than
 * one batch, with the final offset pointing past file-9.
 *
 * @param spoolDir        directory containing the test files to spool
 * @param numberOfThreads reader-thread count to configure on the origin
 * @throws Exception on any pipeline failure
 */
private void readFilesMultipleThreads(String spoolDir, int numberOfThreads) throws Exception {
    // Spooling behavior.
    SpoolDirConfigBean spoolConf = new SpoolDirConfigBean();
    spoolConf.spoolDir = spoolDir;
    spoolConf.filePattern = "*file-[0-9].log";
    spoolConf.pathMatcherMode = PathMatcherMode.GLOB;
    spoolConf.initialFileToProcess = null;
    spoolConf.maxSpoolFiles = 10;
    spoolConf.poolingTimeoutSecs = 1;
    spoolConf.batchSize = 10;
    spoolConf.overrunLimit = 100;
    spoolConf.allowLateDirectory = false;
    spoolConf.numberOfThreads = numberOfThreads;
    // Post-processing / error handling.
    spoolConf.postProcessing = PostProcessingOptions.NONE;
    spoolConf.retentionTimeMins = 10;
    spoolConf.errorArchiveDir = null;
    // Data format: plain text, short lines, fail hard on parse errors.
    spoolConf.dataFormat = DataFormat.TEXT;
    spoolConf.dataFormatConfig.compression = Compression.NONE;
    spoolConf.dataFormatConfig.filePatternInArchive = "*";
    spoolConf.dataFormatConfig.textMaxLineLen = 10;
    spoolConf.dataFormatConfig.onParseError = OnParseError.ERROR;
    spoolConf.dataFormatConfig.maxStackTraceLines = 0;

    SpoolDirSource source = new SpoolDirSource(spoolConf);
    PushSourceRunner runner = new PushSourceRunner.Builder(SpoolDirDSource.class, source)
            .addOutputLane("lane")
            .build();

    AtomicInteger batches = new AtomicInteger(0);
    // Populated from multiple origin threads, hence the synchronized wrapper.
    final List<Record> collected = Collections.synchronizedList(new ArrayList<>(10));
    runner.runInit();
    final int maxBatchSize = 10;
    try {
        runner.runProduce(new HashMap<>(), maxBatchSize, batchOutput -> {
            batches.incrementAndGet();
            synchronized (collected) {
                collected.addAll(batchOutput.getRecords().get("lane"));
            }
            // Stop once everything arrived, or bail out after too many batches.
            if (collected.size() == 50 || batches.get() > 10) {
                runner.setStop();
            }
        });
        runner.waitOnProduce();

        Assert.assertTrue(batches.get() > 1);
        TestOffsetUtil.compare("-file-9.log::-1", runner.getOffsets());
        Assert.assertEquals(50, collected.size());
    } finally {
        runner.runDestroy();
    }
}
From source file:voldemort.client.rebalance.AbstractNonZonedRebalanceTest.java
/**
 * Verifies that client puts issued while a rebalance is in flight are proxied
 * back to the original donor (node 1). One thread waits for servers 0 and 2 to
 * enter REBALANCING_MASTER_SERVER state and then puts "proxy_write" values for
 * the sampled moving keys; a second thread drives the rebalance. Afterwards the
 * donor's tuples and vector clocks are checked against the expected baseline.
 */
@Test(timeout = 600000)
public void testProxyPutDuringRebalancing() throws Exception {
    logger.info("Starting testProxyPutDuringRebalancing");
    try {
        Cluster currentCluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0 }, { 1, 3 }, { 2 } });
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Lists.newArrayList(3));

        // start servers 0,1,2 only
        final List<Integer> serverList = Arrays.asList(0, 1, 2);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        final Cluster updatedCurrentCluster = startServers(currentCluster, rwStoreDefFileWithReplication,
                serverList, configProps);

        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
        // Exceptions from either worker thread are collected here and asserted on later.
        final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());

        // It is imperative that we test in a single shot since multiple
        // batches would mean the proxy bridges being torn down and
        // established multiple times and we cannot test against the source
        // cluster topology then.
        String bootstrapUrl = getBootstrapUrl(currentCluster, 0);
        int maxParallel = 2;
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl,
                maxParallel, finalCluster);

        populateData(updatedCurrentCluster, rwStoreDefWithReplication,
                rebalanceKit.controller.getAdminClient(), false);
        final AdminClient adminClient = rebalanceKit.controller.getAdminClient();

        // the plan would cause these partitions to move
        // Partition : Donor -> Stealer
        // p2 (SEC) : s1 -> s0
        // p3 (PRI) : s1 -> s2
        final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient, 1,
                rwStoreDefWithReplication.getName(), Arrays.asList(2, 3), 20);
        assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
        final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
        final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
        // Expected end-state of every key/value and its vector clock, updated as proxy writes happen.
        final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
        final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
        for (String key : baselineTuples.keySet()) {
            baselineVersions.put(key, new VectorClock());
        }
        final CountDownLatch latch = new CountDownLatch(2);

        // start the proxy put thread.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                SocketStoreClientFactory factory = null;
                try {
                    // wait for the rebalancing to begin on both servers that receive partitions.
                    List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(0), serverMap.get(2));
                    while (!rebalancingComplete.get()) {
                        Iterator<VoldemortServer> serverIterator = serverList.iterator();
                        while (serverIterator.hasNext()) {
                            VoldemortServer server = serverIterator.next();
                            if (ByteUtils
                                    .getString(server.getMetadataStore()
                                            .get(MetadataStore.SERVER_STATE_KEY, null).get(0).getValue(),
                                            "UTF-8")
                                    .compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
                                logger.info("Server " + server.getIdentityNode().getId()
                                        + " transitioned into REBALANCING MODE");
                                serverIterator.remove();
                            }
                        }
                        if (serverList.size() == 0) {
                            rebalancingStarted.set(true);
                            break;
                        }
                    }
                    // Only issue writes if the rebalance is still running, so they go through the proxy.
                    if (!rebalancingComplete.get()) {
                        factory = new SocketStoreClientFactory(new ClientConfig()
                                .setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 0))
                                .setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS));
                        final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(
                                testStoreNameRW, null, factory, 3);

                        // Now perform some writes and determine the end
                        // state of the changed keys. Initially, all data now with
                        // zero vector clock
                        for (ByteArray movingKey : movingKeysList) {
                            try {
                                if (rebalancingComplete.get()) {
                                    break;
                                }
                                String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
                                String valStr = "proxy_write";
                                storeClientRW.put(keyStr, valStr);
                                baselineTuples.put(keyStr, valStr);
                                // all these keys will have [2:1] vector clock,
                                // as node 2 is the pseudo master in both moves
                                baselineVersions.get(keyStr).incrementVersion(2, System.currentTimeMillis());
                                proxyWritesDone.set(true);
                            } catch (InvalidMetadataException e) {
                                // let this go
                                logger.error("Encountered an invalid metadata exception.. ", e);
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.error("Exception in proxy put thread", e);
                    exceptions.add(e);
                } finally {
                    if (factory != null)
                        factory.close();
                    latch.countDown();
                }
            }
        });
        // start the rebalance thread; it flags completion so the writer can stop.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    rebalanceKit.rebalance();
                } catch (Exception e) {
                    logger.error("Error in rebalancing... ", e);
                    exceptions.add(e);
                } finally {
                    rebalancingComplete.set(true);
                    latch.countDown();
                }
            }
        });
        latch.await();
        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);

        assertEquals("Client did not see all server transition into rebalancing state",
                rebalancingStarted.get(), true);
        assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
        checkEntriesPostRebalance(updatedCurrentCluster, finalCluster,
                Lists.newArrayList(rwStoreDefWithReplication), Arrays.asList(0, 1, 2), baselineTuples,
                baselineVersions);
        checkConsistentMetadata(finalCluster, serverList);
        // check No Exception
        if (exceptions.size() > 0) {
            for (Exception e : exceptions) {
                e.printStackTrace();
            }
            fail("Should not see any exceptions.");
        }
        // check that the proxy writes were made to the original donor, node 1;
        // reset the cluster metadata back to the source topology first.
        List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
        for (Integer nodeid : serverList)
            clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
        VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
        for (Integer nodeid : serverList)
            adminClient.metadataMgmtOps.updateRemoteCluster(nodeid, currentCluster, clusterXmlClock);
        adminClient.setAdminClientCluster(currentCluster);
        checkForTupleEquivalence(adminClient, 1, testStoreNameRW, movingKeysList, baselineTuples,
                baselineVersions);

        // stop servers
        try {
            stopServer(serverList);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testProxyPutDuringRebalancing ", ae);
        throw ae;
    }
}
From source file:org.alfresco.repo.web.scripts.blogs.BlogServiceTest.java
/** * Does some stress tests.//from w w w . j av a 2s . c o m * * Currently observed errors: * 1. [repo.action.AsynchronousActionExecutionQueueImpl] Failed to execute asynchronous action: Action[ id=485211db-f117-4976-9530-ab861a19f563, node=null ] * org.alfresco.repo.security.permissions.AccessDeniedException: Access Denied. You do not have the appropriate permissions to perform this operation. * * 2. JSONException, but with root cause being * get(assocs) failed on instance of org.alfresco.repo.template.TemplateNode * The problematic instruction: * ---------- * ==> if person.assocs["cm:avatar"]?? [on line 4, column 7 in org/alfresco/repository/blogs/blogpost.lib.ftl] * * @throws Exception */ public void _testTagsStressTest() throws Exception { final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>()); List<Thread> threads = new ArrayList<Thread>(); System.err.println("Creating and starting threads..."); for (int x = 0; x < 3; x++) { Thread t = new Thread(new Runnable() { public void run() { // set the correct user authenticationComponent.setCurrentUser(USER_ONE); // now do some requests try { for (int y = 0; y < 3; y++) { off_testPostTags(); off_testClearTags(); } System.err.println("------------- SUCCEEDED ---------------"); } catch (Exception e) { System.err.println("------------- ERROR ---------------"); exceptions.add(e); e.printStackTrace(); return; } } }); threads.add(t); t.start(); } /*for (Thread t : threads) { t.start(); }*/ for (Thread t : threads) { t.join(); } System.err.println("------------- STACK TRACES ---------------"); for (Exception e : exceptions) { e.printStackTrace(); } System.err.println("------------- STACK TRACES END ---------------"); if (exceptions.size() > 0) { throw exceptions.get(0); } }
From source file:de.fu_berlin.inf.dpp.net.internal.XMPPTransmitter.java
protected void prepareConnection(final XMPPConnection connection) { // Create Containers this.chats = new HashMap<JID, Chat>(); this.processes = Collections.synchronizedMap(new HashMap<JID, InvitationProcess>()); this.messageTransferQueue = Collections.synchronizedList(new LinkedList<MessageTransfer>()); this.connection = connection; this.chatmanager = connection.getChatManager(); // Register a PacketListener which takes care of decoupling the // processing of Packets from the Smack thread this.connection.addPacketListener(new PacketListener() { protected PacketFilter sessionFilter = PacketExtensionUtils.getSessionIDPacketFilter(sessionID); public void processPacket(final Packet packet) { dispatchThread.executeAsDispatch(new Runnable() { public void run() { if (sessionFilter.accept(packet)) { try { Message message = (Message) packet; JID fromJID = new JID(message.getFrom()); // Change the input method to get the right // chats putIncomingChat(fromJID, message.getThread()); } catch (Exception e) { log.error("An internal error occurred " + "while processing packets", e); }/*from w w w.j a v a 2 s. c o m*/ } receiver.processPacket(packet); } }); } }, null); }
From source file:com.healthcit.cacure.businessdelegates.GeneratedModuleDataManager.java
/** * Generates a random unique key for a given module. * Returns whether or not this key is actually a duplicate * within the appropriate scope (per-module, per-entity or per-entitymodule). */// ww w. j a va2 s .c om private void buildNewKey(Map<String, JSONObject> uniqueKey, Map<String, JSONObject> lastUniqueKey, List<Map<String, Object>> keyQuestions, Map<Object, List<Object>> keyQuestionCombinations, Object uniqueGroupId) { for (int i = 0; i < keyQuestions.size(); ++i) { // Get the unique key question Map<String, Object> uniquePerEntityOrModuleQuestion = keyQuestions.get(i); // Get the question UUID String questionUUID = (String) uniquePerEntityOrModuleQuestion.get(UUID_VALUE); // Generate a random answer value for this question JSONObject randomAnswerValue = generateRandomAnswerValueForUniqueKey(uniquePerEntityOrModuleQuestion, uniqueKey, lastUniqueKey, keyQuestionCombinations, uniqueGroupId); uniqueKey.put(questionUUID, randomAnswerValue); // Track the newly generated key in the questionCombinations collection if (i == keyQuestions.size() - 1) { for (Map.Entry<String, JSONObject> entry : uniqueKey.entrySet()) { String key = GeneratedModuleDataDetail.getTwoPartMapKey(entry.getKey(), entry.getValue().get(ANSWERVALUE_VALUE).toString()); List<Object> list = keyQuestionCombinations.get(key); if (list == null) { list = Collections.synchronizedList(new ArrayList<Object>()); } if (!list.contains(uniqueGroupId)) list.add(uniqueGroupId); keyQuestionCombinations.put(key, list); } } } }