List of usage examples for org.apache.commons.math3.geometry.euclidean.threed.Vector3D, constructor Vector3D(double, double, double)
public Vector3D(double x, double y, double z)
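The constructor takes the three Cartesian coordinates directly. Before the full examples below, here is a minimal, self-contained sketch of the calls that recur throughout them (the class name Vector3DDemo and the values are invented for illustration; the API calls are from commons-math3):

import org.apache.commons.math3.geometry.euclidean.threed.Rotation;
import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class Vector3DDemo {
    public static void main(String[] args) {
        // Construct from explicit x, y, z coordinates
        Vector3D v = new Vector3D(1.0, 2.0, 3.0);
        // Vector3D is immutable: arithmetic returns new instances
        Vector3D sum = v.add(new Vector3D(0, 1, 0));
        Vector3D scaled = v.scalarMultiply(2.0);
        // Cross product gives a vector orthogonal to both arguments
        Vector3D ortho = v.crossProduct(Vector3D.PLUS_J);
        // A Rotation built from two vectors maps the first onto the second
        Rotation r = new Rotation(Vector3D.PLUS_K, new Vector3D(1, 0, 0));
        System.out.println(r.applyTo(v) + " " + sum + " " + scaled + " " + ortho);
    }
}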
From source file: org.jtrfp.trcl.Tunnel.java
private Vector3D buildTunnel(TDFFile.Tunnel _tun, Vector3D groundVector, boolean entrance)
        throws IllegalAccessException, UnrecognizedFormatException, FileNotFoundException,
        FileLoadException, IOException {
    // Entrance uses only a stub. Player is warped to TUNNEL_POS facing
    // TUNNEL_START_DIRECTION
    ResourceManager rm = tr.getResourceManager();
    LVLFile tlvl = rm.getLVL(_tun.getTunnelLVLFile());
    final ColorPaletteVectorList tunnelColorPalette = new ColorPaletteVectorList(
            tr.getResourceManager().getPalette(lvl.getGlobalPaletteFile()));
    TextureDescription[] tunnelTexturePalette = rm.getTextures(tlvl.getLevelTextureListFile(),
            paletteVL, ESTuTvPalette, true);
    TNLFile tun = tr.getResourceManager().getTNLData(tlvl.getHeightMapOrTunnelFile());
    final double segLen = 65536;
    final double bendiness = 18;
    List<Segment> segs = tun.getSegments();
    final LoadingProgressReporter[] reporters = tunnelAssemblyReporter.generateSubReporters(segs.size());
    // Vector3D tunnelEnd = TUNNEL_START_POS;
    Rotation rotation = entrance ? new Rotation(new Vector3D(0, 0, 1), groundVector)
            : new Rotation(new Vector3D(0, 0, 1), new Vector3D(1, 0, 0));
    Vector3D startPoint = TUNNEL_START_POS;
    Vector3D segPos = Vector3D.ZERO;
    final Vector3D top = rotation.applyTo(new Vector3D(0, 1, 0));
    /*
    if (entrance) {
        // Entrance is just a stub so we only need a few of the segments
        List<Segment> newSegs = new ArrayList<Segment>();
        for (int i = 0; i < 10; i++) {
            newSegs.add(segs.get(i));
        }
        segs = newSegs;
    }
    */
    // CONSTRUCT AND INSTALL SEGMENTS
    int segIndex = 0;
    Vector3D finalPos = TUNNEL_START_POS;
    for (Segment s : segs) {
        reporters[segIndex].complete();
        tr.getReporter().report(
                "org.jtrfp.trcl.Tunnel." + _tun.getTunnelLVLFile() + ".segment" + (segIndex++),
                s.getObstacle().name());
        // Figure out the space the segment will take
        Vector3D positionDelta = new Vector3D(
                (double) (s.getEndX() - s.getStartX()) * bendiness * -1,
                (double) (s.getEndY() - s.getStartY()) * bendiness, segLen);
        // Create the segment
        Vector3D position = startPoint.add(rotation.applyTo(segPos));
        TunnelSegment ts = new TunnelSegment(tr, s, tunnelTexturePalette, segLen,
                positionDelta.getX(), positionDelta.getY());
        ts.setPosition(position.toArray());
        ts.setHeading(entrance ? groundVector : Vector3D.PLUS_I);
        ts.setTop(entrance ? top : Vector3D.PLUS_J);
        // Install the segment
        add(ts);
        installObstacles(s, tunnelColorPalette, ESTuTvPalette, tunnelTexturePalette,
                entrance ? groundVector : Vector3D.PLUS_I, entrance ? top : Vector3D.PLUS_J,
                position, TR.legacy2Modern(s.getStartWidth() * TunnelSegment.TUNNEL_DIA_SCALAR),
                TR.legacy2Modern(s.getStartWidth() * TunnelSegment.TUNNEL_DIA_SCALAR), tr);
        // Move origin to next segment
        segPos = segPos.add(positionDelta);
        finalPos = position;
    } // end for(segments)
    return finalPos;
}
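The core Vector3D idiom in buildTunnel is accumulating per-segment offsets in tunnel-local coordinates and rotating them into world space with a Rotation built from two direction vectors. A stripped-down sketch of just that pattern (the class name, the stand-in for TUNNEL_START_POS, the entrance direction, and the per-segment offsets are all assumed values, not from the original code):

import org.apache.commons.math3.geometry.euclidean.threed.Rotation;
import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class SegmentLayoutSketch {
    public static void main(String[] args) {
        final Vector3D startPoint = new Vector3D(0, 0, 0); // stand-in for TUNNEL_START_POS
        final Vector3D groundVector = new Vector3D(0, 0.5, Math.sqrt(0.75)); // assumed entrance direction
        // Rotation mapping the local +Z axis onto the entrance direction
        Rotation rotation = new Rotation(new Vector3D(0, 0, 1), groundVector);
        Vector3D segPos = Vector3D.ZERO; // local cursor along the tunnel
        double[][] deltas = { { 10, 0, 100 }, { -5, 3, 100 } }; // assumed per-segment offsets
        for (double[] d : deltas) {
            Vector3D positionDelta = new Vector3D(d[0], d[1], d[2]);
            // world-space position of this segment's origin
            Vector3D position = startPoint.add(rotation.applyTo(segPos));
            System.out.println(position);
            segPos = segPos.add(positionDelta); // advance the local cursor
        }
    }
}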
From source file: org.jtrfp.trcl.Tunnel.java
/**
 * Tunnel items:
 *  FANBODY.BIN - fan
 *  IRIS.BIN - animated iris
 *  BEAM.BIN / PIPE.BIN
 *  JAW1.BIN (right), JAW2.BIN (left) - jaws
 *  ELECTRI[0-3].RAW - force field
 *  TP1.RAW - good enough for blastable door?
 *
 * @throws IOException
 * @throws FileLoadException
 */
private void installObstacles(Segment s, ColorPaletteVectorList tunnelColorPalette,
        ColorPaletteVectorList ESTuTvPalette, TextureDescription[] tunnelTexturePalette, Vector3D heading,
        Vector3D top, Vector3D wPos, double width, double height, TR tr)
        throws IllegalAccessException, FileLoadException, IOException {
    final ColorPaletteVectorList palette = tr.getGlobalPaletteVL();
    Obstacle obs = s.getObstacle();
    WorldObject wo;
    Model m;
    switch (obs) {
    case none0:
        break;
    case doorway: {
        m = Model.buildCube(tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, .5, .5, 1, 1, tr);
        wo = new WorldObject(tr, m);
        wo.setPosition(wPos.toArray());
        wo.setHeading(heading);
        wo.setTop(top);
        add(wo);
        break;
    }
    case closedDoor: {
        BarrierCube bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, .5, .5, 0, 1, false);
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.addBehavior(new DamageableBehavior().setHealth(4096));
        bc.addBehavior(new ExplodesOnDeath(ExplosionType.Blast));
        bc.addBehavior(new DeathBehavior());
        bc.addBehavior(new DebrisOnDeathBehavior());
        bc.addBehavior(new DestructibleWallBehavior());
        bc.setTop(top);
        add(bc);
        break;
    }
    case blownOpenDoor: // TODO: This is not displaying alpha
        BarrierCube bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, .5, .5, 1, 1, true);
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        add(bc);
        break;
    case movingWallLeft: {
        Vector3D endPos = wPos.add(heading.crossProduct(top).scalarMultiply(tunnelDia));
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, false);
        bc.addBehavior(new ShiftingObjectBehavior(3000, wPos, endPos));
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    }
    case movingWallRight: {
        Vector3D endPos = wPos.subtract(heading.crossProduct(top).scalarMultiply(tunnelDia));
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, false);
        bc.addBehavior(new ShiftingObjectBehavior(3000, wPos, endPos));
        bc.addBehavior(new CubeCollisionBehavior(bc));
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    }
    case movingWallDown: {
        Vector3D endPos = wPos.subtract(top.scalarMultiply(tunnelDia));
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, false);
        bc.addBehavior(new ShiftingObjectBehavior(3000, wPos, endPos));
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    }
    case movingWallUp: {
        Vector3D endPos = wPos.add(top.scalarMultiply(tunnelDia));
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, false);
        bc.addBehavior(new ShiftingObjectBehavior(3000, wPos, endPos));
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    }
    case wallLeftSTUB:
    case wallLeft:
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { 0., tunnelDia / 2., 0 }, false);
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    case wallRightSTUB:
    case wallRight:
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia, tunnelDia / 2., 0 }, false);
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    case wallDownSTUB:
    case wallDown:
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, false);
        bc.setPosition((wPos.subtract(top.scalarMultiply(tunnelDia / 2))).toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    case wallUpSTUB:
    case wallUp:
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { tunnelDia / 2., tunnelDia / 2., 0 }, false);
        bc.setPosition((wPos.add(top.scalarMultiply(tunnelDia / 2))).toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    case rotatingHalfWall: {
        final double rotPeriod = 32768. / (double) s.getRotationSpeed();
        final boolean rotate = !Double.isInfinite(rotPeriod);
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { 0, tunnelDia / 2., 0 }, false);
        if (rotate) {
            bc.addBehavior(new RotatingObjectBehavior(heading, heading, top, (int) (rotPeriod * 1000.), 0));
            bc.setTop(top);
        } else
            bc.setTop(new Rotation(heading, Math.PI + Math.PI / 2).applyTo(top));
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.setTop(top);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    }
    case rotating34Wall: {
        final double rotPeriod = 32768. / (double) s.getRotationSpeed();
        final boolean rotate = !Double.isInfinite(rotPeriod);
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { 0, tunnelDia / 2., 10 }, false);
        if (rotate) {
            bc.addBehavior(new RotatingObjectBehavior(heading, heading, top, (int) (rotPeriod * 1000.), Math.PI));
            bc.setTop(top);
        } else
            bc.setTop(new Rotation(heading, Math.PI + Math.PI / 2).applyTo(top));
        bc.setPosition(wPos.toArray());
        bc.setHeading(heading);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        bc = new BarrierCube(tr, tunnelDia, tunnelDia, wallThickness,
                tunnelTexturePalette[s.getObstacleTextureIndex()],
                new double[] { 0, tunnelDia / 2., 0 }, false);
        if (rotate) {
            bc.addBehavior(new RotatingObjectBehavior(heading, heading, top, (int) (rotPeriod * 1000.),
                    Math.PI + Math.PI / 2));
            bc.setTop(top);
        } else
            bc.setTop(new Rotation(heading, Math.PI * 2).applyTo(top));
        bc.setPosition((wPos.add(new Vector3D(100, 0, 0))).toArray());
        bc.setHeading(heading);
        bc.addBehavior(new CubeCollisionBehavior(bc));
        add(bc);
        break;
    }
    case fan:
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("BLADE.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 28, false, palette, ESTuTvPalette));
        wo.setPosition(wPos.toArray());
        wo.setHeading(heading);
        wo.setTop(top);
        wo.addBehavior(new CubeCollisionBehavior(wo));
        wo.addBehavior(new RotatingObjectBehavior(heading, heading, top, 6000, Math.random() * 2 * Math.PI));
        add(wo);
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("FANBODY.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 28, false, palette, null)); // No ESTuTv for fan for now.
        wo.setPosition(wPos.toArray());
        wo.setHeading(heading);
        wo.setTop(top);
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        break;
    case jawsVertical:
        // Up jaw
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("JAW2.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 8, false, palette, ESTuTvPalette));
        wo.addBehavior(new ShiftingObjectBehavior(3000, wPos, wPos.add(top.scalarMultiply(tunnelDia / 2))));
        wo.addBehavior(new CubeCollisionBehavior(wo));
        wo.setPosition(wPos.toArray());
        wo.setHeading(heading);
        wo.setTop(heading.crossProduct(top).negate());
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        // Down jaw
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("JAW1.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 8, false, palette, ESTuTvPalette));
        wo.addBehavior(new ShiftingObjectBehavior(3000, wPos, wPos.subtract(top.scalarMultiply(tunnelDia / 2))));
        wo.setPosition(wPos.toArray());
        wo.setHeading(heading);
        wo.setTop(heading.crossProduct(top).negate());
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        break;
    case jawsHorizontal:
        // Left jaw
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("JAW2.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 8, false, palette, ESTuTvPalette));
        wo.addBehavior(new ShiftingObjectBehavior(3000, wPos,
                wPos.add(heading.crossProduct(top).scalarMultiply(tunnelDia / 2))));
        wo.setPosition(wPos.toArray());
        wo.setHeading(heading);
        wo.setTop(top);
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        // Right jaw
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("JAW1.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 8, false, palette, ESTuTvPalette));
        wo.addBehavior(new ShiftingObjectBehavior(3000, wPos,
                wPos.subtract(heading.crossProduct(top).scalarMultiply(tunnelDia / 2))));
        wo.setPosition(wPos.toArray());
        wo.setHeading(heading);
        wo.setTop(top);
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        break;
    case metalBeamUp:
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("BEAM.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 8, false, palette, ESTuTvPalette));
        wo.setPosition(wPos.add(new Vector3D(0, tunnelDia / 6, 0)).toArray());
        wo.setHeading(heading);
        wo.setTop(top);
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        break;
    case metalBeamDown:
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("BEAM.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 8, false, palette, ESTuTvPalette));
        wo.setPosition(wPos.add(new Vector3D(0, -tunnelDia / 6, 0)).toArray());
        wo.setHeading(heading);
        wo.setTop(top);
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        break;
    case metalBeamLeft:
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("BEAM.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 8, false, palette, ESTuTvPalette));
        wo.setPosition(wPos.add(new Vector3D(-tunnelDia / 6, 0, 0)).toArray());
        wo.setHeading(heading);
        wo.setTop(top.crossProduct(heading));
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        break;
    case metalBeamRight:
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("BEAM.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 8, false, palette, ESTuTvPalette));
        wo.setPosition(wPos.add(new Vector3D(tunnelDia / 6, 0, 0)).toArray());
        wo.setHeading(heading);
        wo.setTop(top.crossProduct(heading));
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        break;
    case forceField: {
        // ELECTRI[0-3].RAW
        final ForceField ff = new ForceField(tr, (int) tunnelDia, (int) wallThickness);
        ff.setPosition(wPos.toArray());
        ff.setHeading(heading);
        ff.setTop(top);
        add(ff);
        break;
    }
    // Invisible walls, as far as I know, are never used.
    // This makes sense: There is nothing fun about trying to get through a
    // tunnel and crashing into invisible walls.
    case invisibleWallUp: // TODO
        break;
    case invisibleWallDown: // TODO
        break;
    case invisibleWallLeft: // TODO
        break;
    case invisibleWallRight: // TODO
        break;
    case iris: {
        wo = new WorldObject(tr, tr.getResourceManager().getBINModel("IRIS.BIN",
                tunnelTexturePalette[s.getObstacleTextureIndex()], 4 * 256, false, palette, ESTuTvPalette));
        final Model mod = wo.getModel();
        wo.addBehavior(new IrisBehavior(new Sequencer(mod.getFrameDelayInMillis(), 2, true), width));
        wo.setPosition(wPos.toArray());
        wo.setHeading(heading);
        wo.setTop(top);
        wo.addBehavior(new CubeCollisionBehavior(wo));
        add(wo);
        break;
    }
    } // end switch(obstacle)
}
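installObstacles derives every offset from the tunnel's local frame: heading is the tunnel axis, top is local "up", and heading.crossProduct(top) yields the lateral axis, so left/right walls slide along the cross product while up/down walls slide along top. A minimal sketch of that frame arithmetic in isolation (class name and the concrete values are invented; an orthonormal heading/top pair is assumed):

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class TunnelFrameSketch {
    public static void main(String[] args) {
        final double tunnelDia = 1.0;          // assumed tunnel diameter
        Vector3D wPos = new Vector3D(5, 5, 5); // assumed segment position
        Vector3D heading = Vector3D.PLUS_I;    // tunnel axis
        Vector3D top = Vector3D.PLUS_J;        // local "up"
        Vector3D lateral = heading.crossProduct(top); // third axis of the local frame
        Vector3D leftEnd = wPos.add(lateral.scalarMultiply(tunnelDia));       // movingWallLeft end point
        Vector3D rightEnd = wPos.subtract(lateral.scalarMultiply(tunnelDia)); // movingWallRight end point
        Vector3D upEnd = wPos.add(top.scalarMultiply(tunnelDia));             // movingWallUp end point
        System.out.println(leftEnd + " " + rightEnd + " " + upEnd);
    }
}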
From source file: org.jtrfp.trcl.World.java
public Camera newCamera() {
    final Camera camera = new Camera(tr.gpu.get());
    camera.setViewDepth(cameraViewDepth);
    camera.setPosition(new Vector3D(camera.getCameraPosition().getX(), sizeY / 3.15,
            camera.getCameraPosition().getZ()));
    return camera;
}
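Because Vector3D is immutable, newCamera "edits" a single component by constructing a fresh vector from the old X and Z plus the new Y. The same pattern in isolation (the camera API is replaced by a plain vector here; class name and values are invented, 3.15 is the divisor used above):

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class ComponentUpdateSketch {
    public static void main(String[] args) {
        double sizeY = 1000.0; // assumed world height
        Vector3D oldPos = new Vector3D(10, 20, 30);
        // keep X and Z, replace Y: Vector3D has no setters
        Vector3D newPos = new Vector3D(oldPos.getX(), sizeY / 3.15, oldPos.getZ());
        System.out.println(newPos);
    }
}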
From source file: org.micromanager.asidispim.AcquisitionPanel.java
/**
 * Actually runs the acquisition; does the dirty work of setting
 * up the controller, the circular buffer, starting the cameras,
 * grabbing the images and putting them into the acquisition, etc.
 * @param testAcq true if running test acquisition only (see runTestAcquisition() javadoc)
 * @param testAcqSide only applies to test acquisition, passthrough from runTestAcquisition()
 * @return true if ran without any fatal errors.
 */
private boolean runAcquisitionPrivate(boolean testAcq, Devices.Sides testAcqSide) {
    // sanity check, shouldn't call this unless we aren't running an acquisition
    if (gui_.isAcquisitionRunning()) {
        MyDialogUtils.showError("An acquisition is already running");
        return false;
    }
    if (ASIdiSPIM.getFrame().getHardwareInUse()) {
        MyDialogUtils.showError("Hardware is being used by something else (maybe autofocus?)");
        return false;
    }
    boolean liveModeOriginally = gui_.isLiveModeOn();
    if (liveModeOriginally) {
        gui_.enableLiveMode(false);
    }
    // make sure slice timings are up to date
    // do this automatically; we used to prompt user if they were out of date
    // do this before getting snapshot of sliceTiming_ in acqSettings
    recalculateSliceTiming(!minSlicePeriodCB_.isSelected());
    if (!sliceTiming_.valid) {
        MyDialogUtils.showError("Error in calculating the slice timing; is the camera mode set correctly?");
        return false;
    }
    AcquisitionSettings acqSettingsOrig = getCurrentAcquisitionSettings();
    if (acqSettingsOrig.cameraMode == CameraModes.Keys.LIGHT_SHEET && core_.getPixelSizeUm() < 1e-6) {
        // can't compare floating-point values for equality directly, so treat anything < 1e-6 as zero or negative
        ReportingUtils.showError("Need to configure pixel size in Micro-Manager to use light sheet mode.");
        return false;
    }
    // if a test acquisition then only run single timepoint, no autofocus
    // allow multi-positions for test acquisition for now, though perhaps this is not desirable
    if (testAcq) {
        acqSettingsOrig.useTimepoints = false;
        acqSettingsOrig.numTimepoints = 1;
        acqSettingsOrig.useAutofocus = false;
        acqSettingsOrig.separateTimepoints = false;
        // if called from the setup panels then the side will be specified
        // so we can do an appropriate single-sided acquisition
        // if called from the acquisition panel then NONE will be specified
        // and run according to existing settings
        if (testAcqSide != Devices.Sides.NONE) {
            acqSettingsOrig.numSides = 1;
            acqSettingsOrig.firstSideIsA = (testAcqSide == Devices.Sides.A);
        }
        // work around limitation of not being able to use PLogic per-volume switching with single side
        // => do per-volume switching instead (only difference should be extra time to switch)
        if (acqSettingsOrig.useChannels && acqSettingsOrig.channelMode == MultichannelModes.Keys.VOLUME_HW
                && acqSettingsOrig.numSides < 2) {
            acqSettingsOrig.channelMode = MultichannelModes.Keys.VOLUME;
        }
    }
    double volumeDuration = computeActualVolumeDuration(acqSettingsOrig);
    double timepointDuration = computeTimepointDuration();
    long timepointIntervalMs = Math.round(acqSettingsOrig.timepointInterval * 1000);
    // use hardware timing if < 1 second between timepoints
    // experimentally need ~0.5 sec to set up acquisition, this gives a bit of cushion
    // cannot do this in getCurrentAcquisitionSettings because of mutually recursive
    // call with computeActualVolumeDuration()
    if (acqSettingsOrig.numTimepoints > 1 && timepointIntervalMs < (timepointDuration + 750)
            && !acqSettingsOrig.isStageScanning) {
        acqSettingsOrig.hardwareTimepoints = true;
    }
    if (acqSettingsOrig.useMultiPositions) {
        if (acqSettingsOrig.hardwareTimepoints
                || ((acqSettingsOrig.numTimepoints > 1) && (timepointIntervalMs < timepointDuration * 1.2))) {
            // change to not hardwareTimepoints and warn user
            // but allow acquisition to continue
            acqSettingsOrig.hardwareTimepoints = false;
            MyDialogUtils.showError("Timepoint interval may not be sufficient "
                    + "depending on actual time required to change positions. "
                    + "Proceed at your own risk.");
        }
    }
    // now acqSettings should be read-only
    final AcquisitionSettings acqSettings = acqSettingsOrig;
    // generate string for log file
    Gson gson = new GsonBuilder().setPrettyPrinting().create();
    final String acqSettingsJSON = gson.toJson(acqSettings);
    // get MM device names for first/second cameras to acquire
    String firstCamera, secondCamera;
    Devices.Keys firstCameraKey, secondCameraKey;
    boolean firstSideA = acqSettings.firstSideIsA;
    if (firstSideA) {
        firstCamera = devices_.getMMDevice(Devices.Keys.CAMERAA);
        firstCameraKey = Devices.Keys.CAMERAA;
        secondCamera = devices_.getMMDevice(Devices.Keys.CAMERAB);
        secondCameraKey = Devices.Keys.CAMERAB;
    } else {
        firstCamera = devices_.getMMDevice(Devices.Keys.CAMERAB);
        firstCameraKey = Devices.Keys.CAMERAB;
        secondCamera = devices_.getMMDevice(Devices.Keys.CAMERAA);
        secondCameraKey = Devices.Keys.CAMERAA;
    }
    boolean sideActiveA, sideActiveB;
    final boolean twoSided = acqSettings.numSides > 1;
    if (twoSided) {
        sideActiveA = true;
        sideActiveB = true;
    } else {
        secondCamera = null;
        if (firstSideA) {
            sideActiveA = true;
            sideActiveB = false;
        } else {
            sideActiveA = false;
            sideActiveB = true;
        }
    }
    final boolean acqBothCameras = acqSettings.acquireBothCamerasSimultaneously;
    boolean camActiveA = sideActiveA || acqBothCameras;
    boolean camActiveB = sideActiveB || acqBothCameras;
    if (camActiveA) {
        if (!devices_.isValidMMDevice(Devices.Keys.CAMERAA)) {
            MyDialogUtils.showError("Using side A but no camera specified for that side.");
            return false;
        }
        Devices.Keys camKey = Devices.Keys.CAMERAA;
        Devices.Libraries camLib = devices_.getMMDeviceLibrary(camKey);
        if (!CameraModes.getValidModeKeys(camLib).contains(getSPIMCameraMode())) {
            MyDialogUtils.showError("Camera trigger mode set to " + getSPIMCameraMode().toString()
                    + " but camera A doesn't support it.");
            return false;
        }
        // Hamamatsu only supports light sheet mode with CameraLink readout. It seems due to the
        // static architecture of getValidModeKeys there is no good way to tell earlier that light
        // sheet mode isn't supported. I don't like this but don't see another option.
        if (camLib == Devices.Libraries.HAMCAM
                && props_.getPropValueString(camKey, Properties.Keys.CAMERA_BUS).equals(Properties.Values.USB3)) {
            if (getSPIMCameraMode() == CameraModes.Keys.LIGHT_SHEET) {
                MyDialogUtils.showError("Hamamatsu only supports light sheet mode with CameraLink readout.");
                return false;
            }
        }
    }
    if (sideActiveA) {
        if (!devices_.isValidMMDevice(Devices.Keys.GALVOA)) {
            MyDialogUtils.showError("Using side A but no scanner specified for that side.");
            return false;
        }
        if (requiresPiezos(acqSettings.spimMode) && !devices_.isValidMMDevice(Devices.Keys.PIEZOA)) {
            MyDialogUtils.showError("Using side A and acquisition mode requires piezos but no piezo specified for that side.");
            return false;
        }
    }
    if (camActiveB) {
        if (!devices_.isValidMMDevice(Devices.Keys.CAMERAB)) {
            MyDialogUtils.showError("Using side B but no camera specified for that side.");
            return false;
        }
        if (!CameraModes.getValidModeKeys(devices_.getMMDeviceLibrary(Devices.Keys.CAMERAB))
                .contains(getSPIMCameraMode())) {
            MyDialogUtils.showError("Camera trigger mode set to " + getSPIMCameraMode().toString()
                    + " but camera B doesn't support it.");
            return false;
        }
    }
    if (sideActiveB) {
        if (!devices_.isValidMMDevice(Devices.Keys.GALVOB)) {
            MyDialogUtils.showError("Using side B but no scanner specified for that side.");
            return false;
        }
        if (requiresPiezos(acqSettings.spimMode) && !devices_.isValidMMDevice(Devices.Keys.PIEZOB)) {
            MyDialogUtils.showError("Using side B and acquisition mode requires piezos but no piezo specified for that side.");
            return false;
        }
    }
    boolean usingDemoCam = (devices_.getMMDeviceLibrary(Devices.Keys.CAMERAA).equals(Devices.Libraries.DEMOCAM) && camActiveA)
            || (devices_.getMMDeviceLibrary(Devices.Keys.CAMERAB).equals(Devices.Libraries.DEMOCAM) && camActiveB);
    // set up channels
    int nrChannelsSoftware = acqSettings.numChannels; // how many times we trigger the controller per stack
    int nrSlicesSoftware = acqSettings.numSlices;
    String originalChannelConfig = "";
    boolean changeChannelPerVolumeSoftware = false;
    if (acqSettings.useChannels) {
        if (acqSettings.numChannels < 1) {
            MyDialogUtils.showError("\"Channels\" is checked, but no channels are selected");
            return false;
        }
        // get current channel so that we can restore it, then set channel appropriately
        originalChannelConfig = multiChannelPanel_.getCurrentConfig();
        switch (acqSettings.channelMode) {
        case VOLUME:
            changeChannelPerVolumeSoftware = true;
            multiChannelPanel_.initializeChannelCycle();
            break;
        case VOLUME_HW:
        case SLICE_HW:
            if (acqSettings.numChannels == 1) {
                // only 1 channel selected so don't have to really use hardware switching
                multiChannelPanel_.initializeChannelCycle();
                multiChannelPanel_.selectNextChannel();
            } else {
                // we have at least 2 channels
                boolean success = controller_.setupHardwareChannelSwitching(acqSettings);
                if (!success) {
                    MyDialogUtils.showError("Couldn't set up slice hardware channel switching.");
                    return false;
                }
                nrChannelsSoftware = 1;
                nrSlicesSoftware = acqSettings.numSlices * acqSettings.numChannels;
            }
            break;
        default:
            MyDialogUtils.showError("Unsupported multichannel mode \"" + acqSettings.channelMode.toString() + "\"");
            return false;
        }
    }
    if (twoSided && acqBothCameras) {
        nrSlicesSoftware *= 2;
    }
    if (acqSettings.hardwareTimepoints) {
        // in hardwareTimepoints case we trigger controller once for all timepoints => need to
        // adjust number of frames we expect back from the camera during MM's SequenceAcquisition
        if (acqSettings.cameraMode == CameraModes.Keys.OVERLAP) {
            // For overlap mode we send one extra trigger per channel per side for
            // volume-switching (both PLogic and not).
            // This holds for all multi-channel modes, just the order in which the extra trigger comes varies.
            // Very last trigger won't ever return a frame so subtract 1.
            nrSlicesSoftware = ((acqSettings.numSlices + 1) * acqSettings.numChannels * acqSettings.numTimepoints);
            if (twoSided && acqBothCameras) {
                nrSlicesSoftware *= 2;
            }
            nrSlicesSoftware -= 1;
        } else {
            // we get back one image per trigger for all trigger modes other than OVERLAP
            // and we have already computed how many images that is (nrSlicesSoftware)
            nrSlicesSoftware *= acqSettings.numTimepoints;
            if (twoSided && acqBothCameras) {
                nrSlicesSoftware *= 2;
            }
        }
    }
    // set up XY positions
    int nrPositions = 1;
    PositionList positionList = new PositionList();
    if (acqSettings.useMultiPositions) {
        try {
            positionList = gui_.getPositionList();
            nrPositions = positionList.getNumberOfPositions();
        } catch (MMScriptException ex) {
            MyDialogUtils.showError(ex, "Error getting position list for multiple XY positions");
        }
        if (nrPositions < 1) {
            MyDialogUtils.showError("\"Positions\" is checked, but no positions are in position list");
            return false;
        }
    }
    // make sure we have cameras selected
    if (!checkCamerasAssigned(true)) {
        return false;
    }
    final float cameraReadoutTime = computeCameraReadoutTime();
    final double exposureTime = acqSettings.sliceTiming.cameraExposure;
    final boolean save = saveCB_.isSelected() && !testAcq;
    final String rootDir = rootField_.getText();
    // make sure we have a valid directory to save in
    final File dir = new File(rootDir);
    if (save) {
        try {
            if (!dir.exists()) {
                if (!dir.mkdir()) {
                    throw new Exception();
                }
            }
        } catch (Exception ex) {
            MyDialogUtils.showError("Could not create directory for saving acquisition data.");
            return false;
        }
    }
    if (acqSettings.separateTimepoints) {
        // because separate timepoints closes windows when done, force the user to save data to disk to avoid confusion
        if (!save) {
            MyDialogUtils.showError("For separate timepoints, \"Save while acquiring\" must be enabled.");
            return false;
        }
        // for separate timepoints, make sure the directory is empty to make sure naming pattern is "clean"
        // this is an arbitrary choice to avoid confusion later on when looking at file names
        if (dir.list().length > 0) {
            MyDialogUtils.showError("For separate timepoints the saving directory must be empty.");
            return false;
        }
    }
    int nrFrames; // how many Micro-Manager "frames" = time points to take
    if (acqSettings.separateTimepoints) {
        nrFrames = 1;
        nrRepeats_ = acqSettings.numTimepoints;
    } else {
        nrFrames = acqSettings.numTimepoints;
        nrRepeats_ = 1;
    }
    AcquisitionModes.Keys spimMode = acqSettings.spimMode;
    boolean autoShutter = core_.getAutoShutter();
    boolean shutterOpen = false; // will read later
    String originalCamera = core_.getCameraDevice();
    // more sanity checks
    // TODO move these checks earlier, before we set up channels and XY positions
    // make sure stage scan is supported if selected
    if (acqSettings.isStageScanning) {
        if (!devices_.isTigerDevice(Devices.Keys.XYSTAGE)
                || !props_.hasProperty(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_NUMLINES)) {
            MyDialogUtils.showError("Must have stage with scan-enabled firmware for stage scanning.");
            return false;
        }
        if (acqSettings.spimMode == AcquisitionModes.Keys.STAGE_SCAN_INTERLEAVED && acqSettings.numSides < 2) {
            MyDialogUtils.showError("Interleaved mode requires two sides.");
            return false;
        }
    }
    double sliceDuration = acqSettings.sliceTiming.sliceDuration;
    if (exposureTime + cameraReadoutTime > sliceDuration) {
        // should only be possible to mess this up using advanced timing settings
        // or if there are errors in our own calculations
        MyDialogUtils.showError("Exposure time of " + exposureTime
                + " is longer than time needed for a line scan with" + " readout time of " + cameraReadoutTime
                + "\n" + "This will result in dropped frames. " + "Please change input");
        return false;
    }
    // if we want to do hardware timepoints make sure there's not a problem
    // lots of different situations where hardware timepoints can't be used...
    if (acqSettings.hardwareTimepoints) {
        if (acqSettings.useChannels && acqSettings.channelMode == MultichannelModes.Keys.VOLUME_HW) {
            // both hardware time points and volume channel switching use SPIMNumRepeats property
            // TODO this seems a severe limitation, maybe this could be changed in the future via firmware change
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with hardware channel switching volume-by-volume.");
            return false;
        }
        if (acqSettings.isStageScanning) {
            // stage scanning needs to be triggered for each time point
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with stage scanning.");
            return false;
        }
        if (acqSettings.separateTimepoints) {
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with separate viewers/file for each time point.");
            return false;
        }
        if (acqSettings.useAutofocus) {
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with autofocus during acquisition.");
            return false;
        }
        if (acqSettings.useMovementCorrection) {
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with movement correction during acquisition.");
            return false;
        }
        if (acqSettings.useChannels && acqSettings.channelMode == MultichannelModes.Keys.VOLUME) {
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with software channels (need to use PLogic channel switching).");
            return false;
        }
        if (spimMode == AcquisitionModes.Keys.NO_SCAN) {
            MyDialogUtils.showError("Cannot do hardware time points when no scan mode is used."
                    + " Use the number of slices to set the number of images to acquire.");
            return false;
        }
    }
    if (acqSettings.useChannels && acqSettings.channelMode == MultichannelModes.Keys.VOLUME_HW
            && acqSettings.numSides < 2) {
        MyDialogUtils.showError("Cannot do PLogic channel switching of volume when only one"
                + " side is selected. Pester the developers if you need this.");
        return false;
    }
    // make sure we aren't trying to collect timepoints faster than we can
    if (!acqSettings.useMultiPositions && acqSettings.numTimepoints > 1) {
        if (timepointIntervalMs < volumeDuration) {
            MyDialogUtils.showError("Time point interval shorter than" + " the time to collect a single volume.\n");
            return false;
        }
    }
    // Autofocus settings; only used if acqSettings.useAutofocus is true
    boolean autofocusAtT0 = false;
    int autofocusEachNFrames = 10;
    String autofocusChannel = "";
    if (acqSettings.useAutofocus) {
        autofocusAtT0 = prefs_.getBoolean(MyStrings.PanelNames.AUTOFOCUS.toString(),
                Properties.Keys.PLUGIN_AUTOFOCUS_ACQBEFORESTART, false);
        autofocusEachNFrames = props_.getPropValueInteger(Devices.Keys.PLUGIN,
                Properties.Keys.PLUGIN_AUTOFOCUS_EACHNIMAGES);
        autofocusChannel = props_.getPropValueString(Devices.Keys.PLUGIN, Properties.Keys.PLUGIN_AUTOFOCUS_CHANNEL);
        // double-check that selected channel is valid if we are doing multi-channel
        if (acqSettings.useChannels) {
            String channelGroup = props_.getPropValueString(Devices.Keys.PLUGIN,
                    Properties.Keys.PLUGIN_MULTICHANNEL_GROUP);
            StrVector channels = gui_.getMMCore().getAvailableConfigs(channelGroup);
            boolean found = false;
            for (String channel : channels) {
                if (channel.equals(autofocusChannel)) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                MyDialogUtils.showError("Invalid autofocus channel selected on autofocus tab.");
                return false;
            }
        }
    }
    // Movement Correction settings; only used if acqSettings.useMovementCorrection is true
    int correctMovementEachNFrames = 10;
    String correctMovementChannel = "";
    int cmChannelNumber = -1;
    if (acqSettings.useMovementCorrection) {
        correctMovementEachNFrames = props_.getPropValueInteger(Devices.Keys.PLUGIN,
                Properties.Keys.PLUGIN_AUTOFOCUS_CORRECTMOVEMENT_EACHNIMAGES);
        correctMovementChannel = props_.getPropValueString(Devices.Keys.PLUGIN,
                Properties.Keys.PLUGIN_AUTOFOCUS_CORRECTMOVEMENT_CHANNEL);
        // double-check that selected channel is valid if we are doing multi-channel
        if (acqSettings.useChannels) {
            String channelGroup = props_.getPropValueString(Devices.Keys.PLUGIN,
                    Properties.Keys.PLUGIN_MULTICHANNEL_GROUP);
            StrVector channels = gui_.getMMCore().getAvailableConfigs(channelGroup);
            boolean found = false;
            for (String channel : channels) {
                if (channel.equals(correctMovementChannel)) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                MyDialogUtils.showError("Invalid movement correction channel selected on autofocus tab.");
                return false;
            }
        }
    }
    // the circular buffer, which is used by both cameras, can only have one image size setting
    // => require same image height and width for both cameras if both are used
    if (twoSided || acqBothCameras) {
        try {
            Rectangle roi_1 = core_.getROI(firstCamera);
            Rectangle roi_2 = core_.getROI(secondCamera);
            if (roi_1.width != roi_2.width || roi_1.height != roi_2.height) {
                MyDialogUtils.showError("Two cameras' ROI height and width must be equal because of Micro-Manager's circular buffer");
                return false;
            }
        } catch (Exception ex) {
            MyDialogUtils.showError(ex, "Problem getting camera ROIs");
        }
    }
    cameras_.setCameraForAcquisition(firstCameraKey, true);
    if (twoSided || acqBothCameras) {
        cameras_.setCameraForAcquisition(secondCameraKey, true);
    }
    // save exposure time, will restore at end of acquisition
    try {
        prefs_.putFloat(MyStrings.PanelNames.SETTINGS.toString(),
                Properties.Keys.PLUGIN_CAMERA_LIVE_EXPOSURE_FIRST.toString(),
                (float) core_.getExposure(devices_.getMMDevice(firstCameraKey)));
        if (twoSided || acqBothCameras) {
            prefs_.putFloat(MyStrings.PanelNames.SETTINGS.toString(),
                    Properties.Keys.PLUGIN_CAMERA_LIVE_EXPOSURE_SECOND.toString(),
                    (float) core_.getExposure(devices_.getMMDevice(secondCameraKey)));
        }
    } catch (Exception ex) {
        MyDialogUtils.showError(ex, "could not cache exposure");
    }
    try {
        core_.setExposure(firstCamera, exposureTime);
        if (twoSided || acqBothCameras) {
            core_.setExposure(secondCamera, exposureTime);
        }
        gui_.refreshGUIFromCache();
    } catch (Exception ex) {
        MyDialogUtils.showError(ex, "could not set exposure");
    }
    // seems to have a problem if the core's camera has been set to some other
    // camera before we start doing things, so set to a SPIM camera
    try {
        core_.setCameraDevice(firstCamera);
    } catch (Exception ex) {
        MyDialogUtils.showError(ex, "could not set camera");
    }
    // empty out circular buffer
    try {
        core_.clearCircularBuffer();
    } catch (Exception ex) {
        MyDialogUtils.showError(ex, "Error emptying out the circular buffer");
        return false;
    }
    // stop the serial traffic for position updates during acquisition
    // if we return from this function (including aborting) we need to unpause
    posUpdater_.pauseUpdates(true);
    // initialize stage scanning so we can restore state
    Point2D.Double xyPosUm = new Point2D.Double();
    float origXSpeed = 1f; // don't want 0 in case something goes wrong
    float origXAccel = 1f; // don't want 0 in case something goes wrong
    if (acqSettings.isStageScanning) {
        try {
            xyPosUm = core_.getXYStagePosition(devices_.getMMDevice(Devices.Keys.XYSTAGE));
            origXSpeed = props_.getPropValueFloat(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED);
            origXAccel = props_.getPropValueFloat(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL);
        } catch (Exception ex) {
            MyDialogUtils.showError("Could not get XY stage position, speed, or acceleration for stage scan initialization");
            posUpdater_.pauseUpdates(false);
            return false;
        }
        // if X speed is less than 0.2 mm/s then it probably wasn't restored to correct speed some other time
        // we offer to set it to a more normal speed in that case, until the user declines and we stop asking
        if (origXSpeed < 0.2 && resetXaxisSpeed_) {
            resetXaxisSpeed_ = MyDialogUtils.getConfirmDialogResult(
                    "Max speed of X axis is small, perhaps it was not correctly restored after stage scanning previously. Do you want to set it to 1 mm/s now?",
                    JOptionPane.YES_NO_OPTION);
            // once the user selects "no" then resetXaxisSpeed_ will be false and stay false until plugin is launched again
            if (resetXaxisSpeed_) {
                props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED, 1f);
                origXSpeed = 1f;
            }
        }
    }
    numTimePointsDone_ = 0;
    // force saving as image stacks, not individual files
    // implementation assumes just two options, either
    // TaggedImageStorageDiskDefault.class or TaggedImageStorageMultipageTiff.class
    boolean separateImageFilesOriginally = ImageUtils.getImageStorageClass().equals(TaggedImageStorageDiskDefault.class);
    ImageUtils.setImageStorageClass(TaggedImageStorageMultipageTiff.class);
    // Set up controller SPIM parameters (including from Setup panel settings)
    // want to do this, even with demo cameras, so we can test everything else
    if (!controller_.prepareControllerForAquisition(acqSettings)) {
        posUpdater_.pauseUpdates(false);
        return false;
    }
    boolean nonfatalError = false;
    long acqButtonStart = System.currentTimeMillis();
    String acqName = "";
    acq_ = null;
    // do not want to return from within this loop => throw exception instead
    // loop is executed once per acquisition (i.e. once if separate viewers isn't selected
    // or once per timepoint if separate viewers is selected)
    long repeatStart = System.currentTimeMillis();
    for (int acqNum = 0; !cancelAcquisition_.get() && acqNum < nrRepeats_; acqNum++) {
        // handle intervals between (software-timed) repeats
        // only applies when doing separate viewers for each timepoint
        // and have multiple timepoints
        long repeatNow = System.currentTimeMillis();
        long repeatdelay = repeatStart + acqNum * timepointIntervalMs - repeatNow;
        while (repeatdelay > 0 && !cancelAcquisition_.get()) {
            updateAcquisitionStatus(AcquisitionStatus.WAITING, (int) (repeatdelay / 1000));
            long sleepTime = Math.min(1000, repeatdelay);
            try {
                Thread.sleep(sleepTime);
            } catch (InterruptedException e) {
                ReportingUtils.showError(e);
            }
            repeatNow = System.currentTimeMillis();
            repeatdelay = repeatStart + acqNum * timepointIntervalMs - repeatNow;
        }
        BlockingQueue<TaggedImage> bq = new LinkedBlockingQueue<TaggedImage>(10);
        // try to close last acquisition viewer if there could be one open (only in single acquisition per timepoint mode)
        if (acqSettings.separateTimepoints && (acq_ != null) && !cancelAcquisition_.get()) {
            try {
                // following line needed due to some arcane internal reason, otherwise
                // call to closeAcquisitionWindow() fails silently.
                // See http://sourceforge.net/p/micro-manager/mailman/message/32999320/
                acq_.promptToSave(false);
                gui_.closeAcquisitionWindow(acqName);
            } catch (Exception ex) {
                // do nothing if unsuccessful
            }
        }
        if (acqSettings.separateTimepoints) {
            // call to getUniqueAcquisitionName is extra safety net, we have checked that directory is empty before starting
            acqName = gui_.getUniqueAcquisitionName(prefixField_.getText() + "_" + acqNum);
        } else {
            acqName = gui_.getUniqueAcquisitionName(prefixField_.getText());
        }
        long extraStageScanTimeout = 0;
        if (acqSettings.isStageScanning) {
            // approximately compute the extra time to wait for stack to begin (ramp up time)
            // by getting the volume duration and subtracting the acquisition duration and then dividing by two
            extraStageScanTimeout = (long) Math.ceil(computeActualVolumeDuration(acqSettings)
                    - (acqSettings.numSlices * acqSettings.numChannels * acqSettings.sliceTiming.sliceDuration)) / 2;
        }
        long extraMultiXYTimeout = 0;
        if (acqSettings.useMultiPositions) {
            // give 20 extra seconds to arrive at intended XY position instead of trying to get fancy about computing actual move time
            extraMultiXYTimeout = XYSTAGETIMEOUT;
            // furthermore make sure that the main timeout value is at least as large,
            // because MM's position list uses it (via MultiStagePosition.goToPosition)
            if (props_.getPropValueInteger(Devices.Keys.CORE, Properties.Keys.CORE_TIMEOUT_MS) < XYSTAGETIMEOUT) {
                props_.setPropValue(Devices.Keys.CORE, Properties.Keys.CORE_TIMEOUT_MS, XYSTAGETIMEOUT);
            }
        }
        VirtualAcquisitionDisplay vad = null;
        WindowListener wl_acq = null;
        WindowListener[] wls_orig = null;
        try {
            // check for stop button before each acquisition
            if (cancelAcquisition_.get()) {
                throw new IllegalMonitorStateException("User stopped the acquisition");
            }
            // flag that we are actually running acquisition now
            acquisitionRunning_.set(true);
            ReportingUtils.logMessage("diSPIM plugin starting acquisition " + acqName
                    + " with following settings: " + acqSettingsJSON);
            final int numMMChannels = acqSettings.numSides * acqSettings.numChannels * (acqBothCameras ? 2 : 1);
            if (spimMode == AcquisitionModes.Keys.NO_SCAN && !acqSettings.separateTimepoints) {
                // swap nrFrames and numSlices
                gui_.openAcquisition(acqName, rootDir, acqSettings.numSlices, numMMChannels, nrFrames,
                        nrPositions, true, save);
            } else {
                gui_.openAcquisition(acqName, rootDir, nrFrames, numMMChannels, acqSettings.numSlices,
                        nrPositions, true, save);
            }
            channelNames_ = new String[numMMChannels];
            // generate channel names and colors
            // also builds viewString for MultiViewRegistration metadata
            String viewString = "";
            final String SEPARATOR = "_";
            for (int reflect = 0; reflect < 2; reflect++) {
                // only run for loop once unless acqBothCameras is true
                // if acqBothCameras is true then run second time to add "epi" channels
                if (reflect > 0 && !acqBothCameras) {
                    continue;
                }
                // set up channels (side A/B is treated as channel too)
                if (acqSettings.useChannels) {
                    ChannelSpec[] channels = multiChannelPanel_.getUsedChannels();
                    for (int i = 0; i < channels.length; i++) {
                        String chName = "-" + channels[i].config_ + (reflect > 0 ? "-epi" : "");
                        // same algorithm for channel index vs. specified channel and side as in comments of code below
                        // that figures out the channel where to file each incoming image
                        int channelIndex = i;
                        if (twoSided) {
                            channelIndex *= 2;
                        }
                        channelIndex += reflect * numMMChannels / 2;
                        channelNames_[channelIndex] = firstCamera + chName;
                        viewString += NumberUtils.intToDisplayString(0) + SEPARATOR;
                        if (twoSided) {
                            channelNames_[channelIndex + 1] = secondCamera + chName;
                            viewString += NumberUtils.intToDisplayString(90) + SEPARATOR;
                        }
                    }
                } else {
                    // single-channel
                    int channelIndex = reflect * numMMChannels / 2;
                    channelNames_[channelIndex] = firstCamera + (reflect > 0 ? "-epi" : "");
                    viewString += NumberUtils.intToDisplayString(0) + SEPARATOR;
                    if (twoSided) {
                        channelNames_[channelIndex + 1] = secondCamera + (reflect > 0 ? "-epi" : "");
                        viewString += NumberUtils.intToDisplayString(90) + SEPARATOR;
                    }
                }
            }
            // strip last separator of viewString (for Multiview Reconstruction)
            viewString = viewString.substring(0, viewString.length() - 1);
            // assign channel names and colors
            for (int i = 0; i < numMMChannels; i++) {
                gui_.setChannelName(acqName, i, channelNames_[i]);
                gui_.setChannelColor(acqName, i, getChannelColor(i));
            }
            if (acqSettings.useMovementCorrection) {
                for (int i = 0; i < acqSettings.numChannels; i++) {
                    if (channelNames_[i].equals(firstCamera + "-" + correctMovementChannel)) {
                        cmChannelNumber = i;
                    }
                }
                if (cmChannelNumber == -1) {
                    MyDialogUtils.showError("The channel selected for movement correction on the autofocus tab was not found in this acquisition");
                    return false;
                }
            }
            zStepUm_ = acqSettings.isStageScanning
                    ? controller_.getActualStepSizeUm() // computed step size, accounting for quantization of controller
                    : acqSettings.stepSizeUm; // should be same as PanelUtils.getSpinnerFloatValue(stepSize_)
            // initialize acquisition
            gui_.initializeAcquisition(acqName, (int) core_.getImageWidth(), (int) core_.getImageHeight(),
                    (int) core_.getBytesPerPixel(), (int) core_.getImageBitDepth());
            gui_.promptToSaveAcquisition(acqName, !testAcq);
            // These metadata have to be added after initialization,
            // otherwise they will not be shown?!
            gui_.setAcquisitionProperty(acqName, "NumberOfSides",
                    NumberUtils.doubleToDisplayString(acqSettings.numSides));
            gui_.setAcquisitionProperty(acqName, "FirstSide", acqSettings.firstSideIsA ? "A" : "B");
            gui_.setAcquisitionProperty(acqName, "SlicePeriod_ms", actualSlicePeriodLabel_.getText());
            gui_.setAcquisitionProperty(acqName, "LaserExposure_ms",
                    NumberUtils.doubleToDisplayString(acqSettings.desiredLightExposure));
            gui_.setAcquisitionProperty(acqName, "VolumeDuration", actualVolumeDurationLabel_.getText());
            gui_.setAcquisitionProperty(acqName, "SPIMmode", spimMode.toString());
            // Multi-page TIFF saving code wants this one (cameras are all 16-bits, so not much reason for anything else)
            gui_.setAcquisitionProperty(acqName, "PixelType", "GRAY16");
            gui_.setAcquisitionProperty(acqName, "UseAutofocus",
                    acqSettings.useAutofocus ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
            gui_.setAcquisitionProperty(acqName, "UseMotionCorrection",
                    acqSettings.useMovementCorrection ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
            gui_.setAcquisitionProperty(acqName, "HardwareTimepoints",
                    acqSettings.hardwareTimepoints ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
            gui_.setAcquisitionProperty(acqName, "SeparateTimepoints",
                    acqSettings.separateTimepoints ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
            gui_.setAcquisitionProperty(acqName, "CameraMode", acqSettings.cameraMode.toString());
            gui_.setAcquisitionProperty(acqName, "z-step_um", NumberUtils.doubleToDisplayString(zStepUm_));
            // Properties for use by MultiViewRegistration plugin
            // Format is: x_y_z, set to 1 if we should rotate around this axis.
            gui_.setAcquisitionProperty(acqName, "MVRotationAxis", "0_1_0");
            gui_.setAcquisitionProperty(acqName, "MVRotations", viewString);
            // save XY and SPIM head position in metadata
            // update positions first at expense of two extra serial transactions
            refreshXYZPositions();
            gui_.setAcquisitionProperty(acqName, "Position_X",
                    positions_.getPositionString(Devices.Keys.XYSTAGE, Directions.X));
            gui_.setAcquisitionProperty(acqName, "Position_Y",
                    positions_.getPositionString(Devices.Keys.XYSTAGE, Directions.Y));
            gui_.setAcquisitionProperty(acqName, "Position_SPIM_Head",
                    positions_.getPositionString(Devices.Keys.UPPERZDRIVE));
            gui_.setAcquisitionProperty(acqName, "SPIMAcqSettings", acqSettingsJSON);
            gui_.setAcquisitionProperty(acqName, "SPIMtype", ASIdiSPIM.oSPIM ? "oSPIM" : "diSPIM");
            gui_.setAcquisitionProperty(acqName, "AcquisitionName", acqName);
            gui_.setAcquisitionProperty(acqName, "Prefix", acqName);
            // get circular buffer ready
            // do once here but not per-trigger; need to ensure ROI changes registered
            core_.initializeCircularBuffer(); // superset of clearCircularBuffer()
            // TODO: use new acquisition interface that goes through the pipeline
            //gui_.setAcquisitionAddImageAsynchronous(acqName);
            acq_ = gui_.getAcquisition(acqName);
            // Dive into MM internals since script interface does not support pipelines
            ImageCache imageCache = acq_.getImageCache();
            vad = acq_.getAcquisitionWindow();
            imageCache.addImageCacheListener(vad);
            // Start pumping images into the ImageCache
            DefaultTaggedImageSink sink = new DefaultTaggedImageSink(bq, imageCache);
            sink.start();
            // remove usual window listener(s) and replace it with our own
            // that will prompt before closing and cancel acquisition if confirmed
            // this should be considered a hack, it may not work perfectly
            // I have confirmed that there is only one windowListener and it seems to
            // also be related to window closing
            // Note that ImageJ's acquisition window is AWT instead of Swing
            wls_orig = vad.getImagePlus().getWindow().getWindowListeners();
            for (WindowListener l : wls_orig) {
                vad.getImagePlus().getWindow().removeWindowListener(l);
            }
            wl_acq = new WindowAdapter() {
                @Override
                public void windowClosing(WindowEvent arg0) {
                    // if running acquisition only close if user confirms
                    if (acquisitionRunning_.get()) {
                        boolean stop = MyDialogUtils.getConfirmDialogResult(
                                "Do you really want to abort the acquisition?", JOptionPane.YES_NO_OPTION);
                        if (stop) {
                            cancelAcquisition_.set(true);
                        }
                    }
                }
            };
            vad.getImagePlus().getWindow().addWindowListener(wl_acq);
            // patterned after implementation in MMStudio.java
            // will be null if not saving to disk
            lastAcquisitionPath_ = acq_.getImageCache().getDiskLocation();
            lastAcquisitionName_ = acqName;
            // only used when motion correction was requested
            MovementDetector[] movementDetectors = new MovementDetector[nrPositions];
            // Transformation matrices to convert between camera and stage coordinates
            final Vector3D yAxis = new Vector3D(0.0, 1.0, 0.0);
            final Rotation camARotation = new Rotation(yAxis, Math.toRadians(-45));
            final Rotation camBRotation = new Rotation(yAxis, Math.toRadians(45));
            final Vector3D zeroPoint = new Vector3D(0.0, 0.0, 0.0); // cache a zero point for efficiency
a stage isn't still moving try { core_.waitForSystem(); } catch (Exception e) { ReportingUtils.logError("error waiting for system"); } // Loop over all the times we trigger the controller's acquisition // (although if multi-channel with volume switching is selected there // is inner loop to trigger once per channel) // remember acquisition start time for software-timed timepoints // For hardware-timed timepoints we only trigger the controller once long acqStart = System.currentTimeMillis(); for (int trigNum = 0; trigNum < nrFrames; trigNum++) { // handle intervals between (software-timed) time points // when we are within the same acquisition // (if separate viewer is selected then nothing bad happens here // but waiting during interval handled elsewhere) long acqNow = System.currentTimeMillis(); long delay = acqStart + trigNum * timepointIntervalMs - acqNow; while (delay > 0 && !cancelAcquisition_.get()) { updateAcquisitionStatus(AcquisitionStatus.WAITING, (int) (delay / 1000)); long sleepTime = Math.min(1000, delay); Thread.sleep(sleepTime); acqNow = System.currentTimeMillis(); delay = acqStart + trigNum * timepointIntervalMs - acqNow; } // check for stop button before each time point if (cancelAcquisition_.get()) { throw new IllegalMonitorStateException("User stopped the acquisition"); } int timePoint = acqSettings.separateTimepoints ? acqNum : trigNum; // this is where we autofocus if requested if (acqSettings.useAutofocus) { // Note that we will not autofocus as expected when using hardware // timing. Seems OK, since hardware timing will result in short // acquisition times that do not need autofocus. We have already // ensured that we aren't doing both if ((autofocusAtT0 && timePoint == 0) || ((timePoint > 0) && (timePoint % autofocusEachNFrames == 0))) { if (acqSettings.useChannels) { multiChannelPanel_.selectChannel(autofocusChannel); } if (sideActiveA) { AutofocusUtils.FocusResult score = autofocus_.runFocus(this, Devices.Sides.A, false, sliceTiming_, false); updateCalibrationOffset(Devices.Sides.A, score); } if (sideActiveB) { AutofocusUtils.FocusResult score = autofocus_.runFocus(this, Devices.Sides.B, false, sliceTiming_, false); updateCalibrationOffset(Devices.Sides.B, score); } // Restore settings of the controller controller_.prepareControllerForAquisition(acqSettings); if (acqSettings.useChannels && acqSettings.channelMode != MultichannelModes.Keys.VOLUME) { controller_.setupHardwareChannelSwitching(acqSettings); } // make sure circular buffer is cleared core_.clearCircularBuffer(); } } numTimePointsDone_++; updateAcquisitionStatus(AcquisitionStatus.ACQUIRING); // loop over all positions for (int positionNum = 0; positionNum < nrPositions; positionNum++) { if (acqSettings.useMultiPositions) { // make sure user didn't stop things if (cancelAcquisition_.get()) { throw new IllegalMonitorStateException("User stopped the acquisition"); } // want to move between positions move stage fast, so we // will clobber stage scanning setting so need to restore it float scanXSpeed = 1f; float scanXAccel = 1f; if (acqSettings.isStageScanning) { scanXSpeed = props_.getPropValueFloat(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED); props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED, origXSpeed); scanXAccel = props_.getPropValueFloat(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL); props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL, origXAccel); } final MultiStagePosition nextPosition = 
positionList.getPosition(positionNum); // blocking call; will wait for stages to move MultiStagePosition.goToPosition(nextPosition, core_); // for stage scanning: restore speed and set up scan at new position // non-multi-position situation is handled in prepareControllerForAquisition instead if (acqSettings.isStageScanning) { props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED, scanXSpeed); props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL, scanXAccel); StagePosition pos = nextPosition.get(devices_.getMMDevice(Devices.Keys.XYSTAGE)); // get ideal position from position list, not current position controller_.prepareStageScanForAcquisition(pos.x, pos.y); } refreshXYZPositions(); // wait any extra time the user requests Thread.sleep(Math.round(PanelUtils.getSpinnerFloatValue(positionDelay_))); } // loop over all the times we trigger the controller // usually just once, but will be the number of channels if we have // multiple channels and aren't using PLogic to change between them for (int channelNum = 0; channelNum < nrChannelsSoftware; channelNum++) { try { // flag that we are using the cameras/controller ASIdiSPIM.getFrame().setHardwareInUse(true); // deal with shutter before starting acquisition shutterOpen = core_.getShutterOpen(); if (autoShutter) { core_.setAutoShutter(false); if (!shutterOpen) { core_.setShutterOpen(true); } } // start the cameras core_.startSequenceAcquisition(firstCamera, nrSlicesSoftware, 0, true); if (twoSided || acqBothCameras) { core_.startSequenceAcquisition(secondCamera, nrSlicesSoftware, 0, true); } // deal with channel if needed (hardware channel switching doesn't happen here) if (changeChannelPerVolumeSoftware) { multiChannelPanel_.selectNextChannel(); } // special case: single-sided piezo acquisition risks illumination piezo sleeping // prevent this from happening by sending relative move of 0 like we do in live mode before each trigger // NB: this won't help for hardware-timed timepoints final Devices.Keys piezoIllumKey = firstSideA ? 
Devices.Keys.PIEZOB : Devices.Keys.PIEZOA; if (!twoSided && props_.getPropValueInteger(piezoIllumKey, Properties.Keys.AUTO_SLEEP_DELAY) > 0) { core_.setRelativePosition(devices_.getMMDevice(piezoIllumKey), 0); } // trigger the state machine on the controller // do this even with demo cameras to test everything else boolean success = controller_.triggerControllerStartAcquisition(spimMode, firstSideA); if (!success) { throw new Exception("Controller triggering not successful"); } ReportingUtils.logDebugMessage("Starting time point " + (timePoint + 1) + " of " + nrFrames + " with (software) channel number " + channelNum); // Wait for first image to create ImageWindow, so that we can be sure about image size // Do not actually grab first image here, just make sure it is there long start = System.currentTimeMillis(); long now = start; final long timeout = Math.max(3000, Math.round(10 * sliceDuration + 2 * acqSettings.delayBeforeSide)) + extraStageScanTimeout + extraMultiXYTimeout; while (core_.getRemainingImageCount() == 0 && (now - start < timeout) && !cancelAcquisition_.get()) { now = System.currentTimeMillis(); Thread.sleep(5); } if (now - start >= timeout) { String msg = "Camera did not send first image within a reasonable time.\n"; if (acqSettings.isStageScanning) { msg += "Make sure jumpers are correct on XY card and also micro-micromirror card."; } else { msg += "Make sure camera trigger cables are connected properly."; } throw new Exception(msg); } // grab all the images from the cameras, put them into the acquisition int[] channelImageNr = new int[4 * acqSettings.numChannels]; // keep track of how many frames we have received for each MM "channel" int[] cameraImageNr = new int[2]; // keep track of how many images we have received from the camera int[] tpNumber = new int[2 * acqSettings.numChannels]; // keep track of which timepoint we are on for hardware timepoints int imagesToSkip = 0; // hardware timepoints have to drop spurious images with overlap mode final boolean checkForSkips = acqSettings.hardwareTimepoints && (acqSettings.cameraMode == CameraModes.Keys.OVERLAP); boolean done = false; long timeout2 = Math.max(1000, Math.round(5 * sliceDuration)); if (acqSettings.isStageScanning) { // for stage scanning have to allow extra time for turn-around timeout2 += (2 * (long) Math.ceil(getStageRampDuration(acqSettings))); // ramp up and then down timeout2 += 5000; // ample extra time for turn-around (e.g. 
antibacklash move in Y), interestingly 500ms extra seems insufficient for reasons I don't understand yet so just pad this for now // TODO figure out why turn-around is taking so long if (acqSettings.spimMode == AcquisitionModes.Keys.STAGE_SCAN_UNIDIRECTIONAL) { timeout2 += (long) Math.ceil(getStageRetraceDuration(acqSettings)); // in unidirectional case also need to rewind } } start = System.currentTimeMillis(); long last = start; try { while ((core_.getRemainingImageCount() > 0 || core_.isSequenceRunning(firstCamera) || ((twoSided || acqBothCameras) && core_.isSequenceRunning(secondCamera))) && !done) { now = System.currentTimeMillis(); if (core_.getRemainingImageCount() > 0) { // we have an image to grab TaggedImage timg = core_.popNextTaggedImage(); if (checkForSkips && imagesToSkip != 0) { imagesToSkip--; continue; // goes to next iteration of this loop without doing anything else } // figure out which channel index this frame belongs to // "channel index" is channel of MM acquisition // channel indexes will go from 0 to (numSides * numChannels - 1) for standard (non-reflective) imaging // if double-sided then second camera gets odd channel indexes (1, 3, etc.) // and adjacent pairs will be same color (e.g. 0 and 1 will be from first color, 2 and 3 from second, etc.) // if acquisition from both cameras (reflective imaging) then // second half of channel indices are from opposite (epi) view // e.g. for 3-color 1-sided (A first) standard (non-reflective) then // 0 will be A-illum A-cam 1st color // 2 will be A-illum A-cam 2nd color // 4 will be A-illum A-cam 3rd color // e.g. for 3-color 2-sided (A first) standard (non-reflective) then // 0 will be A-illum A-cam 1st color // 1 will be B-illum B-cam 1st color // 2 will be A-illum A-cam 2nd color // 3 will be B-illum B-cam 2nd color // 4 will be A-illum A-cam 3rd color // 5 will be B-illum B-cam 3rd color // e.g. for 3-color 1-sided (A first) both camera (reflective) then // 0 will be A-illum A-cam 1st color // 1 will be A-illum A-cam 2nd color // 2 will be A-illum A-cam 3rd color // 3 will be A-illum B-cam 1st color // 4 will be A-illum B-cam 2nd color // 5 will be A-illum B-cam 3rd color // e.g. for 3-color 2-sided (A first) both camera (reflective) then // 0 will be A-illum A-cam 1st color // 1 will be B-illum B-cam 1st color // 2 will be A-illum A-cam 2nd color // 3 will be B-illum B-cam 2nd color // 4 will be A-illum A-cam 3rd color // 5 will be B-illum B-cam 3rd color // 6 will be A-illum B-cam 1st color // 7 will be B-illum A-cam 1st color // 8 will be A-illum B-cam 2nd color // 9 will be B-illum A-cam 2nd color // 10 will be A-illum B-cam 3rd color // 11 will be B-illum A-cam 3rd color String camera = (String) timg.tags.get("Camera"); int cameraIndex = camera.equals(firstCamera) ?
0 : 1; int channelIndex_tmp; switch (acqSettings.channelMode) { case NONE: case VOLUME: channelIndex_tmp = channelNum; break; case VOLUME_HW: channelIndex_tmp = cameraImageNr[cameraIndex] / acqSettings.numSlices; // want quotient only break; case SLICE_HW: channelIndex_tmp = cameraImageNr[cameraIndex] % acqSettings.numChannels; // want modulo arithmetic break; default: // should never get here throw new Exception("Undefined channel mode"); } if (acqBothCameras) { if (twoSided) { // 2-sided, both cameras channelIndex_tmp = channelIndex_tmp * 2 + cameraIndex; // determine whether first or second side by whether we've seen half the images yet if (cameraImageNr[cameraIndex] > nrSlicesSoftware / 2) { // second illumination side => second half of channels channelIndex_tmp += 2 * acqSettings.numChannels; } } else { // 1-sided, both cameras channelIndex_tmp += cameraIndex * acqSettings.numChannels; } } else { // normal situation, non-reflective imaging if (twoSided) { channelIndex_tmp *= 2; } channelIndex_tmp += cameraIndex; } final int channelIndex = channelIndex_tmp; int actualTimePoint = timePoint; if (acqSettings.hardwareTimepoints) { actualTimePoint = tpNumber[channelIndex]; } if (acqSettings.separateTimepoints) { // if we are doing separate timepoints then frame is always 0 actualTimePoint = 0; } // note that hardwareTimepoints and separateTimepoints can never both be true // add image to acquisition if (spimMode == AcquisitionModes.Keys.NO_SCAN && !acqSettings.separateTimepoints) { // create time series for no scan addImageToAcquisition(acq_, channelImageNr[channelIndex], channelIndex, actualTimePoint, positionNum, now - acqStart, timg, bq); } else { // standard, create Z-stacks addImageToAcquisition(acq_, actualTimePoint, channelIndex, channelImageNr[channelIndex], positionNum, now - acqStart, timg, bq); } // update our counters to be ready for next image channelImageNr[channelIndex]++; cameraImageNr[cameraIndex]++; // if hardware timepoints then we only send one trigger and // manually keep track of which channel/timepoint comes next if (acqSettings.hardwareTimepoints && channelImageNr[channelIndex] >= acqSettings.numSlices) { // only do this if we are done with the slices in this MM channel // we just finished filling one MM channel with all its slices so go to next timepoint for this channel channelImageNr[channelIndex] = 0; tpNumber[channelIndex]++; // see if we are supposed to skip next image if (checkForSkips) { // one extra image per MM channel, this includes case of only 1 color (either multi-channel disabled or else only 1 channel selected) // if we are interleaving by slice then next nrChannel images will be from extra slice position // any other configuration we will just drop the next image if (acqSettings.useChannels && acqSettings.channelMode == MultichannelModes.Keys.SLICE_HW) { imagesToSkip = acqSettings.numChannels; } else { imagesToSkip = 1; } } // update acquisition status message for hardware acquisition // (for non-hardware acquisition message is updated elsewhere) // Arbitrarily choose one possible channel to do this on. 
if (channelIndex == 0 && (numTimePointsDone_ < acqSettings.numTimepoints)) { numTimePointsDone_++; updateAcquisitionStatus(AcquisitionStatus.ACQUIRING); } } last = now; // keep track of last image timestamp } else { // no image ready yet done = cancelAcquisition_.get(); Thread.sleep(1); if (now - last >= timeout2) { ReportingUtils .logError("Camera did not send all expected images within" + " a reasonable period for timepoint " + numTimePointsDone_ + ". Continuing anyway."); nonfatalError = true; done = true; } } } // update count if we stopped in the middle if (cancelAcquisition_.get()) { numTimePointsDone_--; } // if we are using demo camera then add some extra time to let controller finish // since we got images without waiting for controller to actually send triggers if (usingDemoCam) { Thread.sleep(200); // for serial communication overhead Thread.sleep((long) volumeDuration / nrChannelsSoftware); // estimate the time per channel, not ideal in case of software channel switching if (acqSettings.isStageScanning) { Thread.sleep(1000 + extraStageScanTimeout); // extra 1 second plus ramp time for stage scanning } } } catch (InterruptedException iex) { MyDialogUtils.showError(iex); } if (acqSettings.hardwareTimepoints) { break; // only trigger controller once } } catch (Exception ex) { MyDialogUtils.showError(ex); } finally { // cleanup at the end of each time we trigger the controller ASIdiSPIM.getFrame().setHardwareInUse(false); // put shutter back to original state core_.setShutterOpen(shutterOpen); core_.setAutoShutter(autoShutter); // make sure cameras aren't running anymore if (core_.isSequenceRunning(firstCamera)) { core_.stopSequenceAcquisition(firstCamera); } if ((twoSided || acqBothCameras) && core_.isSequenceRunning(secondCamera)) { core_.stopSequenceAcquisition(secondCamera); } // make sure SPIM state machine on micromirror and SCAN of XY card are stopped (should normally be but sanity check) if ((acqSettings.numSides > 1) || acqSettings.firstSideIsA) { props_.setPropValue(Devices.Keys.GALVOA, Properties.Keys.SPIM_STATE, Properties.Values.SPIM_IDLE, true); } if ((acqSettings.numSides > 1) || !acqSettings.firstSideIsA) { props_.setPropValue(Devices.Keys.GALVOB, Properties.Keys.SPIM_STATE, Properties.Values.SPIM_IDLE, true); } if (acqSettings.isStageScanning) { props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_STATE, Properties.Values.SPIM_IDLE); } } } if (acqSettings.useMovementCorrection && (timePoint % correctMovementEachNFrames) == 0) { if (movementDetectors[positionNum] == null) { // Transform from camera space to stage space: Rotation rotation = camBRotation; if (firstSideA) { rotation = camARotation; } movementDetectors[positionNum] = new MovementDetector(prefs_, acq_, cmChannelNumber, positionNum, rotation); } Vector3D movement = movementDetectors[positionNum] .detectMovement(Method.PhaseCorrelation); String msg1 = "TimePoint: " + timePoint + ", Detected movement. 
X: " + movement.getX() + ", Y: " + movement.getY() + ", Z: " + movement.getZ(); System.out.println(msg1); if (!movement.equals(zeroPoint)) { String msg = "ASIdiSPIM motion corrector moving stages: X: " + movement.getX() + ", Y: " + movement.getY() + ", Z: " + movement.getZ(); gui_.logMessage(msg); System.out.println(msg); // if we are using the position list, update the position in the list if (acqSettings.useMultiPositions) { MultiStagePosition position = positionList.getPosition(positionNum); StagePosition pos = position.get(devices_.getMMDevice(Devices.Keys.XYSTAGE)); pos.x += movement.getX(); pos.y += movement.getY(); StagePosition zPos = position .get(devices_.getMMDevice(Devices.Keys.UPPERZDRIVE)); if (zPos != null) { zPos.x += movement.getZ(); } } else { // only a single position, move the stage now core_.setRelativeXYPosition(devices_.getMMDevice(Devices.Keys.XYSTAGE), movement.getX(), movement.getY()); core_.setRelativePosition(devices_.getMMDevice(Devices.Keys.UPPERZDRIVE), movement.getZ()); } } } } if (acqSettings.hardwareTimepoints) { break; } } } catch (IllegalMonitorStateException ex) { // do nothing, the acquisition was simply halted during its operation // will log error message during finally clause } catch (MMScriptException mex) { MyDialogUtils.showError(mex); } catch (Exception ex) { MyDialogUtils.showError(ex); } finally { // end of this acquisition (could be about to restart if separate viewers) try { // restore original window listeners try { vad.getImagePlus().getWindow().removeWindowListener(wl_acq); for (WindowListener l : wls_orig) { vad.getImagePlus().getWindow().addWindowListener(l); } } catch (Exception ex) { // do nothing, window is probably gone } if (cancelAcquisition_.get()) { ReportingUtils.logMessage("User stopped the acquisition"); } bq.put(TaggedImageQueue.POISON); // TODO: evaluate closeAcquisition call // at the moment, the Micro-Manager api has a bug that causes // a closed acquisition not be really closed, causing problems // when the user closes a window of the previous acquisition // changed r14705 (2014-11-24) // gui_.closeAcquisition(acqName); ReportingUtils.logMessage("diSPIM plugin acquisition " + acqName + " took: " + (System.currentTimeMillis() - acqButtonStart) + "ms"); // while(gui_.isAcquisitionRunning()) { // Thread.sleep(10); // ReportingUtils.logMessage("waiting for acquisition to finish."); // } // flag that we are done with acquisition acquisitionRunning_.set(false); // write acquisition settings if requested if (lastAcquisitionPath_ != null && prefs_.getBoolean(MyStrings.PanelNames.SETTINGS.toString(), Properties.Keys.PLUGIN_WRITE_ACQ_SETTINGS_FILE, false)) { String path = ""; try { path = lastAcquisitionPath_ + File.separator + "AcqSettings.txt"; PrintWriter writer = new PrintWriter(path); writer.println(acqSettingsJSON); writer.flush(); writer.close(); } catch (Exception ex) { MyDialogUtils.showError(ex, "Could not save acquisition settings to file as requested to path " + path); } } } catch (Exception ex) { // exception while stopping sequence acquisition, not sure what to do... 
MyDialogUtils.showError(ex, "Problem while finishing acquisition"); } } } // for loop over acquisitions // cleanup after end of all acquisitions // TODO be more careful and always do these if we actually started acquisition, // even if exception happened cameras_.setCameraForAcquisition(firstCameraKey, false); if (twoSided || acqBothCameras) { cameras_.setCameraForAcquisition(secondCameraKey, false); } // restore exposure times of SPIM cameras try { core_.setExposure(firstCamera, prefs_.getFloat(MyStrings.PanelNames.SETTINGS.toString(), Properties.Keys.PLUGIN_CAMERA_LIVE_EXPOSURE_FIRST.toString(), 10f)); if (twoSided || acqBothCameras) { core_.setExposure(secondCamera, prefs_.getFloat(MyStrings.PanelNames.SETTINGS.toString(), Properties.Keys.PLUGIN_CAMERA_LIVE_EXPOSURE_SECOND.toString(), 10f)); } gui_.refreshGUIFromCache(); } catch (Exception ex) { MyDialogUtils.showError("Could not restore exposure after acquisition"); } // reset channel to original if we clobbered it if (acqSettings.useChannels) { multiChannelPanel_.setConfig(originalChannelConfig); } // clean up controller settings after acquisition // want to do this, even with demo cameras, so we can test everything else // TODO figure out if we really want to return piezos to 0 position (maybe center position, // maybe not at all since we move when we switch to setup tab, something else??) controller_.cleanUpControllerAfterAcquisition(acqSettings.numSides, acqSettings.firstSideIsA, true); // if we did stage scanning restore its position and speed if (acqSettings.isStageScanning) { try { // make sure stage scanning state machine is stopped, otherwise setting speed/position won't take props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_STATE, Properties.Values.SPIM_IDLE); props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED, origXSpeed); props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL, origXAccel); core_.setXYPosition(devices_.getMMDevice(Devices.Keys.XYSTAGE), xyPosUm.x, xyPosUm.y); } catch (Exception ex) { MyDialogUtils.showError("Could not restore XY stage position after acquisition"); } } updateAcquisitionStatus(AcquisitionStatus.DONE); posUpdater_.pauseUpdates(false); if (testAcq && prefs_.getBoolean(MyStrings.PanelNames.SETTINGS.toString(), Properties.Keys.PLUGIN_TESTACQ_SAVE, false)) { String path = ""; try { path = prefs_.getString(MyStrings.PanelNames.SETTINGS.toString(), Properties.Keys.PLUGIN_TESTACQ_PATH, ""); IJ.saveAs(acq_.getAcquisitionWindow().getImagePlus(), "raw", path); // TODO consider generating a short metadata file to assist in interpretation } catch (Exception ex) { MyDialogUtils.showError("Could not save raw data from test acquisition to path " + path); } } if (separateImageFilesOriginally) { ImageUtils.setImageStorageClass(TaggedImageStorageDiskDefault.class); } // restore camera try { core_.setCameraDevice(originalCamera); } catch (Exception ex) { MyDialogUtils.showError("Could not restore camera after acquisition"); } if (liveModeOriginally) { gui_.enableLiveMode(true); } if (nonfatalError) { MyDialogUtils.showError("Missed some images during acquisition, see core log for details"); } return true; }
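The movement-correction step above rotates a drift vector, measured by phase correlation in camera space, into stage space before applying it to the XY stage and Z drive. The following standalone sketch illustrates just that coordinate transform with commons-math types; the 45-degree rotation about X and the drift values are invented for illustration and are not the plugin's actual geometry.

import org.apache.commons.math3.geometry.euclidean.threed.Rotation;
import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class StageCorrectionSketch {
    public static void main(String[] args) {
        // Hypothetical camera-to-stage transform: 45 degrees about the X axis
        // (axis and angle chosen arbitrarily for this sketch).
        Rotation camToStage = new Rotation(Vector3D.PLUS_I, Math.PI / 4);
        // Drift measured in camera space, e.g. by phase correlation (values invented).
        Vector3D cameraDrift = new Vector3D(1.5, -0.8, 0.0);
        // Express the drift in stage coordinates.
        Vector3D stageMove = camToStage.applyTo(cameraDrift);
        // The XY stage and the Z drive are separate devices, so split the move per axis.
        System.out.printf("XY move: (%.3f, %.3f), Z move: %.3f%n",
                stageMove.getX(), stageMove.getY(), stageMove.getZ());
    }
}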
From source file:org.micromanager.plugins.magellan.propsandcovariants.LaserPredNet.java
/** * @return distance from the point (x, y, z) to the interpolated surface along the sampled direction */ private static double getSampledDistancesToSurface(int angleIndex, double x, double y, double z, SurfaceInterpolator surface) throws InterruptedException { double dTheta = Math.PI * 2.0 / (double) N_THETA_ANGLES; Vector3D initialPoint = new Vector3D(x, y, z); double[] distances = new double[N_THETA_ANGLES]; double theta = angleIndex * dTheta; //calculate unit vector in theta phi direction Vector3D directionUnitVec = new Vector3D(Math.cos(theta) * Math.sin(PHI), Math.sin(theta) * Math.sin(PHI), Math.cos(PHI)).scalarMultiply(-1); //binary search double initialDist = SEARCH_START_DIST; //start with a point outside and then binary line search for the distance while (isWithinSurace(surface, initialPoint.add(directionUnitVec.scalarMultiply(initialDist)))) { initialDist *= 2; } return binarySearch(initialPoint, directionUnitVec, 0, initialDist, surface); }
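The search above doubles an initial guess until the probe point leaves the surface, then refines the crossing with a private binarySearch helper. Below is a minimal self-contained sketch of the same bracket-and-bisect pattern; the sphere predicate stands in for the SurfaceInterpolator and the tolerance is an arbitrary choice.

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class SurfaceDistanceSketch {
    // Toy "inside" test: a sphere of radius 100 about the origin stands in
    // for the interpolated surface used in the snippet above.
    static boolean isInside(Vector3D p) {
        return p.getNorm() < 100.0;
    }

    // Distance from start along dir to the boundary: grow the bracket by
    // doubling, then bisect it, mirroring the structure of the code above.
    static double distanceToSurface(Vector3D start, Vector3D dir) {
        double far = 1.0;
        while (isInside(start.add(dir.scalarMultiply(far)))) {
            far *= 2; // grow until the probe point is outside
        }
        double near = 0.0;
        while (far - near > 1e-6) { // bisect the bracket [near, far]
            double mid = 0.5 * (near + far);
            if (isInside(start.add(dir.scalarMultiply(mid)))) {
                near = mid;
            } else {
                far = mid;
            }
        }
        return 0.5 * (near + far);
    }

    public static void main(String[] args) {
        Vector3D start = new Vector3D(10, 20, 30);
        Vector3D dir = new Vector3D(0, 0, 1); // unit direction
        System.out.println(distanceToSurface(start, dir)); // ~67.47 for this start point
    }
}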
From source file:org.micromanager.plugins.magellan.surfacesandregions.SurfaceInterpolatorSimple.java
protected void interpolateSurface(LinkedList<Point3d> points) throws InterruptedException { double pixSize = Magellan.getCore().getPixelSizeUm(); //provide interpolator with current list of data points Point_dt triangulationPoints[] = new Point_dt[points.size()]; for (int i = 0; i < points.size(); i++) { triangulationPoints[i] = new Point_dt(points.get(i).x, points.get(i).y, points.get(i).z); } Delaunay_Triangulation dTri = new Delaunay_Triangulation(triangulationPoints); int maxPixelDimension = (int) (Math.max(boundXMax_ - boundXMin_, boundYMax_ - boundYMin_) / pixSize); //Start with at least 20 interp points and go smaller and smaller until every pixel interped? int pixelsPerInterpPoint = 1; while (maxPixelDimension / (pixelsPerInterpPoint + 1) > 20) { pixelsPerInterpPoint *= 2; } if (Thread.interrupted()) { throw new InterruptedException(); } while (pixelsPerInterpPoint >= MIN_PIXELS_PER_INTERP_POINT) { int numInterpPointsX = (int) (((boundXMax_ - boundXMin_) / pixSize) / pixelsPerInterpPoint); int numInterpPointsY = (int) (((boundYMax_ - boundYMin_) / pixSize) / pixelsPerInterpPoint); double dx = (boundXMax_ - boundXMin_) / (numInterpPointsX - 1); double dy = (boundYMax_ - boundYMin_) / (numInterpPointsY - 1); float[][] interpVals = new float[numInterpPointsY][numInterpPointsX]; float[][] interpNormals = new float[numInterpPointsY][numInterpPointsX]; boolean[][] interpDefined = new boolean[numInterpPointsY][numInterpPointsX]; for (int yInd = 0; yInd < interpVals.length; yInd++) { for (int xInd = 0; xInd < interpVals[0].length; xInd++) { if (Thread.interrupted()) { throw new InterruptedException(); } double xVal = boundXMin_ + dx * xInd; double yVal = boundYMin_ + dy * yInd; boolean inHull = convexHullRegion_ .checkPoint(new Vector2D(xVal, yVal)) == Region.Location.INSIDE; if (inHull) { Triangle_dt tri = dTri.find(new Point_dt(xVal, yVal)); //convert to apache commons coordinates to make a plane Vector3D v1 = new Vector3D(tri.p1().x(), tri.p1().y(), tri.p1().z()); Vector3D v2 = new Vector3D(tri.p2().x(), tri.p2().y(), tri.p2().z()); Vector3D v3 = new Vector3D(tri.p3().x(), tri.p3().y(), tri.p3().z()); Plane plane = new Plane(v1, v2, v3, TOLERANCE); //intersection of vertical line at these x+y values with plane gives point in plane Vector3D pointInPlane = plane.intersection( new Line(new Vector3D(xVal, yVal, 0), new Vector3D(xVal, yVal, 1), TOLERANCE)); float zVal = (float) pointInPlane.getZ(); interpVals[yInd][xInd] = zVal; float angle = (float) (Vector3D.angle(plane.getNormal(), new Vector3D(0, 0, 1)) / Math.PI * 180.0); interpNormals[yInd][xInd] = angle; interpDefined[yInd][xInd] = true; } else { interpDefined[yInd][xInd] = false; } } } if (Thread.interrupted()) { throw new InterruptedException(); } synchronized (interpolationLock_) { currentInterpolation_ = new SingleResolutionInterpolation(pixelsPerInterpPoint, interpDefined, interpVals, interpNormals, boundXMin_, boundXMax_, boundYMin_, boundYMax_, convexHullRegion_, convexHullVertices_, getPoints()); interpolationLock_.notifyAll(); } // System.gc(); pixelsPerInterpPoint /= 2; } }
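The core geometric trick in this interpolator is building a Plane from the three vertices of the enclosing Delaunay triangle and intersecting it with a vertical Line to read off z at (x, y). A minimal sketch of that step follows, with invented vertices and an arbitrary tolerance in place of the class's TOLERANCE constant.

import org.apache.commons.math3.geometry.euclidean.threed.Line;
import org.apache.commons.math3.geometry.euclidean.threed.Plane;
import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class TriangleInterpSketch {
    public static void main(String[] args) {
        final double tol = 1e-9; // arbitrary tolerance for this sketch
        // Triangle vertices (invented sample data): the plane is z = 5 + 0.2x + 0.4y.
        Vector3D v1 = new Vector3D(0, 0, 5);
        Vector3D v2 = new Vector3D(10, 0, 7);
        Vector3D v3 = new Vector3D(0, 10, 9);
        Plane plane = new Plane(v1, v2, v3, tol);
        // Vertical line through (x, y): its intersection with the plane gives the interpolated z.
        double x = 3, y = 4;
        Line vertical = new Line(new Vector3D(x, y, 0), new Vector3D(x, y, 1), tol);
        Vector3D hit = plane.intersection(vertical);
        // Tilt of the triangle: angle between its normal and straight-up, in degrees
        // (the sign of the normal depends on vertex order).
        double tiltDeg = Math.toDegrees(Vector3D.angle(plane.getNormal(), Vector3D.PLUS_K));
        System.out.println("z = " + hit.getZ() + " (expect 7.2), tilt = " + tiltDeg + " deg");
    }
}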
From source file:org.micromanager.plugins.magellan.surfacesandregions.SurfaceInterpolatorSimple.java
@Override public float getExtrapolatedValue(double x, double y) { //duplicate points for thread safety final LinkedList<Point3d> points = new LinkedList<Point3d>(points_); //find 3 closest points and calculate value //find closest convex hull vertex final LinkedList<Integer> closestIndices = new LinkedList<Integer>(); final LinkedList<Double> closestDistances = new LinkedList<Double>(); for (int i = 0; i < points.size(); i++) { //get current distance Vector2D vertex = new Vector2D(points.get(i).x, points.get(i).y); double distance = vertex.distance(new Vector2D(x, y)); if (closestDistances.size() < 3) { closestIndices.add(i); closestDistances.add(distance); } else if (distance < closestDistances.get(2)) { closestIndices.removeLast(); closestDistances.removeLast(); closestIndices.add(i); closestDistances.add(distance); } //sort Collections.sort(closestIndices, new Comparator<Integer>() { public int compare(Integer left, Integer right) { return (new Double(closestDistances.get(closestIndices.indexOf(left)))) .compareTo(closestDistances.get(closestIndices.indexOf(right))); } }); Collections.sort(closestDistances); } Point3d point1 = points.get(closestIndices.get(0)); Point3d point2 = points.get(closestIndices.get(1)); Point3d point3 = points.get(closestIndices.get(2)); Vector3D v1 = new Vector3D(point1.x, point1.y, point1.z); Vector3D v2 = new Vector3D(point2.x, point2.y, point2.z); Vector3D v3 = new Vector3D(point3.x, point3.y, point3.z); Plane plane = new Plane(v1, v2, v3, TOLERANCE); //intersection of vertical line at these x+y values with plane gives point in plane Vector3D pointInPlane = plane .intersection(new Line(new Vector3D(x, y, 0), new Vector3D(x, y, 1), TOLERANCE)); float zVal = (float) pointInPlane.getZ(); return zVal; }
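The method above tracks the three XY-nearest samples, re-sorting the candidate lists on every loop iteration, and then reuses the plane-intersection trick for the z value. A simpler variant of the same idea sorts the samples once by squared XY distance; this is a sketch with invented data and names, not the plugin's API, and it does not handle degenerate (collinear) triples.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import org.apache.commons.math3.geometry.euclidean.threed.Line;
import org.apache.commons.math3.geometry.euclidean.threed.Plane;
import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class ExtrapolationSketch {
    // z at (x, y) from the plane through the three XY-nearest samples.
    static double extrapolate(List<Vector3D> samples, double x, double y) {
        List<Vector3D> sorted = new ArrayList<Vector3D>(samples);
        // Sort once by squared XY distance rather than re-sorting inside the scan loop.
        sorted.sort(Comparator.comparingDouble((Vector3D p) ->
                (p.getX() - x) * (p.getX() - x) + (p.getY() - y) * (p.getY() - y)));
        double tol = 1e-9;
        Plane plane = new Plane(sorted.get(0), sorted.get(1), sorted.get(2), tol);
        Line vertical = new Line(new Vector3D(x, y, 0), new Vector3D(x, y, 1), tol);
        return plane.intersection(vertical).getZ();
    }

    public static void main(String[] args) {
        List<Vector3D> samples = Arrays.asList(
                new Vector3D(0, 0, 5), new Vector3D(10, 0, 7),
                new Vector3D(0, 10, 9), new Vector3D(50, 50, 20));
        // (-1, -1) lies outside the sample cloud, so this extrapolates
        // from the plane of the three nearest points.
        System.out.println(extrapolate(samples, -1, -1));
    }
}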
From source file:org.orekit.attitudes.AttitudesSequenceTest.java
@Test public void testDayNightSwitch() throws OrekitException { // Initial state definition : date, orbit final AbsoluteDate initialDate = new AbsoluteDate(2004, 01, 01, 23, 30, 00.000, TimeScalesFactory.getUTC()); final Vector3D position = new Vector3D(-6142438.668, 3492467.560, -25767.25680); final Vector3D velocity = new Vector3D(505.8479685, 942.7809215, 7435.922231); final Orbit initialOrbit = new KeplerianOrbit(new PVCoordinates(position, velocity), FramesFactory.getEME2000(), initialDate, Constants.EIGEN5C_EARTH_MU); // Attitudes sequence definition final EventsLogger logger = new EventsLogger(); final AttitudesSequence attitudesSequence = new AttitudesSequence(); final AttitudeProvider dayObservationLaw = new LofOffset(initialOrbit.getFrame(), LOFType.VVLH, RotationOrder.XYZ, FastMath.toRadians(20), FastMath.toRadians(40), 0); final AttitudeProvider nightRestingLaw = new LofOffset(initialOrbit.getFrame(), LOFType.VVLH); final PVCoordinatesProvider sun = CelestialBodyFactory.getSun(); final PVCoordinatesProvider earth = CelestialBodyFactory.getEarth(); final EclipseDetector ed = new EclipseDetector(sun, 696000000., earth, Constants.WGS84_EARTH_EQUATORIAL_RADIUS).withHandler(new ContinueOnEvent<EclipseDetector>() { public EventHandler.Action eventOccurred(final SpacecraftState s, final EclipseDetector d, final boolean increasing) { setInEclipse(s.getDate(), !increasing); return EventHandler.Action.CONTINUE; } }); final EventDetector monitored = logger.monitorDetector(ed); final Handler dayToNightHandler = new Handler(dayObservationLaw, nightRestingLaw); final Handler nightToDayHandler = new Handler(nightRestingLaw, dayObservationLaw); attitudesSequence.addSwitchingCondition(dayObservationLaw, nightRestingLaw, monitored, false, true, 300.0, AngularDerivativesFilter.USE_RRA, dayToNightHandler); attitudesSequence.addSwitchingCondition(nightRestingLaw, dayObservationLaw, monitored, true, false, 300.0, AngularDerivativesFilter.USE_RRA, nightToDayHandler); if (ed.g(new SpacecraftState(initialOrbit)) >= 0) { // initial position is in daytime setInEclipse(initialDate, false); attitudesSequence.resetActiveProvider(dayObservationLaw); } else { // initial position is in nighttime setInEclipse(initialDate, true); attitudesSequence.resetActiveProvider(nightRestingLaw); } // Propagator : consider the analytical Eckstein-Hechler model final Propagator propagator = new EcksteinHechlerPropagator(initialOrbit, attitudesSequence, Constants.EIGEN5C_EARTH_EQUATORIAL_RADIUS, Constants.EIGEN5C_EARTH_MU, Constants.EIGEN5C_EARTH_C20, Constants.EIGEN5C_EARTH_C30, Constants.EIGEN5C_EARTH_C40, Constants.EIGEN5C_EARTH_C50, Constants.EIGEN5C_EARTH_C60); // Register the switching events to the propagator attitudesSequence.registerSwitchEvents(propagator); propagator.setMasterMode(60.0, new OrekitFixedStepHandler() { public void init(final SpacecraftState s0, final AbsoluteDate t) { } public void handleStep(SpacecraftState currentState, boolean isLast) throws PropagationException { try { // the Earth position in spacecraft frame should be along spacecraft Z axis // during night time and away from it during day time due to roll and pitch offsets final Vector3D earth = currentState.toTransform().transformPosition(Vector3D.ZERO); final double pointingOffset = Vector3D.angle(earth, Vector3D.PLUS_K); // the g function is the eclipse indicator, it's the angle between Sun and Earth limb, // positive when Sun is outside of Earth limb, negative when Sun is hidden by Earth limb final double
eclipseAngle = ed.g(currentState); if (currentState.getDate().durationFrom(lastChange) > 300) { if (inEclipse) { Assert.assertTrue(eclipseAngle <= 0); Assert.assertEquals(0.0, pointingOffset, 1.0e-6); } else { Assert.assertTrue(eclipseAngle >= 0); Assert.assertEquals(0.767215, pointingOffset, 1.0e-6); } } else { // we are in transition Assert.assertTrue(pointingOffset + " " + (0.767215 - pointingOffset), pointingOffset <= 0.7672155); } } catch (OrekitException oe) { throw new PropagationException(oe); } } }); // Propagate from the initial date for the fixed duration propagator.propagate(initialDate.shiftedBy(12600.)); // as we have 2 switch events (even if they share the same underlying event detector), // and these events are triggered at both eclipse entry and exit, we get 8 // raw events on 2 orbits Assert.assertEquals(8, logger.getLoggedEvents().size()); // we have 4 attitudes switch on 2 orbits, 2 of each type Assert.assertEquals(2, dayToNightHandler.dates.size()); Assert.assertEquals(2, nightToDayHandler.dates.size()); }
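Most of the assertions above reduce to Vector3D.angle between the Earth direction expressed in the spacecraft frame and the +Z boresight. Stripped of the Orekit propagation machinery, the angle computation itself looks like the sketch below; the body-frame vector is invented for illustration.

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class PointingOffsetSketch {
    public static void main(String[] args) {
        // Earth direction expressed in the spacecraft body frame (invented numbers).
        Vector3D earthInBody = new Vector3D(0.05, -0.02, 0.998);
        // Offset between the +Z boresight and the Earth direction, in [0, pi].
        double offset = Vector3D.angle(earthInBody, Vector3D.PLUS_K);
        System.out.println("pointing offset = " + offset + " rad");
    }
}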
From source file:org.orekit.attitudes.AttitudesSequenceTest.java
@Test public void testBackwardPropagation() throws OrekitException { // Initial state definition : date, orbit final AbsoluteDate initialDate = new AbsoluteDate(2004, 01, 01, 23, 30, 00.000, TimeScalesFactory.getUTC()); final Vector3D position = new Vector3D(-6142438.668, 3492467.560, -25767.25680); final Vector3D velocity = new Vector3D(505.8479685, 942.7809215, 7435.922231); final Orbit initialOrbit = new KeplerianOrbit(new PVCoordinates(position, velocity), FramesFactory.getEME2000(), initialDate, Constants.EIGEN5C_EARTH_MU); final AttitudesSequence attitudesSequence = new AttitudesSequence(); final AttitudeProvider past = new InertialProvider(Rotation.IDENTITY); final AttitudeProvider current = new InertialProvider(Rotation.IDENTITY); final AttitudeProvider future = new InertialProvider(Rotation.IDENTITY); final Handler handler = new Handler(current, past); attitudesSequence.addSwitchingCondition(past, current, new DateDetector(initialDate.shiftedBy(-500.0)), true, false, 10.0, AngularDerivativesFilter.USE_R, handler); attitudesSequence.addSwitchingCondition(current, future, new DateDetector(initialDate.shiftedBy(+500.0)), true, false, 10.0, AngularDerivativesFilter.USE_R, null); attitudesSequence.resetActiveProvider(current); final Propagator propagator = new EcksteinHechlerPropagator(initialOrbit, attitudesSequence, Constants.EIGEN5C_EARTH_EQUATORIAL_RADIUS, Constants.EIGEN5C_EARTH_MU, Constants.EIGEN5C_EARTH_C20, Constants.EIGEN5C_EARTH_C30, Constants.EIGEN5C_EARTH_C40, Constants.EIGEN5C_EARTH_C50, Constants.EIGEN5C_EARTH_C60); // Register the switching events to the propagator attitudesSequence.registerSwitchEvents(propagator); SpacecraftState finalState = propagator.propagate(initialDate.shiftedBy(-10000.0)); Assert.assertEquals(1, handler.dates.size()); Assert.assertEquals(-500.0, handler.dates.get(0).durationFrom(initialDate), 1.0e-3); Assert.assertEquals(-490.0, finalState.getDate().durationFrom(initialDate), 1.0e-3); }
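As a quick sanity check on the state vectors used in these tests, the same position and velocity can be combined with crossProduct to recover the orbit normal and inclination using nothing but commons-math; this is illustrative and not part of the test itself.

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class OrbitGeometrySketch {
    public static void main(String[] args) {
        // Position (m) and velocity (m/s) taken from the test above.
        Vector3D r = new Vector3D(-6142438.668, 3492467.560, -25767.25680);
        Vector3D v = new Vector3D(505.8479685, 942.7809215, 7435.922231);
        // Specific angular momentum r x v; its direction is the orbit normal.
        Vector3D h = r.crossProduct(v);
        // Inclination is the angle between the orbit normal and the frame's +Z axis.
        double inclinationDeg = Math.toDegrees(Vector3D.angle(h, Vector3D.PLUS_K));
        System.out.println("inclination ~ " + inclinationDeg + " deg");
    }
}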
From source file:org.orekit.attitudes.AttitudeTest.java
@Test public void testInterpolation() throws OrekitException { Utils.setDataRoot("regular-data"); final double ehMu = 3.9860047e14; final double ae = 6.378137e6; final double c20 = -1.08263e-3; final double c30 = 2.54e-6; final double c40 = 1.62e-6; final double c50 = 2.3e-7; final double c60 = -5.5e-7; final AbsoluteDate date = AbsoluteDate.J2000_EPOCH.shiftedBy(584.); final Vector3D position = new Vector3D(3220103., 69623., 6449822.); final Vector3D velocity = new Vector3D(6414.7, -2006., -3180.); final CircularOrbit initialOrbit = new CircularOrbit(new PVCoordinates(position, velocity), FramesFactory.getEME2000(), date, ehMu); EcksteinHechlerPropagator propagator = new EcksteinHechlerPropagator(initialOrbit, ae, ehMu, c20, c30, c40, c50, c60); OneAxisEllipsoid earth = new OneAxisEllipsoid(Constants.WGS84_EARTH_EQUATORIAL_RADIUS, Constants.WGS84_EARTH_FLATTENING, FramesFactory.getITRF(IERSConventions.IERS_2010, true)); propagator.setAttitudeProvider(new BodyCenterPointing(initialOrbit.getFrame(), earth)); final Attitude initialAttitude = propagator.propagate(initialOrbit.getDate()).getAttitude(); // set up a 5 points sample List<Attitude> sample = new ArrayList<Attitude>(); for (double dt = 0; dt < 251.0; dt += 60.0) { sample.add(propagator.propagate(date.shiftedBy(dt)).getAttitude()); } // well inside the sample, interpolation should be better than quadratic shift double maxShiftAngleError = 0; double maxInterpolationAngleError = 0; double maxShiftRateError = 0; double maxInterpolationRateError = 0; for (double dt = 0; dt < 240.0; dt += 1.0) { AbsoluteDate t = initialOrbit.getDate().shiftedBy(dt); Attitude propagated = propagator.propagate(t).getAttitude(); double shiftAngleError = Rotation.distance(propagated.getRotation(), initialAttitude.shiftedBy(dt).getRotation()); double interpolationAngleError = Rotation.distance(propagated.getRotation(), initialAttitude.interpolate(t, sample).getRotation()); double shiftRateError = Vector3D.distance(propagated.getSpin(), initialAttitude.shiftedBy(dt).getSpin()); double interpolationRateError = Vector3D.distance(propagated.getSpin(), initialAttitude.interpolate(t, sample).getSpin()); maxShiftAngleError = FastMath.max(maxShiftAngleError, shiftAngleError); maxInterpolationAngleError = FastMath.max(maxInterpolationAngleError, interpolationAngleError); maxShiftRateError = FastMath.max(maxShiftRateError, shiftRateError); maxInterpolationRateError = FastMath.max(maxInterpolationRateError, interpolationRateError); } Assert.assertTrue(maxShiftAngleError > 4.0e-6); Assert.assertTrue(maxInterpolationAngleError < 1.5e-13); Assert.assertTrue(maxShiftRateError > 6.0e-8); Assert.assertTrue(maxInterpolationRateError < 2.5e-14); // past sample end, interpolation error should increase, but still be far better than quadratic shift maxShiftAngleError = 0; maxInterpolationAngleError = 0; maxShiftRateError = 0; maxInterpolationRateError = 0; for (double dt = 250.0; dt < 300.0; dt += 1.0) { AbsoluteDate t = initialOrbit.getDate().shiftedBy(dt); Attitude propagated = propagator.propagate(t).getAttitude(); double shiftAngleError = Rotation.distance(propagated.getRotation(), initialAttitude.shiftedBy(dt).getRotation()); double interpolationAngleError = Rotation.distance(propagated.getRotation(), initialAttitude.interpolate(t, sample).getRotation()); double shiftRateError = Vector3D.distance(propagated.getSpin(), initialAttitude.shiftedBy(dt).getSpin()); double interpolationRateError = Vector3D.distance(propagated.getSpin(),
initialAttitude.interpolate(t, sample).getSpin()); maxShiftAngleError = FastMath.max(maxShiftAngleError, shiftAngleError); maxInterpolationAngleError = FastMath.max(maxInterpolationAngleError, interpolationAngleError); maxShiftRateError = FastMath.max(maxShiftRateError, shiftRateError); maxInterpolationRateError = FastMath.max(maxInterpolationRateError, interpolationRateError); } Assert.assertTrue(maxShiftAngleError > 9.0e-6); Assert.assertTrue(maxInterpolationAngleError < 6.0e-11); Assert.assertTrue(maxShiftRateError > 9.0e-8); Assert.assertTrue(maxInterpolationRateError < 4.0e-12); }
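The rate-error comparisons above use Vector3D.distance, i.e. the Euclidean norm of the difference between two spin vectors. A minimal illustration with invented spin values:

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class SpinErrorSketch {
    public static void main(String[] args) {
        // Two spin vectors (rad/s, invented) as might come from propagation
        // versus interpolation in the test above.
        Vector3D propagated = new Vector3D(1.0e-3, 2.0e-3, -5.0e-4);
        Vector3D interpolated = new Vector3D(1.0e-3, 2.1e-3, -5.0e-4);
        // Vector3D.distance(a, b) is the norm of a - b, the error metric used above.
        double rateError = Vector3D.distance(propagated, interpolated);
        System.out.println("rate error = " + rateError + " rad/s");
    }
}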