
viewinfo.java

Java 3D matrix-related classes (Java)

     * first argument and the second argument is not used.  For a stereo
     * canvas the first argument receives the left projection transform,
     * and if the second argument is non-null it receives the right
     * projection transform.<p>
     *
     * If either of the clip policies <code>VIRTUAL_EYE</code> or
     * <code>VIRTUAL_SCREEN</code> are used, then the View should be attached
     * to a ViewPlatform that is part of a live scene graph and that has its
     * <code>ALLOW_LOCAL_TO_VWORLD_READ</code> capability set; otherwise, a
     * scale factor of 1.0 will be used for the scale factor from virtual
     * world units to view platform units.
     *
     * @param c3d the Canvas3D to use
     * @param e2ccl the Transform3D to receive left transform
     * @param e2ccr the Transform3D to receive right transform, or null
     */
    public void getProjection(Canvas3D c3d,
                              Transform3D e2ccl, Transform3D e2ccr) {
        CanvasInfo ci = updateCache(c3d, "getProjection", true) ;
        getProjection(ci) ;
        e2ccl.set(ci.projection) ;
        if (ci.useStereo && e2ccr != null)
            e2ccr.set(ci.rightProjection) ;
    }

    private void getProjection(CanvasInfo ci) {
        if (ci.updateProjection) {
            if (verbose) System.err.println("updating Projection") ;
            if (ci.projection == null)
                ci.projection = new Transform3D() ;

            getEyeToImagePlate(ci) ;
            getClipDistances(ci) ;

            // Note: core Java 3D code insists that the back clip plane
            // relative to the image plate must be the same left back clip
            // distance for both the left and right eye.  Not sure why this
            // should be, but the same is done here for compatibility.
            double backClip = getBackClip(ci, ci.eyeInPlate) ;

            computeProjection(ci, ci.eyeInPlate,
                              getFrontClip(ci, ci.eyeInPlate),
                              backClip, ci.projection) ;

            if (ci.useStereo) {
                if (ci.rightProjection == null)
                    ci.rightProjection = new Transform3D() ;

                computeProjection(ci, ci.rightEyeInPlate,
                                  getFrontClip(ci, ci.rightEyeInPlate),
                                  backClip, ci.rightProjection) ;
            }
            ci.updateProjection = false ;
            if (verbose) t3dPrint(ci.projection, "projection") ;
        }
    }
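
    //
    // Editorial usage sketch -- not part of the original file.  Shows how
    // application code might call the public getProjection method above for
    // a Canvas3D attached to the View this instance was created for.
    //
    private void exampleGetProjection(Canvas3D canvas) {
        Transform3D leftEyeToClip  = new Transform3D() ;
        Transform3D rightEyeToClip = new Transform3D() ;

        // On a monoscopic canvas only the first transform is written; the
        // second argument may also simply be passed as null.
        getProjection(canvas, leftEyeToClip, rightEyeToClip) ;
    }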
    /**
     * Gets the transforms from clipping coordinates to eye coordinates
     * and copies them into the given Transform3Ds.  These transforms take
     * the clip space volume bounded by the range [-1.0 .. +1.0] on each
     * of the X, Y, and Z axes and project it into eye coordinates.<p>
     *
     * With a monoscopic canvas the projection transform is copied to the
     * first argument and the second argument is not used.  For a stereo
     * canvas the first argument receives the left projection transform, and
     * if the second argument is non-null it receives the right projection
     * transform.<p>
     *
     * If either of the clip policies <code>VIRTUAL_EYE</code> or
     * <code>VIRTUAL_SCREEN</code> are used, then the View should be attached
     * to a ViewPlatform that is part of a live scene graph and that has its
     * <code>ALLOW_LOCAL_TO_VWORLD_READ</code> capability set; otherwise, a
     * scale factor of 1.0 will be used for the scale factor from virtual
     * world units to view platform units.
     *
     * @param c3d the Canvas3D to use
     * @param cc2el the Transform3D to receive left transform
     * @param cc2er the Transform3D to receive right transform, or null
     */
    public void getInverseProjection(Canvas3D c3d,
                                     Transform3D cc2el, Transform3D cc2er) {
        CanvasInfo ci = updateCache(c3d, "getInverseProjection", true) ;
        getInverseProjection(ci) ;
        cc2el.set(ci.inverseProjection) ;
        if (ci.useStereo && cc2er != null)
            cc2er.set(ci.inverseRightProjection) ;
    }

    private void getInverseProjection(CanvasInfo ci) {
        if (ci.updateInverseProjection) {
            if (verbose) System.err.println("updating InverseProjection") ;
            if (ci.inverseProjection == null)
                ci.inverseProjection = new Transform3D() ;

            getProjection(ci) ;
            ci.inverseProjection.invert(ci.projection) ;

            if (ci.useStereo) {
                if (ci.inverseRightProjection == null)
                    ci.inverseRightProjection = new Transform3D() ;

                ci.inverseRightProjection.invert(ci.rightProjection) ;
            }
            ci.updateInverseProjection = false ;
        }
    }
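
    //
    // Editorial sketch -- not part of the original file.  A quick consistency
    // check on the two public methods above: composing the eye-to-clip
    // projection with the clip-to-eye inverse should give (approximately)
    // the identity transform.
    //
    private boolean exampleProjectionRoundTrip(Canvas3D canvas) {
        Transform3D eyeToClip = new Transform3D() ;
        Transform3D clipToEye = new Transform3D() ;

        getProjection(canvas, eyeToClip, null) ;          // left eye only
        getInverseProjection(canvas, clipToEye, null) ;

        Transform3D roundTrip = new Transform3D() ;
        roundTrip.mul(eyeToClip, clipToEye) ;

        // A newly constructed Transform3D is the identity; allow for some
        // numerical error from the inversion.
        return roundTrip.epsilonEquals(new Transform3D(), 1.0e-9) ;
    }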
    /**
     * Gets the transforms from clipping coordinates to view platform
     * coordinates and copies them into the given Transform3Ds.  These
     * transforms take the clip space volume bounded by the range
     * [-1.0 .. +1.0] on each of the X, Y, and Z axes and project into
     * the view platform coordinate system.<p>
     *
     * With a monoscopic canvas the projection transform is copied to the
     * first argument and the second argument is not used.  For a stereo
     * canvas the first argument receives the left projection transform, and
     * if the second argument is non-null it receives the right projection
     * transform.<p>
     *
     * If either of the clip policies <code>VIRTUAL_EYE</code> or
     * <code>VIRTUAL_SCREEN</code> are used, then the View should be attached
     * to a ViewPlatform that is part of a live scene graph and that has its
     * <code>ALLOW_LOCAL_TO_VWORLD_READ</code> capability set; otherwise, a
     * scale factor of 1.0 will be used for the scale factor from virtual
     * world units to view platform units.
     *
     * @param c3d the Canvas3D to use
     * @param cc2vpl the Transform3D to receive left transform
     * @param cc2vpr the Transform3D to receive right transform, or null
     */
    public void getInverseViewPlatformProjection(Canvas3D c3d,
                                                 Transform3D cc2vpl,
                                                 Transform3D cc2vpr) {
        CanvasInfo ci = updateCache
            (c3d, "getInverseViewPlatformProjection", true) ;
        getInverseViewPlatformProjection(ci) ;
        cc2vpl.set(ci.inverseViewPlatformProjection) ;
        if (ci.useStereo && cc2vpr != null)
            cc2vpr.set(ci.inverseViewPlatformRightProjection) ;
    }

    private void getInverseViewPlatformProjection(CanvasInfo ci) {
        if (ci.updateInverseViewPlatformProjection) {
            if (verbose) System.err.println("updating InverseVpProjection") ;
            if (ci.inverseViewPlatformProjection == null)
                ci.inverseViewPlatformProjection = new Transform3D() ;

            getInverseProjection(ci) ;
            getEyeToViewPlatform(ci) ;
            ci.inverseViewPlatformProjection.mul
                (ci.eyeToViewPlatform, ci.inverseProjection) ;

            if (ci.useStereo) {
                if (ci.inverseViewPlatformRightProjection == null)
                    ci.inverseViewPlatformRightProjection = new Transform3D() ;

                ci.inverseViewPlatformRightProjection.mul
                    (ci.rightEyeToViewPlatform, ci.inverseRightProjection) ;
            }
            ci.updateInverseViewPlatformProjection = false ;
        }
    }

    /**
     * Gets the transforms from clipping coordinates to virtual world
     * coordinates and copies them into the given Transform3Ds.  These
     * transforms take the clip space volume bounded by the range
     * [-1.0 .. +1.0] on each of the X, Y, and Z axes and project into
     * the virtual world.<p>
     *
     * With a monoscopic canvas the projection transform is copied to the
     * first argument and the second argument is not used.  For a stereo
     * canvas the first argument receives the left projection transform, and
     * if the second argument is non-null it receives the right projection
     * transform.<p>
     *
     * The View must be attached to a ViewPlatform which is part of a live
     * scene graph, and the ViewPlatform node must have its
     * <code>ALLOW_LOCAL_TO_VWORLD_READ</code> capability set.
     *
     * @param c3d the Canvas3D to use
     * @param cc2vwl the Transform3D to receive left transform
     * @param cc2vwr the Transform3D to receive right transform, or null
     */
    public void getInverseVworldProjection(Canvas3D c3d,
                                           Transform3D cc2vwl,
                                           Transform3D cc2vwr) {
        CanvasInfo ci = updateCache(c3d, "getInverseVworldProjection", true) ;
        getInverseVworldProjection(ci) ;
        cc2vwl.set(ci.inverseVworldProjection) ;
        if (ci.useStereo && cc2vwr != null)
            cc2vwr.set(ci.inverseVworldRightProjection) ;
    }

    private void getInverseVworldProjection(CanvasInfo ci) {
        if (ci.updateInverseVworldProjection) {
            if (verbose) System.err.println("updating InverseVwProjection") ;
            if (ci.inverseVworldProjection == null)
                ci.inverseVworldProjection = new Transform3D() ;

            getInverseViewPlatformProjection(ci) ;
            ci.inverseVworldProjection.mul
                (vpi.viewPlatformToVworld, ci.inverseViewPlatformProjection) ;

            if (ci.useStereo) {
                if (ci.inverseVworldRightProjection == null)
                    ci.inverseVworldRightProjection = new Transform3D() ;

                ci.inverseVworldRightProjection.mul
                    (vpi.viewPlatformToVworld,
                     ci.inverseViewPlatformRightProjection) ;
            }
            ci.updateInverseVworldProjection = false ;
        }
    }
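
    //
    // Editorial usage sketch -- not part of the original file.  Illustrates
    // one way the clip-to-virtual-world transform above might be used:
    // unprojecting a point given in clipping coordinates (each coordinate
    // in [-1.0 .. +1.0]) into the virtual world.  Vector4d is written fully
    // qualified because the file's import list is not shown in this excerpt.
    //
    private Point3d exampleUnprojectToVworld(Canvas3D canvas, double clipX,
                                             double clipY, double clipZ) {
        Transform3D clipToVworld = new Transform3D() ;
        getInverseVworldProjection(canvas, clipToVworld, null) ;

        // Homogeneous clip-space point with w = 1.
        javax.vecmath.Vector4d p =
            new javax.vecmath.Vector4d(clipX, clipY, clipZ, 1.0) ;
        clipToVworld.transform(p) ;

        // The inverse projection is a projective transform, so divide by w
        // to get back to ordinary 3D virtual world coordinates.
        return new Point3d(p.x / p.w, p.y / p.w, p.z / p.w) ;
    }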
    //
    // Compute a projection matrix from the given eye position in image plate,
    // the front and back clip Z positions in image plate, and the current
    // canvas position in image plate.
    //
    private void computeProjection(CanvasInfo ci, Point3d eye,
                                   double front, double back, Transform3D p) {
        // Convert everything to eye coordinates.
        double lx = ci.canvasX - eye.x ;                   // left   (low x)
        double ly = ci.canvasY - eye.y ;                   // bottom (low y)
        double hx = (ci.canvasX+ci.canvasWidth)  - eye.x ; // right  (high x)
        double hy = (ci.canvasY+ci.canvasHeight) - eye.y ; // top    (high y)
        double nz = front - eye.z ;                        // front  (near z)
        double fz = back  - eye.z ;                        // back   (far z)
        double iz = -eye.z ;                               // plate  (image z)

        if (projectionPolicy == View.PERSPECTIVE_PROJECTION)
            computePerspectiveProjection(lx, ly, hx, hy, iz, nz, fz, m16d) ;
        else
            computeParallelProjection(lx, ly, hx, hy, nz, fz, m16d) ;

        p.set(m16d) ;
    }

    //
    // Compute a perspective projection from the given eye-space bounds.
    //
    private void computePerspectiveProjection(double lx, double ly,
                                              double hx, double hy,
                                              double iz, double nz,
                                              double fz, double[] m) {
        //
        // We first derive the X and Y projection components without regard
        // for Z scaling.  The Z scaling or perspective depth is handled by
        // matrix elements expressed solely in terms of the near and far clip
        // planes.
        //
        // Since the eye is at the origin, the projector for any point V in
        // eye space is just V.  Any point along this ray can be expressed in
        // parametric form as P = tV.  To find the projection onto the plane
        // containing the canvas, find t such that P.z = iz; ie, t = iz/V.z.
        // The projection P is thus [V.x*iz/V.z, V.y*iz/V.z, iz].
        //
        // This projection can be expressed as the following matrix equation:
        //
        //   -iz     0     0     0       V.x
        //    0     -iz    0     0   X   V.y
        //    0      0    -iz    0       V.z
        //    0      0    -1     0        1              {matrix 1}
        //
        // where the matrix elements have been negated so that w is positive.
        // This is mostly by convention, although some hardware won't handle
        // clipping in the -w half-space.
        //
        // After the point has been projected to the image plate, the
        // canvas bounds need to be mapped to the [-1..1] range of Java 3D's
        // clipping space.  The scale factor for X is thus 2/(hx - lx); adding
        // the translation results in (V.x - lx)(2/(hx - lx)) - 1, which after
        // some algebra can be confirmed to be the same as the following
        // canonical scale/offset form:
        //
        //   V.x*2/(hx - lx) - (hx + lx)/(hx - lx)
        //
        // Similarly for Y:
        //
        //   V.y*2/(hy - ly) - (hy + ly)/(hy - ly)
        //
        // If we set idx = 1/(hx - lx) and idy = 1/(hy - ly), then we get:
        //
        //   2*V.x*idx - (hx + lx)*idx
        //   2*V.y*idy - (hy + ly)*idy
        //
        // These scales and offsets are represented by the following matrix:
        //
        //   2*idx       0         0  -(hx + lx)*idx
        //     0       2*idy       0  -(hy + ly)*idy
        //     0         0         1         0
        //     0         0         0         1           {matrix 2}
        //
        // The result after concatenating the projection transform
        // ({matrix 2} X {matrix 1}):
        //
        //   -2*iz*idx     0      (hx + lx)*idx    0
        //       0     -2*iz*idy  (hy + ly)*idy    0
        //       0         0           -iz {a}     0 {b}
        //       0         0           -1          0     {matrix 3}
        //
        // The Z scaling is handled by m[10] ("a") and m[11]
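        //
        // [Editorial worked check, not in the original comment: the X row of
        // {matrix 3} really does map the canvas edges onto -1 and +1.  Take
        // a point on the projector through the left canvas edge,
        // V = t*(lx, y, iz):
        //
        //   clip x = -2*iz*idx*V.x + (hx + lx)*idx*V.z
        //          = t*iz*idx*(-2*lx + hx + lx) = t*iz*idx*(hx - lx) = t*iz
        //   clip w = -V.z = -t*iz
        //
        // so x/w = -1 after the perspective divide.  The projector through
        // the right edge, V = t*(hx, y, iz), gives x/w = +1 the same way,
        // and the Y row behaves identically with ly, hy, and idy.]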
