⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ch3dtrvs.cpp

📁 Windows上的MUD客户端程序
💻 CPP
📖 第 1 页 / 共 5 页
字号:
	}

}

#if 0
// example for traverses
// (Dead code, compiled out. Template showing the minimal shape of a
// ChQv traverse() override: push an element for this node onto the
// traversal state's stack, then branch on the ChQvState traversal type.
// Kept for reference only — do not enable without fixing classIndex.)
void
classname::traverse(QvState *state)
{
    QvElement *elt = new QvElement;
    elt->data = this;
    state->addElement(QvState::classIndex, elt);

	G3dHandle_t hGC = ((ChQvState*)state)->GetView()->GetGC();
	  
	if( ((ChQvState*)state)->GetType() ==  ChQvState::draw)
	{
	}
	else if( ((ChQvState*)state)->GetType() ==  ChQvState::command)
	{
	}			
}
#endif


// Generates the traverse() override for QvShapeHints — presumably pushing
// the node onto the state's ShapeHintsIndex element stack, matching the
// hand-written property traversals in this file (DO_PROPERTY macro is
// defined elsewhere; confirm its expansion before relying on this).
DO_PROPERTY(QvShapeHints,		ShapeHintsIndex)

// Traversal for a material node: announces itself, answers node queries,
// registers this node on the traversal state's material element stack,
// and lazily creates its render data on first visit.
void
QvMaterial::traverse(QvState *state)
{
    ANNOUNCE(className);
	DEFAULT_QUERYNODE(this, state);	

	// Push this material onto the state's material element stack.
	QvElement *pMatElt = new QvElement;
	pMatElt->data = this;
	state->addElement(QvState::MaterialIndex, pMatElt);

	// Build the render data exactly once, the first time we are traversed.
	if (m_pRenderData == 0)
	{
		m_pRenderData = new ChQvMaterialRenderData(this);
	}
}


// NOTE: this probably duplicates G3dTransformPointF — consider replacing
// callers with that API instead of the hand-rolled Mult() below.
#if defined(CH_USE_3DR)
// Multiply point 'a' by the upper-left 3x3 of 'mat', storing the result in 'b'.
// (Rotation/scale only — no translation, no w.)
// The inputs are buffered through locals first, so — unlike the original,
// which required b != a — it is now safe to call with &a == &b.
void Mult(GxTransformF_t& mat, PointF_t& a, PointF_t& b)
{
	const float ax = a.x, ay = a.y, az = a.z;
	b.x = ax * mat[0][0] + ay * mat[0][1] + az * mat[0][2];
	b.y = ax * mat[1][0] + ay * mat[1][1] + az * mat[1][2];
	b.z = ax * mat[2][0] + ay * mat[2][1] + az * mat[2][2];
}

//void CreateHeadlight(  PointFW_t loc, PointF_t dir, QvState *state );

// Traversal for the VRML PerspectiveCamera node.
//
// Dispatches on the ChQvState traversal type:
//  - buildInstance: create this node's render data if missing, derive the
//    camera location/direction/up from the node's fields plus the
//    accumulated Qv transform stack, and program the 3DR camera.
//  - draw / queryNode: same derivation, but skipped when the cached render
//    data is clean; also creates the headlight.
//  - command: no-op.
// Side effects: registers the node on the state's camera stack, mutates
// m_pRenderData, and reprograms the 3DR graphics context (hGC).
void
QvPerspectiveCamera::traverse(QvState *state)
{
    ANNOUNCE(className);
   						      
 	DoNodeEditCommand(this, state);

	// Register this camera on the traversal state's camera element stack.
    QvElement *elt = new QvElement;
    elt->data = this;
    elt->type = QvElement::PerspectiveCamera;
    state->addElement(QvState::CameraIndex, elt);

	if( ((ChQvState*)state)->GetType() ==  ChQvState::buildInstance)
	{
		G3dHandle_t hGC = ((ChQvState*)state)->GetView()->GetGC();
		ChRenderContext *pRC = ((ChQvState*)state)->GetView()->GetRenderContext();

		G3dSetCameraProjection(hGC, G3DP_PERSPECTIVE);
		PointF_t	up, upa = {0.0f, 1.0f, 0.0f};		// starting loc for look dirs
		PointF_t	dir, dira = {0.0f, 0.0f, -1.0f};		// will rotate later
		PointFW_t 	loc;

		ChQvPCameraRenderData *pRenderData = (ChQvPCameraRenderData *)GetRenderData();

		// First visit: allocate the per-node render data.
		if(!pRenderData)
		{
			m_pRenderData = pRenderData = new ChQvPCameraRenderData(this, (ChQvBuildState*)state);
		}

		// Use the node to set the render data
		Qv2Native(position, loc);
		loc.w = 0.;				 // infinite camera - faster

		float angle = orientation.angle;

		// Build a rotation matrix from the node's orientation field.
		// The angle is negated — presumably to convert between Inventor's
		// and 3DR's rotation conventions; confirm against RotateMatrix.
		GxTransformF_t	rotMat;
		RotateMatrix(-angle, 
						orientation.axis[0],
						orientation.axis[1],
						orientation.axis[2],
						rotMat);

 		// Inventor puts the camera in transformed world (model) coordinates,
		// But 3dr defines the bottom model transform by the camera. 
		// Therefore, we have to walk the qv stack of transforms, accumulating them
		// Then we transform by the resultant matrix

		GxTransformF_t	stackMat, cameraMat;
		AccumQVTransform( state, stackMat);

		G3dMultMatrix( rotMat, stackMat, cameraMat );

		// Rotate the canonical look direction (0,0,-1) into world space.
		Mult(cameraMat, dira, dir);

		
		if(pRC->GetViewerMode() != walk )
		{
			// Free viewing: the up vector just follows the camera transform.
			Mult(cameraMat, upa, up);
		}
		else						// We're walking; keep camera level
		{							// This computation is not pure VRML,
									// but it keeps camera manipulations much cleaner for
									// walking situations. The camera acts more camera-like 
									// and less airplane-like.

			PointF_t n;				// normal to plane defined by (dir, yaxis)
			// NOTE(review): 'axis' is initialized but never used below.
			PointF_t axis = {	orientation.axis[0],
								orientation.axis[1],
								orientation.axis[2] };
			PointF_t yAxis = {	0, 1, 0 };

 			G3dCross(&yAxis, &dir, &n);
			if (G3dDot(&n, &n) > SMIDGEON)
			{
				// Re-derive 'up' perpendicular to dir within the vertical plane.
 				G3dCross(&dir, &n, &up);
 			}
			else
			{
				// special case; looking straight up or down
				Mult(cameraMat, upa, up);
			} 

			// Guard against a degenerate up vector before normalizing.
			if (G3dDot(&up, &up) < SMIDGEON)
			{
				up.x = 0; up.y = 1; up.z = 0;
			}
			G3dUnitVector(&up);
		}


		// Cache both the current and the starting camera frame; the Start*
		// copies appear to record the authored pose for later reset.
		pRenderData->SetLoc(*(GxVec3f*)&loc)->SetUp(*(GxVec3f*)&up)->SetDir(*(GxVec3f*)&dir);
		pRenderData->SetStartLoc(*(GxVec3f*)&loc)->SetStartUp(*(GxVec3f*)&up)->SetStartDir(*(GxVec3f*)&dir);
		pRenderData->SetDirty(false);

		loc.w = 0;		// infinite camera for all but highest quality
						// rendering
		
		G3dSetCameraPosition(hGC,  &loc, &dir, &up);
		// use window size for projection; note that coord system is y-up
		RECT rt;
		((ChQvState*)state)->GetView()->GetClientRect(&rt);

		// set the view box based on the angle they requested, and the
		// window's aspect ratio, and the bounds of the scene graph

		// TODO: try out the RL fix here; use a big maxZ (like 32k) but work in
		// just the front of the buffer, by multiplying fFar by 10 or 40 or something

		Float_t fNear = nearDistance.value; 
		Float_t fFar = 50000.;
		const float maxZResolution = 2000.;	// Assuming ?? bits signed
		ChMazeWnd * pView = ((ChQvState*)state)->GetView();
		if(pView->GetBounds())
		{				   
							// Compute based on scene bounds, and
							// multiply in a little fudge factor for luck
			fFar = 1.5 * pView->GetBounds()->GetFarDistance(loc.x, loc.y, loc.z);
		}
		float minNear = fFar / maxZResolution;	   // prevent zbuffer overrun
		fNear = 1.0;							  // hack
		fNear = max(fNear, minNear);
		// Frustum from the vertical field-of-view (heightAngle) and the
		// window's aspect ratio.
		Float_t top = tan(heightAngle.value / 2.) * fNear ;
		Float_t bottom = -top;
		Float_t right = top * rt.right / rt.bottom;
		Float_t left = -right;

		G3dSetCameraView(hGC, right, left, top, bottom, fNear, fFar);

		G3dSetCameraPort(	hGC, 
							Float_t(rt.right),		/* right */
							0.0f,					/* left*/
			    			Float_t(rt.bottom),		/* top */
			    			0.0f);					/* bottom */

		// Now let's save the matrices into the render data for later use
				
		memcpy(pRenderData->m_cameraTransform, G3dGetModelCamMatrix(hGC), sizeof(pRenderData->m_cameraTransform));
		memcpy(pRenderData->m_invCameraTransform, G3dGetInverseModelCamMatrix(hGC), sizeof(pRenderData->m_invCameraTransform));
		G3dSetActiveStack(hGC, G3DT_CAM_CLIP);
		GxTransformF_t mat;
		G3dGetTransform(hGC, mat);	   
		pRenderData->m_camClipTransform	= GxTransform3Wf(mat);
		G3dSetActiveStack(hGC, G3DT_MODEL);

		// NOTE(review): here m_top = rt.bottom and m_bottom = 0, but the
		// draw branch below stores the opposite (m_top = 0, m_bottom =
		// rt.bottom). One of the two looks wrong — confirm which.
		pRenderData->m_right 	=  Float_t(rt.right);		/* right */        
		pRenderData->m_left  	=  0.0f;					/* left*/          
		pRenderData->m_top   	=  Float_t(rt.bottom);		/* top */          
		pRenderData->m_bottom	=  0.0f;					/* bottom */ 
		      
		// Now rebuild the renderer's transform stack, based on the qv stack
		// To get back to model coords
		pRC->RebuildTransformStack((ChQvState*)state);
		pRC->SetCameraLoc(*(GxVec3f*)&loc);			// in world coords!
	}
	else if(((ChQvState*)state)->GetType() ==  ChQvState::draw ||  ((ChQvState*)state)->GetType() ==  ChQvState::queryNode)
	{
		G3dHandle_t hGC = ((ChQvState*)state)->GetView()->GetGC();
		ChRenderContext *pRC = ((ChQvState*)state)->GetView()->GetRenderContext();

		G3dSetCameraProjection(hGC, G3DP_PERSPECTIVE);
		PointF_t	up, upa = {0.0f, 1.0f, 0.0f};		// starting loc for look dirs
		PointF_t	dir, dira = {0.0f, 0.0f, -1.0f};		// will rotate later
		PointFW_t 	loc;

		// Render data must already exist — buildInstance creates it.
		ChQvPCameraRenderData *pRenderData = (ChQvPCameraRenderData *)GetRenderData();
		ASSERT(pRenderData);

		if(pRenderData->IsDirty())
		{
			// Camera parameters changed since last traversal: recompute the
			// full camera frame (duplicates the buildInstance math above).

			// Use the node to set the render data
			Qv2Native(position, loc);
			loc.w = 0.;				 // infinite camera - faster
	
			float angle = orientation.angle;

			GxTransformF_t	rotMat;
			RotateMatrix(-angle, 
							orientation.axis[0],
							orientation.axis[1],
							orientation.axis[2],
							rotMat);

	 		// Inventor puts the camera in transformed world (model) coordinates,
			// But 3dr defines the bottom model transform by the camera. 
			// Therefore, we have to walk the qv stack of transforms, accumulating them
			// Then we transform by the resultant matrix

			GxTransformF_t	stackMat, cameraMat;
			AccumQVTransform( state, stackMat);

			G3dMultMatrix( rotMat, stackMat, cameraMat );

			Mult(cameraMat, dira, dir);

			
			if(pRC->GetViewerMode() != walk )
			{
				Mult(cameraMat, upa, up);
			}
			else						// We're walking; keep camera level
			{							// This computation is not pure VRML,
										// but it keeps camera manipulations much cleaner for
										// walking situations. The camera acts more camera-like 
										// and less airplane-like.

				PointF_t n;				// normal to plane defined by (dir, yaxis)
				// NOTE(review): 'axis' is initialized but never used below.
				PointF_t axis = {	orientation.axis[0],
									orientation.axis[1],
									orientation.axis[2] };
				PointF_t yAxis = {	0, 1, 0 };

	 			G3dCross(&yAxis, &dir, &n);
				if (G3dDot(&n, &n) > SMIDGEON)
				{
	 				G3dCross(&dir, &n, &up);
	 			}
				else
				{
					// special case; looking straight up or down
					Mult(cameraMat, upa, up);
				} 

				// Guard against a degenerate up vector before normalizing.
				if (G3dDot(&up, &up) < SMIDGEON)
				{
					up.x = 0; up.y = 1; up.z = 0;
				}
				G3dUnitVector(&up);
			}


			pRenderData->SetLoc(*(GxVec3f*)&loc)->SetUp(*(GxVec3f*)&up)->SetDir(*(GxVec3f*)&dir);
			pRenderData->SetDirty(false);
		}
		else
		{
			// Clean: reuse the cached camera frame from the render data.
			GxVec3f gloc = pRenderData->GetLoc();
			GxVec3f gup =  pRenderData->GetUp();
			GxVec3f gdir = pRenderData->GetDir();
			loc.x = gloc.x();
			loc.y = gloc.y();
			loc.z = gloc.z();
			up.x = gup.x();
			up.y = gup.y();
			up.z = gup.z();
			dir.x = gdir.x();
			dir.y = gdir.y();
			dir.z = gdir.z();

		}
		loc.w = 0;		// infinite camera for all but highest quality
						// rendering
		
		G3dSetCameraPosition(hGC,  &loc, &dir, &up);
		// use window size for projection; note that coord system is y-up
		RECT rt;
		((ChQvState*)state)->GetView()->GetClientRect(&rt);

		// set the view box based on the angle they requested, and the
		// window's aspect ratio, and the bounds of the scene graph
		Float_t fNear = nearDistance.value; 
		Float_t fFar = 50000.;
		const float maxZResolution = 2000.;	// Assuming ?? bits signed
		ChMazeWnd * pView = ((ChQvState*)state)->GetView();
		if(pView->GetBounds())
		{				   
							// Compute based on scene bounds, and
							// multiply in a little fudge factor for luck
			fFar = 1.5 * pView->GetBounds()->GetFarDistance(loc.x, loc.y, loc.z);
		}
		float minNear = fFar / maxZResolution;	   // prevent zbuffer overrun
		fNear = 1.0;							  // hack
		fNear = max(fNear, minNear);
		Float_t top = tan(heightAngle.value / 2.) * fNear ;
		Float_t bottom = -top;
		Float_t right = top * rt.right / rt.bottom;
		Float_t left = -right;

		G3dSetCameraView(hGC, right, left, top, bottom, fNear, fFar);

		G3dSetCameraPort(	hGC, 
							Float_t(rt.right),		/* right */
							0.0f,					/* left*/
			    			Float_t(rt.bottom),		/* top */
			    			0.0f);					/* bottom */

		// We've now established world, camera and screen spaces.
		// Now let's save the world-to-camera and camclip matrices into the render data for later use
				
		memcpy(pRenderData->m_cameraTransform, G3dGetModelCamMatrix(hGC), sizeof(pRenderData->m_cameraTransform));
		memcpy(pRenderData->m_invCameraTransform, G3dGetInverseModelCamMatrix(hGC), sizeof(pRenderData->m_invCameraTransform));

		G3dSetActiveStack(hGC, G3DT_CAM_CLIP);
		GxTransformF_t mat;
		G3dGetTransform(hGC, mat);	   
		pRenderData->m_camClipTransform	= GxTransform3Wf(mat);
		G3dSetActiveStack(hGC, G3DT_MODEL);

		// NOTE(review): m_top/m_bottom assignments here are swapped relative
		// to the buildInstance branch above — confirm which is intended.
		pRenderData->m_right 	=  Float_t(rt.right);		/* right */        
		pRenderData->m_left  	=  0.0f;					/* left*/          
		pRenderData->m_top   	=  0.0f;					/* top */          
		pRenderData->m_bottom	=  Float_t(rt.bottom);		/* bottom */ 
		      									  
		// Turn on headlight if required, now we know where the camera is
		// This is done in world coords, just like 3dr wanted the camera
		pRC->CreateHeadlight( GxVec3f(loc.x, loc.y, loc.z),  GxVec3f(dir.x, dir.y, dir.z), state );

		// Now rebuild the renderer's transform stack, based on the qv stack
		// To get back to model coords
		pRC->RebuildTransformStack((ChQvState*)state);
		pRC->SetCameraLoc(*(GxVec3f*)&loc);			// in world coords!
	}
	else if( ((ChQvState*)state)->GetType() ==  ChQvState::command)
	{
		// command traversal: intentionally a no-op for cameras.
	}			
 	DEFAULT_QUERYNODE(this, state);	
}

void									      
QvOrthographicCamera::traverse(QvState *state)					      
{									      
    ANNOUNCE(className);
   						      
 	DoNodeEditCommand(this, state);

    QvElement *elt = new QvElement;					      
    elt->data = this;							      
    elt->type = QvElement::OrthographicCamera;					      
    state->addElement(QvState::CameraIndex, elt);

	if( ((ChQvState*)state)->GetType() ==  ChQvState::buildInstance)
	{
		G3dHandle_t hGC = ((ChQvState*)state)->GetView()->GetGC();
		ChRenderContext *pRC = ((ChQvState*)state)->GetView()->GetRenderContext();

		G3dSetCameraProjection(hGC, G3DP_PARALLEL);		 // This is major difference for ortho
		PointF_t	up, upa = {0.0f, 1.0f, 0.0f};		// starting loc for look dirs
		PointF_t	dir, dira = {0.0f, 0.0f, -1.0f};		// will rotate later
		PointFW_t 	loc;

		ChQvPCameraRenderData *pRenderData = (ChQvPCameraRenderData *)GetRenderData();

		if(!pRenderData)
		{
			m_pRenderData = pRenderData = new ChQvPCameraRenderData(this, (ChQvBuildState*)state);
			pRenderData->m_boolPerspective = false;
		}

		// Use the node to set the render data
		Qv2Native(position, loc);
		loc.w = 0.;				 // infinite camera - faster

		float angle = orientation.angle;

		GxTransformF_t	rotMat;
		RotateMatrix(-angle, 
						orientation.axis[0],
						orientation.axis[1],
						orientation.axis[2],
						rotMat);

 		// Inventor puts the camera in transformed world (model) coordinates,
		// But 3dr defines the bottom model transform by the camera. 
		// Therefore, we have to walk the qv stack of transforms, accumulating them
		// Then we transform by the resultant matrix

		GxTransformF_t	stackMat, cameraMat;
		AccumQVTransform( state, stackMat);

		G3dMultMatrix( rotMat, stackMat, cameraMat );

		Mult(cameraMat, dira, dir);

		

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -