📄 ch3dtrvs.cpp

📁 MUD client program for Windows
💻 CPP
📖 Page 1 of 5
		if(pRC->GetViewerMode() != walk )
		{
			Mult(cameraMat, upa, up);
		}
		else						// We're walking; keep camera level
		{							// This computation is not pure VRML,
									// but it keeps camera manipulations much cleaner for
									// walking situations. The camera acts more camera-like 
									// and less airplane-like.

			PointF_t n;				// normal to plane defined by (dir, yaxis)
			PointF_t axis = {	orientation.axis[0],
								orientation.axis[1],
								orientation.axis[2] };
			PointF_t yAxis = {	0, 1, 0 };

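			// n = yAxis x dir is normal to the vertical plane containing dir;
			// up = dir x n then lies in that same plane, perpendicular to dir,
			// which removes any roll and keeps the camera level.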
 			G3dCross(&yAxis, &dir, &n);
			if (G3dDot(&n, &n) > SMIDGEON)
			{
 				G3dCross(&dir, &n, &up);
 			}
			else
			{
				// special case; looking straight up or down
				Mult(cameraMat, upa, up);
			} 

			if (G3dDot(&up, &up) < SMIDGEON)
			{
				up.x = 0; up.y = 1; up.z = 0;
			}
			G3dUnitVector(&up);
		}


		pRenderData->SetLoc(*(GxVec3f*)&loc)->SetUp(*(GxVec3f*)&up)->SetDir(*(GxVec3f*)&dir);
		pRenderData->SetStartLoc(*(GxVec3f*)&loc)->SetStartUp(*(GxVec3f*)&up)->SetStartDir(*(GxVec3f*)&dir);
		pRenderData->SetDirty(false);

		loc.w = 0;		// infinite camera for all but highest quality
						// rendering
		
		G3dSetCameraPosition(hGC,  &loc, &dir, &up);
		// use window size for projection; note that coord system is y-up
		RECT rt;
		((ChQvState*)state)->GetView()->GetClientRect(&rt);

		// set the view box based on the angle they requested, and the
		// window's aspect ratio, and the bounds of the scene graph
		Float_t fNear = nearDistance.value; 
		Float_t fFar = 50000.;
		const float maxZResolution = 2000.;	// Assuming ?? bits signed
		ChMazeWnd * pView = ((ChQvState*)state)->GetView();
		if(pView->GetBounds())
		{				   
							// Compute based on scene bounds, and
							// multiply in a little fudge factor for luck
			fFar = 1.5 * pView->GetBounds()->GetFarDistance(loc.x, loc.y, loc.z);
		}
		float minNear = fFar / maxZResolution;	   // prevent zbuffer overrun
		fNear = 1.0;							  // hack
		fNear = max(fNear, minNear);
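		// Clamping the near plane this way keeps the far/near ratio at or below
		// maxZResolution, so the z-buffer retains usable depth precision across
		// the visible range.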
		Float_t top = height.value / 2.;		  // This is the difference for ortho
		Float_t bottom = -top;
		Float_t right = top * rt.right / rt.bottom;
		Float_t left = -right;

		G3dSetCameraView(hGC, right, left, top, bottom, fNear, fFar);

		G3dSetCameraPort(	hGC, 
							Float_t(rt.right),		/* right */
							0.0f,					/* left*/
			    			Float_t(rt.bottom),		/* top */
			    			0.0f);					/* bottom */

		// Now let's save the matrices into the render data for later use
				
		memcpy(pRenderData->m_cameraTransform, G3dGetModelCamMatrix(hGC), sizeof(pRenderData->m_cameraTransform));
		memcpy(pRenderData->m_invCameraTransform, G3dGetInverseModelCamMatrix(hGC), sizeof(pRenderData->m_invCameraTransform));
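		// Also capture the camera->clip matrix from the CAM_CLIP stack, then
		// restore the model stack so the rest of the traversal composes model
		// transforms again.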
		G3dSetActiveStack(hGC, G3DT_CAM_CLIP);
		GxTransformF_t mat;
		G3dGetTransform(hGC, mat);	   
		pRenderData->m_camClipTransform	= GxTransform3Wf(mat);
		G3dSetActiveStack(hGC, G3DT_MODEL);

		pRenderData->m_right 	=  Float_t(rt.right);		/* right */        
		pRenderData->m_left  	=  0.0f;					/* left*/          
		pRenderData->m_top   	=  Float_t(rt.bottom);		/* top */          
		pRenderData->m_bottom	=  0.0f;					/* bottom */ 
		      
		// Now rebuild the renderer's transform stack, based on the qv stack
		// To get back to model coords
		pRC->RebuildTransformStack((ChQvState*)state);
		pRC->SetCameraLoc(*(GxVec3f*)&loc);			// in world coords!
	}
	else if(((ChQvState*)state)->GetType() ==  ChQvState::draw ||  ((ChQvState*)state)->GetType() ==  ChQvState::queryNode)
	{
		G3dHandle_t hGC = ((ChQvState*)state)->GetView()->GetGC();
		ChRenderContext *pRC = ((ChQvState*)state)->GetView()->GetRenderContext();

		G3dSetCameraProjection(hGC, G3DP_PARALLEL);		 // This is the major difference for ortho
		PointF_t	up, upa = {0.0f, 1.0f, 0.0f};		// starting loc for look dirs
		PointF_t	dir, dira = {0.0f, 0.0f, -1.0f};		// will rotate later
		PointFW_t 	loc;

		ChQvPCameraRenderData *pRenderData = (ChQvPCameraRenderData *)GetRenderData();
		ASSERT(pRenderData);

		if(pRenderData->IsDirty())
		{
			// Use the node to set the render data
			Qv2Native(position, loc);
			loc.w = 0.;				 // infinite camera - faster
	
			float angle = orientation.angle;

			GxTransformF_t	rotMat;
			RotateMatrix(-angle, 
							orientation.axis[0],
							orientation.axis[1],
							orientation.axis[2],
							rotMat);

	 		// Inventor puts the camera in transformed world (model) coordinates,
			// but 3dr defines the bottom model transform by the camera.
			// Therefore, we walk the qv stack of transforms, accumulating them,
			// then transform by the resultant matrix.

			GxTransformF_t	stackMat, cameraMat;
			AccumQVTransform( state, stackMat);

			G3dMultMatrix( rotMat, stackMat, cameraMat );
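			// cameraMat now combines this node's orientation with the transforms
			// accumulated from the qv stack; applying it to the default dir/up
			// vectors below yields the camera basis in world coordinates.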

			Mult(cameraMat, dira, dir);

			
			if(pRC->GetViewerMode() != walk )
			{
				Mult(cameraMat, upa, up);
			}
			else						// We're walking; keep camera level
			{							// This computation is not pure VRML,
										// but it keeps camera manipulations much cleaner for
										// walking situations. The camera acts more camera-like 
										// and less airplane-like.

				PointF_t n;				// normal to plane defined by (dir, yaxis)
				PointF_t axis = {	orientation.axis[0],
									orientation.axis[1],
									orientation.axis[2] };
				PointF_t yAxis = {	0, 1, 0 };

	 			G3dCross(&yAxis, &dir, &n);
				if (G3dDot(&n, &n) > SMIDGEON)
				{
	 				G3dCross(&dir, &n, &up);
	 			}
				else
				{
					// special case; looking straight up or down
					Mult(cameraMat, upa, up);
				} 

				if (G3dDot(&up, &up) < SMIDGEON)
				{
					up.x = 0; up.y = 1; up.z = 0;
				}
				G3dUnitVector(&up);
			}


			pRenderData->SetLoc(*(GxVec3f*)&loc)->SetUp(*(GxVec3f*)&up)->SetDir(*(GxVec3f*)&dir);
			pRenderData->SetDirty(false);
		}
		else
		{
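			// Render data is clean: reuse the cached camera location, up, and
			// direction vectors.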
			GxVec3f gloc = pRenderData->GetLoc();
			GxVec3f gup =  pRenderData->GetUp();
			GxVec3f gdir = pRenderData->GetDir();
			loc.x = gloc.x();
			loc.y = gloc.y();
			loc.z = gloc.z();
			up.x = gup.x();
			up.y = gup.y();
			up.z = gup.z();
			dir.x = gdir.x();
			dir.y = gdir.y();
			dir.z = gdir.z();

		}
		loc.w = 0;		// infinite camera for all but highest quality
						// rendering
		
		G3dSetCameraPosition(hGC,  &loc, &dir, &up);
		// use window size for projection; note that coord system is y-up
		RECT rt;
		((ChQvState*)state)->GetView()->GetClientRect(&rt);

		// set the view box based on the angle they requested, and the
		// window's aspect ratio, and the bounds of the scene graph
		Float_t fNear = nearDistance.value; 
		Float_t fFar = 50000.;
		const float maxZResolution = 2000.;	// Assuming ?? bits signed
		ChMazeWnd * pView = ((ChQvState*)state)->GetView();
		if(pView->GetBounds())
		{				   
							// Compute based on scene bounds, and
							// multiply in a little fudge factor for luck
			fFar = 1.5 * pView->GetBounds()->GetFarDistance(loc.x, loc.y, loc.z);
		}
		float minNear = fFar / maxZResolution;	   // prevent zbuffer overrun
		fNear = 1.0;							  // hack
		fNear = max(fNear, minNear);
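		// View box for the parallel projection: half-height comes straight from
		// the node's height field, and the half-width scales that by the
		// window's aspect ratio (rt.right / rt.bottom).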
		Float_t top = height.value / 2.;		  // This is the difference for ortho
		Float_t bottom = -top;
		Float_t right = top * rt.right / rt.bottom;
		Float_t left = -right;

		G3dSetCameraView(hGC, right, left, top, bottom, fNear, fFar);

		G3dSetCameraPort(	hGC, 
							Float_t(rt.right),		/* right */
							0.0f,					/* left*/
			    			Float_t(rt.bottom),		/* top */
			    			0.0f);					/* bottom */

		// We've now established world, camera and screen spaces.
		// Now let's save the world-to-camera and camclip matrices into the render data for later use
				
		memcpy(pRenderData->m_cameraTransform, G3dGetModelCamMatrix(hGC), sizeof(pRenderData->m_cameraTransform));
		memcpy(pRenderData->m_invCameraTransform, G3dGetInverseModelCamMatrix(hGC), sizeof(pRenderData->m_invCameraTransform));

		G3dSetActiveStack(hGC, G3DT_CAM_CLIP);
		GxTransformF_t mat;
		G3dGetTransform(hGC, mat);	   
		pRenderData->m_camClipTransform	= GxTransform3Wf(mat);
		G3dSetActiveStack(hGC, G3DT_MODEL);

		pRenderData->m_right 	=  Float_t(rt.right);		/* right */        
		pRenderData->m_left  	=  0.0f;					/* left*/          
		pRenderData->m_top   	=  0.0f;					/* top */          
		pRenderData->m_bottom	=  Float_t(rt.bottom);		/* bottom */ 
		      									  
		// Turn on headlight if required, now we know where the camera is
		// This is done in world coords, just like 3dr wanted the camera
		pRC->CreateHeadlight( GxVec3f(loc.x, loc.y, loc.z),  GxVec3f(dir.x, dir.y, dir.z), state );

		// Now rebuild the renderer's transform stack, based on the qv stack
		// To get back to model coords
		pRC->RebuildTransformStack((ChQvState*)state);
		pRC->SetCameraLoc(*(GxVec3f*)&loc);			// in world coords!
	}
	else if( ((ChQvState*)state)->GetType() ==  ChQvState::command)
	{
	}			
 	DEFAULT_QUERYNODE(this, state);	
}
#elif (defined(CH_USE_RLAB) || defined(CH_USE_D3D))
//DO_TYPED_PROPERTY(QvOrthographicCamera, CameraIndex,OrthographicCamera)

void									      
QvOrthographicCamera::traverse(QvState *state)					      
{									      
    ANNOUNCE(className);
   						      
 	DoNodeEditCommand(this, state);

    QvElement *elt = new QvElement;					      
    elt->data = this;							      
    elt->type = QvElement::OrthographicCamera;					      
    state->addElement(QvState::CameraIndex, elt);

	if( ((ChQvState*)state)->GetType() ==  ChQvState::buildInstance)
	{
		ChQvPCameraRenderData *pRenderData = (ChQvPCameraRenderData *)GetRenderData();

		if(!pRenderData)
		{
			m_pRenderData = pRenderData = new ChQvPCameraRenderData(this, (ChQvBuildState*)state);
		}
	}
	else if(((ChQvState*)state)->GetType() ==  ChQvState::draw ||  ((ChQvState*)state)->GetType() ==  ChQvState::queryNode)
	{
	}
	else if( ((ChQvState*)state)->GetType() ==  ChQvState::command)
	{
	}			
 	DEFAULT_QUERYNODE(this, state);	
}

void									      
QvPerspectiveCamera::traverse(QvState *state)					      
{									      
    ANNOUNCE(className);
   						      
 	DoNodeEditCommand(this, state);

    QvElement *elt = new QvElement;					      
    elt->data = this;							      
    elt->type = QvElement::PerspectiveCamera;					      
    state->addElement(QvState::CameraIndex, elt);

	if( ((ChQvState*)state)->GetType() ==  ChQvState::buildInstance)
	{
		ChQvPCameraRenderData *pRenderData = (ChQvPCameraRenderData *)GetRenderData();

		if(!pRenderData)
		{
			m_pRenderData = pRenderData = new ChQvPCameraRenderData(this, (ChQvBuildState*)state);
		}
	}
	else if(((ChQvState*)state)->GetType() ==  ChQvState::draw ||  ((ChQvState*)state)->GetType() ==  ChQvState::queryNode)
	{
	}
	else if( ((ChQvState*)state)->GetType() ==  ChQvState::command)
	{
	}			
 	DEFAULT_QUERYNODE(this, state);	
}

#endif


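// The transformation nodes below (Transform, Rotation, MatrixTransform) follow
// a common pattern: record an element on the TransformationIndex stack, compose
// the node's matrix into the render context's current transform and, during
// buildInstance traversals, attach an instance object that stores the node's
// own ("self") transform.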
void									      
QvTransform::traverse(QvState *state)					      
{									      
 	DoNodeEditCommand(this, state);
 	DEFAULT_QUERYNODE(this, state);	

	ChQvElement *elt = new ChQvElement;					      
	elt->data = this;							      
	elt->type = QvElement::Transform;					      
	state->addElement(QvState::TransformationIndex, elt);

	  
	if( ((ChQvState*)state)->GetType() ==  ChQvState::draw || 
		((ChQvState*)state)->GetType() ==  ChQvState::getBounds ||
		((ChQvState*)state)->GetType() ==  ChQvState::buildInstance ||
		((ChQvState*)state)->GetType() ==  ChQvState::hitTest)
	{
		GxTransformF_t mat;		
		GetTransform(this, mat);
		GxTransform3Wf transform(mat);
		ChRenderContext *pRC = ((ChQvState*)state)->GetView()->GetRenderContext();
		pRC->ComposeTransform(transform);

		if(((ChQvState*)state)->GetType() ==  ChQvState::buildInstance ) 
		{	
			ChQvBuildState *bldState = (ChQvBuildState*)state;
			if(!m_pRenderData) m_pRenderData = new ChQvTransformationRenderData(this);
			ChQvTransformationRenderData *pRenderData =  (ChQvTransformationRenderData *)m_pRenderData;
			ChQvTransformInstance *pInstance = new ChQvTransformInstance;
			pInstance->Attach(this, bldState);
			pInstance->SetSelfTransform(mat);

			elt->SetInstance(pInstance);
		}
 
	}
	else if( ((ChQvState*)state)->GetType() ==  ChQvState::command)
	{
	}			
}

void									      
QvRotation::traverse(QvState *state)					      
{									      
 	DoNodeEditCommand(this, state);
 	DEFAULT_QUERYNODE(this, state);	

	ChQvElement *elt = new ChQvElement;					      
	elt->data = this;							      
	elt->type = QvElement::Rotation;					      
	state->addElement(QvState::TransformationIndex, elt);

	  
	if( ((ChQvState*)state)->GetType() ==  ChQvState::draw || 
		((ChQvState*)state)->GetType() ==  ChQvState::getBounds ||
		((ChQvState*)state)->GetType() ==  ChQvState::buildInstance ||
		((ChQvState*)state)->GetType() ==  ChQvState::hitTest)
	{
		GxTransformF_t mat;		
		GetTransform(this, mat);
		GxTransform3Wf transform(mat);
		ChRenderContext *pRC = ((ChQvState*)state)->GetView()->GetRenderContext();
		pRC->ComposeTransform(transform);

		if(((ChQvState*)state)->GetType() ==  ChQvState::buildInstance ) 
		{	
			ChQvBuildState *bldState = (ChQvBuildState*)state;
			if(!m_pRenderData) m_pRenderData = new ChQvTransformationRenderData(this);
			ChQvTransformationRenderData *pRenderData =  (ChQvTransformationRenderData *)m_pRenderData;
			ChQvRotationInstance *pInstance = new ChQvRotationInstance;
			pInstance->Attach(this, bldState);
			pInstance->SetSelfTransform(mat);

			elt->SetInstance(pInstance);
		}
	}
	else if( ((ChQvState*)state)->GetType() ==  ChQvState::command)
	{
	}			
}

void									      
QvMatrixTransform::traverse(QvState *state)					      
{									      
 	DoNodeEditCommand(this, state);
 	DEFAULT_QUERYNODE(this, state);	

	ChQvElement *elt = new ChQvElement;					      
	elt->data = this;							      
	elt->type = QvElement::MatrixTransform;					      
	state->addElement(QvState::TransformationIndex, elt);

	if( ((ChQvState*)state)->GetType() ==  ChQvState::draw || 
		((ChQvState*)state)->GetType() ==  ChQvState::getBounds ||
		((ChQvState*)state)->GetType() ==  ChQvState::buildInstance ||
		((ChQvState*)state)->GetType() ==  ChQvState::hitTest)
	{
		GxTransformF_t mat;		
		GetTransform(this, mat);
		GxTransform3Wf transform(mat);
		ChRenderContext *pRC = ((ChQvState*)state)->GetView()->GetRenderContext();
		pRC->ComposeTransform(transform);
