// t3dlib7.cpp
} // end for vertex
} // end for poly
} // end Camera_To_Perspective_RENDERLIST4DV2
////////////////////////////////////////////////////////////////
void Camera_To_Perspective_Screen_RENDERLIST4DV2(RENDERLIST4DV2_PTR rend_list,
CAM4DV1_PTR cam)
{
// NOTE: this is not a matrix based function
// this function transforms the camera coordinates of an object
// into Screen scaled perspective coordinates, based on the
// sent camera object, that is, view_dist_h and view_dist_v
// should be set to cause the desired (viewport_width X viewport_height)
// projection of the vertices; it only works on the vertices in the tvlist[] list
// finally, the function also inverts the y axis, so the coordinates
// generated from this function ARE screen coordinates and ready for
// rendering
// transform each polygon in the render list to perspective screen
// coordinates; assumes the render list has already been transformed
// to camera coordinates and the result is in tvlist[]
for (int poly = 0; poly < rend_list->num_polys; poly++)
{
// acquire current polygon
POLYF4DV2_PTR curr_poly = rend_list->poly_ptrs[poly];
// is this polygon valid?
// transform this polygon if and only if it's not clipped, not culled,
// active, and visible; note, however, that the concept of a "backface"
// is irrelevant in a wireframe engine
if ((curr_poly==NULL) || !(curr_poly->state & POLY4DV2_STATE_ACTIVE) ||
(curr_poly->state & POLY4DV2_STATE_CLIPPED ) ||
(curr_poly->state & POLY4DV2_STATE_BACKFACE) )
continue; // move onto next poly
float alpha = (0.5*cam->viewport_width-0.5);
float beta = (0.5*cam->viewport_height-0.5);
// all good, let's transform
for (int vertex = 0; vertex < 3; vertex++)
{
float z = curr_poly->tvlist[vertex].z;
// transform the vertex by the view parameters in the camera
curr_poly->tvlist[vertex].x = cam->view_dist*curr_poly->tvlist[vertex].x/z;
curr_poly->tvlist[vertex].y = cam->view_dist*curr_poly->tvlist[vertex].y/z;
// z = z, so no change
// note that we are NOT dividing by the homogeneous w coordinate since
// we are not using a matrix operation for this version of the function
// now the coordinates are in the range x:(-viewport_width/2 to viewport_width/2)
// and y:(-viewport_height/2 to viewport_height/2), thus we need a translation and
// since the y-axis is inverted, we need to invert y to complete the screen
// transform:
curr_poly->tvlist[vertex].x = curr_poly->tvlist[vertex].x + alpha;
curr_poly->tvlist[vertex].y = -curr_poly->tvlist[vertex].y + beta;
} // end for vertex
} // end for poly
} // end Camera_To_Perspective_Screen_RENDERLIST4DV2
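// --- illustrative sketch (not part of the original t3dlib7 source) ----------
// the helper below restates the per-vertex math used by
// Camera_To_Perspective_Screen_RENDERLIST4DV2 above in a self-contained form:
// perspective divide by z scaled by view_dist, then the half-pixel-centered
// viewport translation with the y axis flipped; "DemoPoint3" and
// "Demo_Camera_To_Screen" are hypothetical names used only for this example
struct DemoPoint3 { float x, y, z; };

DemoPoint3 Demo_Camera_To_Screen(DemoPoint3 p_cam,          // point in camera space
                                 float view_dist,           // viewing distance
                                 float viewport_width,
                                 float viewport_height)
{
    // centering constants, same form as alpha/beta in the function above
    float alpha = (0.5f*viewport_width  - 0.5f);
    float beta  = (0.5f*viewport_height - 0.5f);

    DemoPoint3 p_scr;
    // perspective projection: divide by z and scale by the view distance
    p_scr.x = view_dist * p_cam.x / p_cam.z;
    p_scr.y = view_dist * p_cam.y / p_cam.z;
    p_scr.z = p_cam.z;   // z is carried through unchanged

    // viewport mapping: translate onto the screen and invert the y axis
    p_scr.x =  p_scr.x + alpha;
    p_scr.y = -p_scr.y + beta;
    return(p_scr);
} // end Demo_Camera_To_Screen
// ---------------------------------------------------------------------------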
//////////////////////////////////////////////////////////////
void Perspective_To_Screen_RENDERLIST4DV2(RENDERLIST4DV2_PTR rend_list,
CAM4DV1_PTR cam)
{
// NOTE: this is not a matrix based function
// this function transforms the perspective coordinates of the render
// list into screen coordinates, based on the sent viewport in the camera
// assuming that the viewplane coordinates were normalized
// you would use this function instead of the object based function
// if you decided earlier in the pipeline to turn each object into
// a list of polygons and then add them to the global render list
// you would only call this function if you previously performed
// a normalized perspective transform
// transform each polygon in the render list from perspective to screen
// coordinates; assumes the render list has already been transformed
// to normalized perspective coordinates and the result is in tvlist[]
for (int poly = 0; poly < rend_list->num_polys; poly++)
{
// acquire current polygon
POLYF4DV2_PTR curr_poly = rend_list->poly_ptrs[poly];
// is this polygon valid?
// transform this polygon if and only if it's not clipped, not culled,
// active, and visible; note, however, that the concept of a "backface"
// is irrelevant in a wireframe engine
if ((curr_poly==NULL) || !(curr_poly->state & POLY4DV2_STATE_ACTIVE) ||
(curr_poly->state & POLY4DV2_STATE_CLIPPED ) ||
(curr_poly->state & POLY4DV2_STATE_BACKFACE) )
continue; // move onto next poly
float alpha = (0.5*cam->viewport_width-0.5);
float beta = (0.5*cam->viewport_height-0.5);
// all good, let's transform
for (int vertex = 0; vertex < 3; vertex++)
{
// the vertex is in perspective normalized coords from -1 to 1
// on each axis, simple scale them and invert y axis and project
// to screen
// transform the vertex by the view parameters in the camera
curr_poly->tvlist[vertex].x = alpha + alpha*curr_poly->tvlist[vertex].x;
curr_poly->tvlist[vertex].y = beta - beta *curr_poly->tvlist[vertex].y;
} // end for vertex
} // end for poly
} // end Perspective_To_Screen_RENDERLIST4DV2
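// --- illustrative sketch (not part of the original t3dlib7 source) ----------
// self-contained restatement of the normalized-to-screen mapping performed by
// Perspective_To_Screen_RENDERLIST4DV2 above; "Demo_Norm_To_Screen" is a
// hypothetical helper written only for this example
void Demo_Norm_To_Screen(float xn, float yn,                 // normalized coords in [-1,1]
                         float viewport_width, float viewport_height,
                         float *xs, float *ys)               // resulting screen coords
{
    float alpha = (0.5f*viewport_width  - 0.5f);
    float beta  = (0.5f*viewport_height - 0.5f);
    *xs = alpha + alpha*xn;   // xn = -1 maps to 0, xn = +1 maps to width-1
    *ys = beta  - beta *yn;   // yn = +1 maps to 0, yn = -1 maps to height-1 (y flip)
} // end Demo_Norm_To_Screen
// for a 640x480 viewport this gives (-1,+1) -> (0,0), (0,0) -> (319.5, 239.5),
// and (+1,-1) -> (639, 479)
// ---------------------------------------------------------------------------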
///////////////////////////////////////////////////////////////
void World_To_Camera_RENDERLIST4DV2(RENDERLIST4DV2_PTR rend_list,
CAM4DV1_PTR cam)
{
// NOTE: this is a matrix based function
// this function transforms each polygon in the global render list
// to camera coordinates based on the sent camera transform matrix
// you would use this function instead of the object based function
// if you decided earlier in the pipeline to turn each object into
// a list of polygons and then add them to the global render list
// the conversion of an object into polygons probably would have
// happened after object culling, local transforms, local to world
// and backface culling, so the minimum number of polygons from
// each object are in the list, note that the function assumes
// that at LEAST the local to world transform has been called
// and the polygon data is in the transformed list tvlist of
// the POLYF4DV1 object
// transform each polygon in the render list into camera coordinates
// assumes the render list has already been transformed to world
// coordinates and the result is in tvlist[] of each polygon object
for (int poly = 0; poly < rend_list->num_polys; poly++)
{
// acquire current polygon
POLYF4DV2_PTR curr_poly = rend_list->poly_ptrs[poly];
// is this polygon valid?
// transform this polygon if and only if it's not clipped, not culled,
// active, and visible; note, however, that the concept of a "backface"
// is irrelevant in a wireframe engine
if ((curr_poly==NULL) || !(curr_poly->state & POLY4DV2_STATE_ACTIVE) ||
(curr_poly->state & POLY4DV2_STATE_CLIPPED ) ||
(curr_poly->state & POLY4DV2_STATE_BACKFACE) )
continue; // move onto next poly
// all good, let's transform
for (int vertex = 0; vertex < 3; vertex++)
{
// transform the vertex by the mcam matrix within the camera
// it better be valid!
POINT4D presult; // hold result of each transformation
// transform point
Mat_Mul_VECTOR4D_4X4(&curr_poly->tvlist[vertex].v, &cam->mcam, &presult);
// store result back
VECTOR4D_COPY(&curr_poly->tvlist[vertex].v, &presult);
} // end for vertex
} // end for poly
} // end World_To_Camera_RENDERLIST4DV2
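// --- illustrative sketch (not part of the original t3dlib7 source) ----------
// one possible per-frame ordering of the render-list transforms in this file;
// "Demo_Transform_Render_List" is a hypothetical wrapper written only for this
// example and assumes rend_list already holds world-space polygons and that
// cam->mcam has been built elsewhere in the pipeline
void Demo_Transform_Render_List(RENDERLIST4DV2_PTR rend_list, CAM4DV1_PTR cam)
{
    // world space -> camera space (matrix based, uses cam->mcam)
    World_To_Camera_RENDERLIST4DV2(rend_list, cam);

    // camera space -> screen space in one step: perspective divide,
    // viewport translation, and y-axis flip, ready for rasterization
    Camera_To_Perspective_Screen_RENDERLIST4DV2(rend_list, cam);
} // end Demo_Transform_Render_List
// the two-step path (Camera_To_Perspective_RENDERLIST4DV2 followed by
// Perspective_To_Screen_RENDERLIST4DV2) can be used instead, provided the
// camera is set up so the perspective transform produces normalized -1..1
// coordinates, as noted in Perspective_To_Screen_RENDERLIST4DV2 above
// ---------------------------------------------------------------------------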
////////////////////////////////////////////////////////////
void Camera_To_Perspective_OBJECT4DV2(OBJECT4DV2_PTR obj, CAM4DV1_PTR cam)
{
// NOTE: this is not a matrix based function
// this function transforms the camera coordinates of an object
// into perspective coordinates, based on the
// sent camera object, but it totally disregards the polygons themselves,
// it only works on the vertices in the vlist_trans[] list
// this is one way to do it, you might instead transform
// the global list of polygons in the render list since you
// are guaranteed that those polys represent geometry that
// has passed through backface culling (if any)
// finally this function is really for experimental reasons only
// you would probably never let an object stay intact this far down
// the pipeline, since it's probable that only a single polygon
// is visible! But this function has to transform the whole mesh!
// note: only operates on the current frame
// transform each vertex in the object to perspective coordinates
// assumes the object has already been transformed to camera
// coordinates and the result is in vlist_trans[]
for (int vertex = 0; vertex < obj->num_vertices; vertex++)
{
float z = obj->vlist_trans[vertex].z;
// transform the vertex by the view parameters in the camera
obj->vlist_trans[vertex].x = cam->view_dist*obj->vlist_trans[vertex].x/z;
obj->vlist_trans[vertex].y = cam->view_dist*obj->vlist_trans[vertex].y*cam->aspect_ratio/z;
// z = z, so no change
// note that we are NOT dividing by the homogeneous w coordinate since
// we are not using a matrix operation for this version of the function
} // end for vertex
} // end Camera_To_Perspective_OBJECT4DV2
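// --- illustrative sketch (not part of the original t3dlib7 source) ----------
// the per-vertex math above written out as a standalone helper; note the extra
// aspect_ratio factor on y, which scales the vertical axis to account for a
// non-square viewport; "DemoVtx3" and "Demo_Camera_To_Perspective" are
// hypothetical names used only for this example
struct DemoVtx3 { float x, y, z; };

DemoVtx3 Demo_Camera_To_Perspective(DemoVtx3 v, float view_dist, float aspect_ratio)
{
    DemoVtx3 out;
    out.x = view_dist * v.x / v.z;                 // x' = d*x/z
    out.y = view_dist * v.y * aspect_ratio / v.z;  // y' = d*y*ar/z
    out.z = v.z;                                   // z carried through unchanged
    return(out);
} // end Demo_Camera_To_Perspective
// ---------------------------------------------------------------------------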
//////////////////////////////////////////////////////////////
void Camera_To_Perspective_Screen_OBJECT4DV2(OBJECT4DV2_PTR obj, CAM4DV1_PTR cam)
{
// NOTE: this is not a matrix based function
// this function transforms the camera coordinates of an object
// into Screen scaled perspective coordinates, based on the
// sent camera object, that is, view_dist_h and view_dist_v
// should be set to cause the desired (width X height)
// projection of the vertices, but the function totally
// disregards the polygons themselves,
// it only works on the vertices in the vlist_trans[] list
// this is one way to do it, you might instead transform
// the global list of polygons in the render list since you
// are guaranteed that those polys represent geometry that
// has passed through backface culling (if any)
// finally this function is really for experimental reasons only
// you would probably never let an object stay intact this far down
// the pipeline, since it's probable that only a single polygon
// is visible! But this function has to transform the whole mesh!
// finally, the function also inverts the y axis, so the coordinates
// generated from this function ARE screen coordinates and ready for
// rendering
// note: only operates on the current frame
float alpha = (0.5*cam->viewport_width-0.5);
float beta = (0.5*cam->viewport_height-0.5);
// transform each vertex in the object to perspective screen coordinates
// assumes the object has already been transformed to camera
// coordinates and the result is in vlist_trans[]
for (int vertex = 0; vertex < obj->num_vertices; vertex++)
{
float z = obj->vlist_trans[vertex].z;
// transform the vertex by the view parameters in the camera
obj->vlist_trans[vertex].x = cam->view_dist*obj->vlist_trans[vertex].x/z;
obj->vlist_trans[vertex].y = cam->view_dist*obj->vlist_trans[vertex].y/z;
// z = z, so no change
// note that we are NOT dividing by the homogeneous w coordinate since
// we are not using a matrix operation for this version of the function
// now the coordinates are in the range x:(-viewport_width/2 to viewport_width/2)
// and y:(-viewport_height/2 to viewport_height/2), thus we need a translation and
// since the y-axis is inverted, we need to invert y to complete the screen
// transform:
obj->vlist_trans[vertex].x = obj->vlist_trans[vertex].x + alpha;
obj->vlist_trans[vertex].y = -obj->vlist_trans[vertex].y + beta;
} // end for vertex
} // end Camera_To_Perspective_Screen_OBJECT4DV2
//////////////////////////////////////////////////////////////
void Perspective_To_Screen_OBJECT4DV2(OBJECT4DV2_PTR obj, CAM4DV1_PTR cam)
{
// NOTE: this is not a matrix based function
// this function transforms the perspective coordinates of an object
// into screen coordinates, based on the sent viewport info
// but it totally disregards the polygons themselves,
// it only works on the vertices in the vlist_trans[] list
// this is one way to do it, you might instead transform
// the global list of polygons in the render list since you
// are guaranteed that those polys represent geometry that
// has passed through backface culling (if any)
// finally this function is really for experimental reasons only
// you would probably never let an object stay intact this far down
// the pipeline, since it's probable that only a single polygon
// is visible! But this function has to transform the whole mesh!
// this function would be called after a perspective
// projection was performed on the object
// transform each vertex in the object to screen coordinates
// assumes the object has already been transformed to perspective
// coordinates and the result is in vlist_trans[]
// note: only operates on the current frame
float alpha = (0.5*cam->viewport_width-0.5);
float beta = (0.5*cam->viewport_height-0.5);
for (int vertex = 0; vertex < obj->num_vertices; vertex++)
{
// assumes the vertex is in perspective normalized coords from -1 to 1
// on each axis, simple scale them to viewport and invert y axis and project
// to screen
// transform the vertex by the view parameters in the camera
obj->vlist_trans[vertex].x = alpha + alpha*obj->vlist_trans[vertex].x;
obj->vlist_trans[vertex].y = beta - beta *obj->vlist_trans[vertex].y;
} // end for vertex
} // end Perspective_To_Screen_OBJECT4DV2
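// --- illustrative sketch (not part of the original t3dlib7 source) ----------
// the object-based two-step path, expressed as a hypothetical wrapper written
// only for this example; as the notes above explain, this ordering assumes the
// camera is set up so the perspective transform leaves the vertices normalized
// to the -1..1 range before the screen mapping is applied
void Demo_Project_Object(OBJECT4DV2_PTR obj, CAM4DV1_PTR cam)
{
    // camera space -> perspective coordinates (non-matrix, divides by z)
    Camera_To_Perspective_OBJECT4DV2(obj, cam);
    // normalized perspective -> viewport pixels with the y axis flipped
    Perspective_To_Screen_OBJECT4DV2(obj, cam);
} // end Demo_Project_Object
// Camera_To_Perspective_Screen_OBJECT4DV2 above goes directly from camera
// space to screen coordinates in a single pass instead
// ---------------------------------------------------------------------------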
/////////////////////////////////////////////////////////////
void Convert_From_Homogeneous4D_OBJECT4DV2(OBJECT4DV2_PTR obj)
{
// this function converts all vertices in the transformed
// vertex list from 4D homogeneous coordinates to normal 3D coordinates
// by dividing each x,y,z component by w
// note: only operates on the current frame
for (int vertex = 0; vertex < obj->num_vertices; vertex++)
{
// convert to non-homogenous coords
VECTOR4D_DIV_BY_W(&obj->vlist_trans[vertex].v);
} // end for vertex
} // end Convert_From_Homogeneous4D_OBJECT4DV2
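// --- illustrative sketch (not part of the original t3dlib7 source) ----------
// the homogeneous divide that (per the comment above) VECTOR4D_DIV_BY_W
// performs, written out for a minimal stand-in type; "DemoVec4" and
// "Demo_Div_By_W" are hypothetical names used only for this example
struct DemoVec4 { float x, y, z, w; };

void Demo_Div_By_W(DemoVec4 *v)
{
    // divide the spatial components by w to return to ordinary 3D coordinates;
    // w is assumed to be non-zero here
    v->x /= v->w;
    v->y /= v->w;
    v->z /= v->w;
} // end Demo_Div_By_W
// ---------------------------------------------------------------------------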
//////////////////////////////////////////////////////////////////
int Insert_POLY4DV2_RENDERLIST4DV2(RENDERLIST4DV2_PTR rend_list,
POLY4DV2_PTR poly)
{
// converts the sent POLY4DV2 into a POLYF4DV2 and inserts it
// into the render list; this function needs optimizing
// step 0: are we full?
if (rend_list->num_polys >= RENDERLIST4DV2_MAX_POLYS)
return(0);
// step 1: copy polygon into next opening in polygon render list
// point pointer to polygon structure
rend_list->poly_ptrs[rend_list->num_polys] = &rend_list->poly_data[rend_list->num_polys];
// copy fields { ??????????? make sure ALL fields are copied, normals, textures, etc!!! }
rend_list->poly_data[rend_list->num_polys].state = poly->state;
rend_list->poly_data[rend_list->num_polys].attr = poly->attr;