📄 triangulate_impl.h
字号:
// NOTE(review): this chunk begins mid-declaration of class poly<coord_t>
// (the class header and earlier members, including m_edge_index and the
// ib_iterator typedef, are outside this view) and ends mid-way through
// poly<coord_t>::get_next_ear(), which is truncated at the bottom.
// Line structure below is reconstructed from a scrape that had collapsed
// all newlines; tokens are unchanged.

	// point search index (for finding reflex verts within a potential ear)
	typedef grid_index_point<coord_t, int> point_index_t;
	typedef typename point_index_t::iterator ip_iterator;
	// Spatial index over the poly's reflex verts; NULL when not built.
	point_index_t* m_reflex_point_index;
};


template<class coord_t>
bool poly<coord_t>::is_valid(const array<vert_t>& sorted_verts, bool check_consecutive_dupes /* = true */) const
// Assert validity.
//
// Debug-only invariant checker: always returns true, but under
// #ifndef NDEBUG asserts that the vert loop, counts, leftmost vert,
// and spatial indices are all mutually consistent.
//
// @param sorted_verts            global vert array this poly's indices refer into
// @param check_consecutive_dupes when false, adjacent coincident verts are
//                                tolerated (used while a poly is being built)
{
#ifndef NDEBUG
	if (m_loop == -1 && m_leftmost_vert == -1 && m_vertex_count == 0)
	{
		// Empty poly.
		return true;
	}

	assert(m_leftmost_vert == -1 || sorted_verts[m_leftmost_vert].m_poly_owner == this);

	// Check vert count.
	int first_vert = m_loop;
	int vi = first_vert;
	int vert_count = 0;
	int ear_count = 0;
	bool found_leftmost = false;
	int reflex_vert_count = 0;
	do
	{
		const poly_vert<coord_t>* pvi = &sorted_verts[vi];

		// Check ownership.
		assert(pvi->m_poly_owner == this);

		// Check leftmost vert.
		assert(m_leftmost_vert == -1
		       || compare_vertices<coord_t>(
			       (const void*) &sorted_verts[m_leftmost_vert],
			       (const void*) &sorted_verts[vi]) <= 0);

		// Check link integrity (doubly-linked loop: next's prev points back here).
		int v_next = pvi->m_next;
		assert(sorted_verts[v_next].m_prev == vi);

		if (vi == m_leftmost_vert)
		{
			found_leftmost = true;
		}

		if (check_consecutive_dupes && v_next != vi)
		{
			// Subsequent verts are not allowed to be
			// coincident; that causes errors in ear
			// classification.
			assert((pvi->m_v == sorted_verts[v_next].m_v) == false);
		}

		if (pvi->m_convex_result < 0)
		{
			reflex_vert_count++;
		}

		if (pvi->m_is_ear)
		{
			ear_count++;
		}

		vert_count++;
		vi = v_next;
	}
	while (vi != first_vert);

	assert(ear_count == m_ear_count);
	assert(vert_count == m_vertex_count);
	assert(found_leftmost || m_leftmost_vert == -1);

	// Count reflex verts in the grid index.
	if (m_reflex_point_index)
	{
		int check_count = 0;
		for (ip_iterator it = m_reflex_point_index->begin(m_reflex_point_index->get_bound());
		     ! it.at_end();
		     ++it)
		{
			check_count++;
		}
		assert(check_count == reflex_vert_count);
	}

	// Count edges in the edge index.  There should be exactly one edge per vert.
	if (m_edge_index)
	{
		int check_count = 0;
		for (ib_iterator it = m_edge_index->begin(m_edge_index->get_bound());
		     ! it.at_end();
		     ++it)
		{
			check_count++;
		}
		assert(check_count == vert_count);
	}

	// Might be nice to check that all verts with (m_poly_owner ==
	// this) are in our loop.
#endif // not NDEBUG

	return true;
}


template<class coord_t>
void poly<coord_t>::invalidate(const array<vert_t>& sorted_verts)
// Mark as invalid/empty.  Do this after linking into another poly,
// for safety/debugging.
{
	assert(m_loop == -1 || sorted_verts[m_loop].m_poly_owner != this);	// make sure our verts have been stolen already.

	m_loop = -1;
	m_leftmost_vert = -1;
	m_vertex_count = 0;

	assert(is_valid(sorted_verts));
}


template<class coord_t>
int compare_polys_by_leftmost_vert(const void* a, const void* b)
// qsort-style comparator over poly<coord_t>** elements.  Orders polys
// by their leftmost vert *index*; never returns 0, since distinct
// polys may not share a vert.
{
	const poly<coord_t>* poly_a = * (const poly<coord_t>**) a;
	const poly<coord_t>* poly_b = * (const poly<coord_t>**) b;

	// Vert indices are sorted, so we just compare the indices,
	// not the actual vert coords.
	if (poly_a->m_leftmost_vert < poly_b->m_leftmost_vert)
	{
		return -1;
	}
	else
	{
		// polys are not allowed to share verts, so the
		// leftmost vert must be different!
		assert(poly_a->m_leftmost_vert > poly_b->m_leftmost_vert);
		return 1;
	}
}


template<class coord_t>
void poly<coord_t>::append_vert(array<vert_t>* sorted_verts, int vert_index)
// Link the specified vert into our loop.
//
// Inserts vert_index just before m_loop (i.e. at the "end" of the
// loop), updates ownership and m_leftmost_vert, and bumps the count.
{
	assert(vert_index >= 0 && vert_index < sorted_verts->size());
	assert(is_valid(*sorted_verts, false /* don't check for consecutive dupes, poly is not finished */));

	m_vertex_count++;

	if (m_loop == -1)
	{
		// First vert.  It forms a degenerate one-element loop
		// pointing at itself.
		assert(m_vertex_count == 1);
		m_loop = vert_index;
		poly_vert<coord_t>* pv = &(*sorted_verts)[vert_index];
		pv->m_next = vert_index;
		pv->m_prev = vert_index;
		pv->m_poly_owner = this;

		m_leftmost_vert = vert_index;
	}
	else
	{
		// We have a loop.  Link the new vert in, behind the
		// first vert.
		poly_vert<coord_t>* pv0 = &(*sorted_verts)[m_loop];
		poly_vert<coord_t>* pv = &(*sorted_verts)[vert_index];
		pv->m_next = m_loop;
		pv->m_prev = pv0->m_prev;
		pv->m_poly_owner = this;

		(*sorted_verts)[pv0->m_prev].m_next = vert_index;
		pv0->m_prev = vert_index;

		// Update m_leftmost_vert
		poly_vert<coord_t>* pvl = &(*sorted_verts)[m_leftmost_vert];
		if (compare_vertices<coord_t>(pv, pvl) < 0)
		{
			// v is to the left of vl; it's the new leftmost vert
			m_leftmost_vert = vert_index;
		}
	}

	assert(is_valid(*sorted_verts, false /* don't check for consecutive dupes, poly is not finished */));
}


template<class coord_t>
int poly<coord_t>::find_valid_bridge_vert(const array<vert_t>& sorted_verts, int v1)
// Find a vert v, in this poly, such that v is to the left of v1, and
// the edge (v,v1) doesn't intersect any edges in this poly.
//
// Returns the chosen vert index.  On pathological (self-intersecting)
// input where no valid bridge exists, falls back to m_leftmost_vert
// after logging to stderr.
{
	assert(is_valid(sorted_verts));

	const poly_vert<coord_t>* pv1 = &(sorted_verts[v1]);
	assert(pv1->m_poly_owner != this);	// v1 must not be part of this poly already

	// Held recommends searching verts near v1 first.  And for
	// correctness, we may only consider verts to the left of v1.
	// A fast & easy way to implement this is to walk backwards in
	// our vert array, starting with v1-1.

	// Walk forward to include all coincident but later verts!
	int vi = v1;
	while ((vi + 1) < sorted_verts.size() && sorted_verts[vi + 1].m_v == pv1->m_v)
	{
		vi++;
	}

	// Now scan backwards for the vert to bridge onto.
	for ( ; vi >= 0; vi--)
	{
		const poly_vert<coord_t>* pvi = &sorted_verts[vi];

		assert(compare_vertices<coord_t>((void*) pvi, (void*) pv1) <= 0);

		if (pvi->m_poly_owner == this)
		{
			// Candidate is to the left of pv1, so it
			// might be valid.  We don't consider verts to
			// the right of v1, because of possible
			// intersection with other polys.  Due to the
			// poly sorting, we know that the edge
			// (pvi,pv1) can only intersect this poly.
			if (any_edge_intersection(sorted_verts, v1, vi) == false)
			{
				return vi;
			}
		}
	}

	// Ugh!  No valid bridge vert.  Shouldn't happen with valid
	// data.  For invalid data, just pick something and live with
	// the intersection.
	fprintf(stderr, "can't find bridge for vert %d!\n", v1);//xxxxxxxxx

	return m_leftmost_vert;
}


template<class coord_t>
void poly<coord_t>::remap(const array<int>& remap_table)
// Rewrite our two stored vert indices through remap_table (used after
// the global vert array has been reordered).
{
	assert(m_loop > -1);
	assert(m_leftmost_vert > -1);

	m_loop = remap_table[m_loop];
	m_leftmost_vert = remap_table[m_leftmost_vert];
}


template<class coord_t>
void poly<coord_t>::remap_for_duped_verts(const array<vert_t>& sorted_verts, int v0, int v1)
// Remap for the case of v0 and v1 being duplicated, and subsequent
// verts being shifted up.
{
	assert(m_loop > -1);
	assert(m_leftmost_vert > -1);

	m_loop = remap_index_for_duped_verts(m_loop, v0, v1);
	m_leftmost_vert = remap_index_for_duped_verts(m_leftmost_vert, v0, v1);

	// Remap the vert indices stored in the edge index.
	if (m_edge_index)
	{
		// Optimization: we don't need to remap anything
		// that's wholly to the left of v0.  Towards the end
		// of bridge building, this could be the vast majority
		// of edges.
		assert(v0 < v1);
		index_box<coord_t> bound = m_edge_index->get_bound();
		bound.min.x = sorted_verts[v0].m_v.x;

		for (ib_iterator it = m_edge_index->begin(bound); ! it.at_end(); ++it)
		{
			it->value = remap_index_for_duped_verts(it->value, v0, v1);
		}
	}

	// We shouldn't have a point index right now.
	assert(m_reflex_point_index == NULL);
}


template<class coord_t>
void poly<coord_t>::classify_vert(array<vert_t>* sorted_verts, int vi)
// Decide if vi is an ear, and mark its m_is_ear flag & update counts.
//
// A vert can only be an ear if it is convex (m_convex_result > 0),
// passes the two cone tests against its neighbors, and its triangle
// contains no reflex vertex.  (See the CE2 classification comment in
// build_ear_list below.)
{
	poly_vert<coord_t>* pvi = &((*sorted_verts)[vi]);
	const poly_vert<coord_t>* pv_prev = &((*sorted_verts)[pvi->m_prev]);
	const poly_vert<coord_t>* pv_next = &((*sorted_verts)[pvi->m_next]);

	if (pvi->m_convex_result > 0)
	{
		if (vert_in_cone(*sorted_verts, pvi->m_prev, vi, pvi->m_next, pv_next->m_next)
		    && vert_in_cone(*sorted_verts, pvi->m_next, pv_prev->m_prev, pvi->m_prev, vi))
		{
			if (! ear_contains_reflex_vertex(*sorted_verts, pvi->m_prev, vi, pvi->m_next))
			{
				// Valid ear.
				assert(pvi->m_is_ear == false);
				pvi->m_is_ear = true;
				m_ear_count++;
			}
		}
	}
}


template<class coord_t>
void poly<coord_t>::dirty_vert(array<vert_t>* sorted_verts, int vi)
// Call when an adjacent vert gets clipped.  Recomputes
// m_convex_result and clears m_is_ear for the vert.
//
// Also keeps m_reflex_point_index in sync: a vert entering the reflex
// state is added to the index, one leaving it is removed.
{
	poly_vert<coord_t>* pvi = &((*sorted_verts)[vi]);

	int new_convex_result = vertex_left_test<coord_t>((*sorted_verts)[pvi->m_prev].m_v, pvi->m_v, (*sorted_verts)[pvi->m_next].m_v);
	if (new_convex_result < 0 && pvi->m_convex_result >= 0)
	{
		// Vert is newly reflex.
		// Add to reflex vert index
		assert(m_reflex_point_index);
		m_reflex_point_index->add(index_point<coord_t>(pvi->m_v.x, pvi->m_v.y), vi);
	}
	else if (pvi->m_convex_result < 0 && new_convex_result >= 0)
	{
		// Vert is newly convex/colinear.
		// Remove from reflex vert index.
		assert(m_reflex_point_index);
		ip_iterator it = m_reflex_point_index->find(index_point<coord_t>(pvi->m_v.x, pvi->m_v.y), vi);
		assert(it.at_end() == false);
		m_reflex_point_index->remove(&(*it));
	}
	pvi->m_convex_result = new_convex_result;

	if (pvi->m_is_ear)
	{
		// Clear its ear flag.
		pvi->m_is_ear = false;
		m_ear_count--;
	}
}


template<class coord_t>
bool poly<coord_t>::build_ear_list(array<vert_t>* sorted_verts, tu_random::generator* rg)
// Initialize our ear loop with all the ears that can be clipped.
//
// Returns true if we clipped any degenerates while looking for ears.
//
// NOTE(review): rg is currently unused in the visible body — presumably
// reserved for randomized ear selection; confirm against callers.
{
	assert(is_valid(*sorted_verts));
	assert(m_ear_count == 0);

	bool clipped_any_degenerates = false;

	if (m_vertex_count < 3)
	{
		// Not a real poly, no ears.
		return false;
	}

	// Go around the loop, evaluating the verts.
	int vi = m_loop;
	int verts_processed_count = 0;
	for (;;)
	{
		const poly_vert<coord_t>* pvi = &((*sorted_verts)[vi]);
		const poly_vert<coord_t>* pv_prev = &((*sorted_verts)[pvi->m_prev]);
		const poly_vert<coord_t>* pv_next = &((*sorted_verts)[pvi->m_next]);

		// classification of ear, CE2 from FIST paper:
		//
		// v[i-1],v[i],v[i+1] of P form an ear of P iff
		//
		// 1. v[i] is a convex vertex
		//
		// 2. the interior plus boundary of triangle
		// v[i-1],v[i],v[i+1] does not contain any reflex vertex of P
		// (except v[i-1] or v[i+1])
		//
		// 3. v[i-1] is in the cone(v[i],v[i+1],v[i+2]) and v[i+1] is
		// in the cone(v[i-2],v[i-1],v[i]) (not strictly necessary,
		// but used for efficiency and robustness)

		if ((pvi->m_v == pv_next->m_v) || (pvi->m_v == pv_prev->m_v)
		    || (vertex_left_test(pv_prev->m_v, pvi->m_v, pv_next->m_v) == 0 && vert_is_duplicated(*sorted_verts, vi) == false))
		{
			// Degenerate case: zero-area triangle.
			//
			// Remove it (and any additional degenerates chained onto this ear).
			vi = remove_degenerate_chain(sorted_verts, vi);
			clipped_any_degenerates = true;
			if (m_vertex_count < 3)
			{
				break;
			}
			continue;
		}
		else
		{
			classify_vert(sorted_verts, vi);
		}

		vi = pvi->m_next;
		verts_processed_count++;
		if (verts_processed_count >= m_vertex_count)
		{
			break;
		}

		// Performance optimization: if we're finding lots of
		// ears, keep our working set local by processing a
		// few ears soon after examining them.
		if (m_ear_count > 5 && verts_processed_count > 10)
		{
			break;
		}
	}
	assert(is_valid(*sorted_verts, true /* do check for dupes */));

	// @@ idea for cheap ear shape control: m_loop = best_ear_found;

	return clipped_any_degenerates;
}


template<class coord_t>
int poly<coord_t>::get_next_ear(const array<vert_t>& sorted_verts, tu_random::generator* rg)
// Return the next ear to be clipped.
//
// Advances m_loop to the next vert flagged as an ear; optional
// RANDOMIZE block skips a random number of ears to improve average
// ear shape.
{
	assert(m_ear_count > 0);

	// There must be an ear somewhere in the loop (asserted above),
	// so this walk terminates.
	while (sorted_verts[m_loop].m_is_ear == false)
	{
		m_loop = sorted_verts[m_loop].m_next;
	}
	int next_ear = m_loop;

// define this if you want to randomize the ear selection (should
// improve the average ear shape, at low cost).
//#define RANDOMIZE
#ifdef RANDOMIZE
	// Randomization: skip a random number of ears.
	if (m_ear_count > 6)
	{
		// Decide how many ears to skip.
		// Here's a lot of twiddling to avoid a % op.  Worth it?
		int random_range = m_ear_count >> 2;
		static const int MASK_TABLE_SIZE = 8;
		int random_mask[MASK_TABLE_SIZE] = {
			1, 1, 1, 3, 3, 3, 3, 7	// roughly, the largest (2^N-1) <= index
		};
		if (random_range >= MASK_TABLE_SIZE) random_range = MASK_TABLE_SIZE - 1;
		assert(random_range > 0);
		int random_skip = rg->next_random() & random_mask[random_range];

		// Do the skipping, by manipulating m_loop.
		while (random_skip > 0)
		// NOTE(review): SOURCE is truncated here — the remainder of
		// get_next_ear() (skip loop body, #endif, return) is not in view.
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -