
📄 matrix_assign.hpp

📖 Page 1 of 5
    // (continued from the previous page: tail of the row-major iterating scalar-assign case)
            ++ it1;
        }
    }

    // Iterating column major case
    template<class F, class M, class T>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void iterating_matrix_assign_scalar (F, M &m, const T &t, column_major_tag) {
        typedef F functor_type;
        typedef typename M::difference_type difference_type;
        difference_type size2 (m.size2 ());
        difference_type size1 (m.size1 ());
        typename M::iterator2 it2 (m.begin2 ());
        BOOST_UBLAS_CHECK (size1 == 0 || m.end2 () - it2 == size2, bad_size ());
        while (-- size2 >= 0) {
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            typename M::iterator1 it1 (it2.begin ());
#else
            typename M::iterator1 it1 (begin (it2, iterator2_tag ()));
#endif
            BOOST_UBLAS_CHECK (it2.end () - it1 == size1, bad_size ());
            difference_type temp_size1 (size1);
#ifndef BOOST_UBLAS_USE_DUFF_DEVICE
            while (-- temp_size1 >= 0)
                functor_type::apply (*it1, t), ++ it1;
#else
            DD (temp_size1, 4, r, (functor_type::apply (*it1, t), ++ it1));
#endif
            ++ it2;
        }
    }

    // Indexing row major case
    template<class F, class M, class T>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void indexing_matrix_assign_scalar (F, M &m, const T &t, row_major_tag) {
        typedef F functor_type;
        typedef typename M::size_type size_type;
        size_type size1 (m.size1 ());
        size_type size2 (m.size2 ());
        for (size_type i = 0; i < size1; ++ i) {
#ifndef BOOST_UBLAS_USE_DUFF_DEVICE
            for (size_type j = 0; j < size2; ++ j)
                functor_type::apply (m (i, j), t);
#else
            size_type j (0);
            DD (size2, 4, r, (functor_type::apply (m (i, j), t), ++ j));
#endif
        }
    }

    // Indexing column major case
    template<class F, class M, class T>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void indexing_matrix_assign_scalar (F, M &m, const T &t, column_major_tag) {
        typedef F functor_type;
        typedef typename M::size_type size_type;
        size_type size2 (m.size2 ());
        size_type size1 (m.size1 ());
        for (size_type j = 0; j < size2; ++ j) {
#ifndef BOOST_UBLAS_USE_DUFF_DEVICE
            for (size_type i = 0; i < size1; ++ i)
                functor_type::apply (m (i, j), t);
#else
            size_type i (0);
            DD (size1, 4, r, (functor_type::apply (m (i, j), t), ++ i));
#endif
        }
    }

    // Dense (proxy) case
    template<class F, class M, class T, class C>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void matrix_assign_scalar (F, M &m, const T &t, dense_proxy_tag, C) {
        typedef F functor_type;
        typedef C orientation_category;
#ifdef BOOST_UBLAS_USE_INDEXING
        indexing_matrix_assign_scalar (functor_type (), m, t, orientation_category ());
#elif defined (BOOST_UBLAS_USE_ITERATING)
        iterating_matrix_assign_scalar (functor_type (), m, t, orientation_category ());
#else
        typedef typename M::size_type size_type;
        size_type size1 (m.size1 ());
        size_type size2 (m.size2 ());
        if (size1 >= BOOST_UBLAS_ITERATOR_THRESHOLD &&
            size2 >= BOOST_UBLAS_ITERATOR_THRESHOLD)
            iterating_matrix_assign_scalar (functor_type (), m, t, orientation_category ());
        else
            indexing_matrix_assign_scalar (functor_type (), m, t, orientation_category ());
#endif
    }

    // Packed (proxy) row major case
    template<class F, class M, class T>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void matrix_assign_scalar (F, M &m, const T &t, packed_proxy_tag, row_major_tag) {
        typedef F functor_type;
        typedef typename M::difference_type difference_type;
        typename M::iterator1 it1 (m.begin1 ());
        difference_type size1 (m.end1 () - it1);
        while (-- size1 >= 0) {
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            typename M::iterator2 it2 (it1.begin ());
            difference_type size2 (it1.end () - it2);
#else
            typename M::iterator2 it2 (begin (it1, iterator1_tag ()));
            difference_type size2 (end (it1, iterator1_tag ()) - it2);
#endif
            while (-- size2 >= 0)
                functor_type::apply (*it2, t), ++ it2;
            ++ it1;
        }
    }

    // Packed (proxy) column major case
    template<class F, class M, class T>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void matrix_assign_scalar (F, M &m, const T &t, packed_proxy_tag, column_major_tag) {
        typedef F functor_type;
        typedef typename M::difference_type difference_type;
        typename M::iterator2 it2 (m.begin2 ());
        difference_type size2 (m.end2 () - it2);
        while (-- size2 >= 0) {
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            typename M::iterator1 it1 (it2.begin ());
            difference_type size1 (it2.end () - it1);
#else
            typename M::iterator1 it1 (begin (it2, iterator2_tag ()));
            difference_type size1 (end (it2, iterator2_tag ()) - it1);
#endif
            while (-- size1 >= 0)
                functor_type::apply (*it1, t), ++ it1;
            ++ it2;
        }
    }

    // Sparse (proxy) row major case
    template<class F, class M, class T>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void matrix_assign_scalar (F, M &m, const T &t, sparse_proxy_tag, row_major_tag) {
        typedef F functor_type;
        typename M::iterator1 it1 (m.begin1 ());
        typename M::iterator1 it1_end (m.end1 ());
        while (it1 != it1_end) {
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            typename M::iterator2 it2 (it1.begin ());
            typename M::iterator2 it2_end (it1.end ());
#else
            typename M::iterator2 it2 (begin (it1, iterator1_tag ()));
            typename M::iterator2 it2_end (end (it1, iterator1_tag ()));
#endif
            while (it2 != it2_end)
                functor_type::apply (*it2, t), ++ it2;
            ++ it1;
        }
    }

    // Sparse (proxy) column major case
    template<class F, class M, class T>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void matrix_assign_scalar (F, M &m, const T &t, sparse_proxy_tag, column_major_tag) {
        typedef F functor_type;
        typename M::iterator2 it2 (m.begin2 ());
        typename M::iterator2 it2_end (m.end2 ());
        while (it2 != it2_end) {
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            typename M::iterator1 it1 (it2.begin ());
            typename M::iterator1 it1_end (it2.end ());
#else
            typename M::iterator1 it1 (begin (it2, iterator2_tag ()));
            typename M::iterator1 it1_end (end (it2, iterator2_tag ()));
#endif
            while (it1 != it1_end)
                functor_type::apply (*it1, t), ++ it1;
            ++ it2;
        }
    }

    // Dispatcher
    template<class F, class M, class T>
    BOOST_UBLAS_INLINE
    void matrix_assign_scalar (F, M &m, const T &t) {
        typedef F functor_type;
        typedef typename M::storage_category storage_category;
        typedef typename M::orientation_category orientation_category;
        matrix_assign_scalar (functor_type (), m, t, storage_category (), orientation_category ());
    }

    template<class LS, class A, class RI1, class RI2>
    struct matrix_assign_traits {
        typedef LS storage_category;
    };

    template<>
    struct matrix_assign_traits<dense_tag, assign_tag, packed_random_access_iterator_tag, packed_random_access_iterator_tag> {
        typedef packed_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<dense_tag, computed_assign_tag, packed_random_access_iterator_tag, packed_random_access_iterator_tag> {
        typedef packed_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<dense_tag, assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<dense_tag, computed_assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<dense_proxy_tag, assign_tag, packed_random_access_iterator_tag, packed_random_access_iterator_tag> {
        typedef packed_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<dense_proxy_tag, computed_assign_tag, packed_random_access_iterator_tag, packed_random_access_iterator_tag> {
        typedef packed_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<dense_proxy_tag, assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<dense_proxy_tag, computed_assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<packed_tag, assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<packed_tag, computed_assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<packed_proxy_tag, assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<packed_proxy_tag, computed_assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<sparse_tag, computed_assign_tag, dense_random_access_iterator_tag, dense_random_access_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<sparse_tag, computed_assign_tag, packed_random_access_iterator_tag, packed_random_access_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };
    template<>
    struct matrix_assign_traits<sparse_tag, computed_assign_tag, sparse_bidirectional_iterator_tag, sparse_bidirectional_iterator_tag> {
        typedef sparse_proxy_tag storage_category;
    };

    // Iterating row major case
    template<class F, class M, class E>
    // This function seems to be big. So we do not let the compiler inline it.
    // BOOST_UBLAS_INLINE
    void iterating_matrix_assign (F, M &m, const matrix_expression<E> &e, row_major_tag) {
        typedef F functor_type;
        typedef typename M::difference_type difference_type;
        difference_type size1 (BOOST_UBLAS_SAME (m.size1 (), e ().size1 ()));
        difference_type size2 (BOOST_UBLAS_SAME (m.size2 (), e ().size2 ()));
        typename M::iterator1 it1 (m.begin1 ());
        BOOST_UBLAS_CHECK (size2 == 0 || m.end1 () - it1 == size1, bad_size ());
        typename E::const_iterator1 it1e (e ().begin1 ());
        BOOST_UBLAS_CHECK (size2 == 0 || e ().end1 () - it1e == size1, bad_size ());
        while (-- size1 >= 0) {
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            typename M::iterator2 it2 (it1.begin ());
            typename E::const_iterator2 it2e (it1e.begin ());
#else
            typename M::iterator2 it2 (begin (it1, iterator1_tag ()));
    // (the listing continues on page 2 of 5)
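
For orientation, here is a minimal sketch of how this machinery is reached through the public uBLAS interface. In the uBLAS sources this listing comes from, scalar compound assignments such as operator*= forward to the matrix_assign_scalar dispatcher with a multiply-assign functor; matrix, scalar_matrix, and the io header below are standard uBLAS, while the comments paraphrase the dispatch logic above.

    #include <iostream>
    #include <boost/numeric/ublas/matrix.hpp>
    #include <boost/numeric/ublas/io.hpp>

    int main () {
        namespace ublas = boost::numeric::ublas;
        // A dense row-major matrix: its storage_category is dense_tag
        // (which uBLAS derives from dense_proxy_tag) and its
        // orientation_category is row_major_tag, so the dispatcher
        // lands in the dense kernel shown above.
        ublas::matrix<double> m (3, 3);
        m = ublas::scalar_matrix<double> (3, 3, 2.0);
        // Scaling applies a scalar functor to every element; the dense
        // kernel picks iterator or index traversal by comparing both
        // sizes against BOOST_UBLAS_ITERATOR_THRESHOLD.
        m *= 3.0;
        std::cout << m << std::endl;  // [3,3]((6,6,6),(6,6,6),(6,6,6))
        return 0;
    }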
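
The DD macro used in the BOOST_UBLAS_USE_DUFF_DEVICE branches above unrolls an inner loop Duff's-device style, apparently by four per its second argument. The macro itself is defined elsewhere in uBLAS; the following self-contained sketch shows only the underlying technique, and duff_apply_4 is a hypothetical name, not uBLAS API.

    #include <cstddef>

    // Hypothetical stand-in for the DD (n, 4, ...) pattern: executes
    // stmt exactly n times, four copies per pass, by switching into
    // the middle of a do-while to absorb the n % 4 remainder.
    template<class Stmt>
    void duff_apply_4 (std::ptrdiff_t n, Stmt stmt) {
        if (n <= 0)
            return;
        std::ptrdiff_t passes = (n + 3) / 4;
        switch (n % 4) {
        case 0: do { stmt ();
        case 3:      stmt ();
        case 2:      stmt ();
        case 1:      stmt ();
                } while (-- passes > 0);
        }
    }

    // Usage: the unrolled equivalent of the non-Duff branch
    //     while (-- size >= 0) functor_type::apply (*it, t), ++ it;
    void add_to_all (double *p, std::ptrdiff_t n, double t) {
        duff_apply_4 (n, [&] { *p += t; ++ p; });
    }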
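
The matrix_assign_traits table re-dispatches an assignment to a weaker kernel when the right-hand expression only offers packed or sparse iterators, and it forces computed assignments (+=, -=) into sparse storage onto the proxy path, since those must read existing elements rather than rebuild the structure wholesale. Below is a compile-time spot check of one row of the table, assuming these names are reachable as spelled here (later Boost releases move such helpers into a detail namespace).

    #include <type_traits>
    #include <boost/numeric/ublas/matrix.hpp>

    using namespace boost::numeric::ublas;

    // Per the specialization above: a plain assign of a sparse-iterable
    // expression into a dense matrix selects the sparse kernel, which
    // can walk only the stored elements of the expression.
    static_assert (std::is_same<
            matrix_assign_traits<dense_tag, assign_tag,
                                 sparse_bidirectional_iterator_tag,
                                 sparse_bidirectional_iterator_tag>::storage_category,
            sparse_tag>::value,
        "dense <- sparse plain assignment selects the sparse kernel");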
