1 | // This file is part of Eigen, a lightweight C++ template library |
2 | // for linear algebra. |
3 | // |
4 | // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> |
5 | // |
6 | // This Source Code Form is subject to the terms of the Mozilla |
7 | // Public License v. 2.0. If a copy of the MPL was not distributed |
8 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. |
9 | |
10 | #ifndef EIGEN_SPARSE_BLOCK_H |
11 | #define EIGEN_SPARSE_BLOCK_H |
12 | |
13 | namespace Eigen { |
14 | |
// Specialization for "inner panel" blocks, i.e., blocks made of a subset of whole
// columns (resp. rows) of a column-major (resp. row-major) sparse expression.
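//
// A minimal usage sketch (identifiers are illustrative, not part of this file):
//   SparseMatrix<double> A, B;
//   (A + B).middleCols(j, n);  // inner panel of a generic sparse expression -> this class
//   A.middleCols(j, n);        // same shape on a plain SparseMatrix -> the writable
//                              // specialization defined further below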
16 | template<typename XprType, int BlockRows, int BlockCols> |
17 | class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse> |
18 | : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> > |
19 | { |
20 | typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested; |
21 | typedef Block<XprType, BlockRows, BlockCols, true> BlockType; |
22 | public: |
23 | enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor }; |
24 | protected: |
25 | enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; |
26 | typedef SparseMatrixBase<BlockType> Base; |
27 | using Base::convert_index; |
28 | public: |
29 | EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) |
30 | |
31 | inline BlockImpl(XprType& xpr, Index i) |
32 | : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize) |
33 | {} |
34 | |
35 | inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) |
36 | : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) |
37 | {} |
38 | |
39 | EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } |
40 | EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } |
41 | |
42 | Index nonZeros() const |
43 | { |
44 | typedef internal::evaluator<XprType> EvaluatorType; |
45 | EvaluatorType matEval(m_matrix); |
46 | Index nnz = 0; |
47 | Index end = m_outerStart + m_outerSize.value(); |
48 | for(Index j=m_outerStart; j<end; ++j) |
49 | for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it) |
50 | ++nnz; |
51 | return nnz; |
52 | } |
53 | |
54 | inline const Scalar coeff(Index row, Index col) const |
55 | { |
56 | return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); |
57 | } |
58 | |
59 | inline const Scalar coeff(Index index) const |
60 | { |
61 | return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); |
62 | } |
63 | |
64 | inline const XprType& nestedExpression() const { return m_matrix; } |
65 | inline XprType& nestedExpression() { return m_matrix; } |
66 | Index startRow() const { return IsRowMajor ? m_outerStart : 0; } |
67 | Index startCol() const { return IsRowMajor ? 0 : m_outerStart; } |
68 | Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } |
69 | Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } |
70 | |
71 | protected: |
72 | |
73 | typename internal::ref_selector<XprType>::non_const_type m_matrix; |
74 | Index m_outerStart; |
75 | const internal::variable_if_dynamic<Index, OuterSize> m_outerSize; |
76 | |
77 | protected: |
78 | // Disable assignment with clear error message. |
79 | // Note that simply removing operator= yields compilation errors with ICC+MSVC |
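  // (The sizeof(T)==0 condition is always false, but because it depends on T the
  // static assertion only fires if this operator= is actually instantiated.)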
80 | template<typename T> |
81 | BlockImpl& operator=(const T&) |
82 | { |
83 | EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY); |
84 | return *this; |
85 | } |
86 | }; |
87 | |
88 | |
89 | /*************************************************************************** |
90 | * specialization for SparseMatrix |
91 | ***************************************************************************/ |
92 | |
93 | namespace internal { |
94 | |
95 | template<typename SparseMatrixType, int BlockRows, int BlockCols> |
96 | class sparse_matrix_block_impl |
97 | : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > |
98 | { |
99 | typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested; |
100 | typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType; |
101 | typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base; |
102 | using Base::convert_index; |
103 | public: |
104 | enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor }; |
105 | EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) |
106 | protected: |
107 | typedef typename Base::IndexVector IndexVector; |
108 | enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; |
109 | public: |
110 | |
111 | inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i) |
112 | : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize) |
113 | {} |
114 | |
115 | inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) |
116 | : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) |
117 | {} |
118 | |
119 | template<typename OtherDerived> |
120 | inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other) |
121 | { |
122 | typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType; |
123 | _NestedMatrixType& matrix = m_matrix; |
      // This assignment is slow if the target vector set is not empty
      // and/or is not located at the end of the nonzeros of the underlying matrix.
126 | |
127 | // 1 - eval to a temporary to avoid transposition and/or aliasing issues |
128 | Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived()); |
129 | eigen_internal_assert(tmp.outerSize()==m_outerSize.value()); |
130 | |
131 | // 2 - let's check whether there is enough allocated memory |
132 | Index nnz = tmp.nonZeros(); |
133 | Index start = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block |
134 | Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block |
135 | Index block_size = end - start; // available room in the current block |
136 | Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; |
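      // Storage layout of the destination at this point:
      //   [0, start)            nonzeros located before the block (kept in place)
      //   [start, end)          current nonzeros of the block (to be replaced by tmp)
      //   [end, end+tail_size)  nonzeros located after the block (shifted if nnz != block_size)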
137 | |
138 | Index free_size = m_matrix.isCompressed() |
139 | ? Index(matrix.data().allocatedSize()) + block_size |
140 | : block_size; |
141 | |
142 | Index tmp_start = tmp.outerIndexPtr()[0]; |
143 | |
144 | bool update_trailing_pointers = false; |
145 | if(nnz>free_size) |
146 | { |
147 | // realloc manually to reduce copies |
148 | typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz); |
149 | |
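        // copy the nonzeros located before the block (the head)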
150 | internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr()); |
151 | internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr()); |
152 | |
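        // copy the new nonzeros of the block from the evaluated temporary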
153 | internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start); |
154 | internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start); |
155 | |
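        // copy the nonzeros located after the block (the tail)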
156 | internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz); |
157 | internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz); |
158 | |
159 | newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz); |
160 | |
161 | matrix.data().swap(newdata); |
162 | |
163 | update_trailing_pointers = true; |
164 | } |
165 | else |
166 | { |
167 | if(m_matrix.isCompressed()) |
168 | { |
169 | // no need to realloc, simply copy the tail at its respective position and insert tmp |
170 | matrix.data().resize(start + nnz + tail_size); |
171 | |
172 | internal::smart_memmove(matrix.valuePtr()+end, matrix.valuePtr() + end+tail_size, matrix.valuePtr() + start+nnz); |
173 | internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz); |
174 | |
175 | update_trailing_pointers = true; |
176 | } |
177 | |
178 | internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start); |
179 | internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start); |
180 | } |
181 | |
182 | // update outer index pointers and innerNonZeros |
183 | if(IsVectorAtCompileTime) |
184 | { |
185 | if(!m_matrix.isCompressed()) |
186 | matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz); |
187 | matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start); |
188 | } |
189 | else |
190 | { |
191 | StorageIndex p = StorageIndex(start); |
192 | for(Index k=0; k<m_outerSize.value(); ++k) |
193 | { |
194 | StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros()); |
195 | if(!m_matrix.isCompressed()) |
196 | matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k; |
197 | matrix.outerIndexPtr()[m_outerStart+k] = p; |
198 | p += nnz_k; |
199 | } |
200 | } |
201 | |
202 | if(update_trailing_pointers) |
203 | { |
204 | StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size); |
205 | for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k) |
206 | { |
207 | matrix.outerIndexPtr()[k] += offset; |
208 | } |
209 | } |
210 | |
211 | return derived(); |
212 | } |
213 | |
214 | inline BlockType& operator=(const BlockType& other) |
215 | { |
216 | return operator=<BlockType>(other); |
217 | } |
218 | |
219 | inline const Scalar* valuePtr() const |
220 | { return m_matrix.valuePtr(); } |
221 | inline Scalar* valuePtr() |
222 | { return m_matrix.valuePtr(); } |
223 | |
224 | inline const StorageIndex* innerIndexPtr() const |
225 | { return m_matrix.innerIndexPtr(); } |
226 | inline StorageIndex* innerIndexPtr() |
227 | { return m_matrix.innerIndexPtr(); } |
228 | |
229 | inline const StorageIndex* outerIndexPtr() const |
230 | { return m_matrix.outerIndexPtr() + m_outerStart; } |
231 | inline StorageIndex* outerIndexPtr() |
232 | { return m_matrix.outerIndexPtr() + m_outerStart; } |
233 | |
234 | inline const StorageIndex* innerNonZeroPtr() const |
235 | { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); } |
236 | inline StorageIndex* innerNonZeroPtr() |
237 | { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); } |
238 | |
239 | bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; } |
240 | |
241 | inline Scalar& coeffRef(Index row, Index col) |
242 | { |
243 | return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); |
244 | } |
245 | |
246 | inline const Scalar coeff(Index row, Index col) const |
247 | { |
248 | return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); |
249 | } |
250 | |
251 | inline const Scalar coeff(Index index) const |
252 | { |
253 | return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); |
254 | } |
255 | |
256 | const Scalar& lastCoeff() const |
257 | { |
258 | EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl); |
259 | eigen_assert(Base::nonZeros()>0); |
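      // In compressed mode the last nonzero of this inner vector is stored right
      // before the next outer index; otherwise its offset within the vector is
      // given by innerNonZeroPtr().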
260 | if(m_matrix.isCompressed()) |
261 | return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; |
262 | else |
263 | return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; |
264 | } |
265 | |
266 | EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } |
267 | EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } |
268 | |
269 | inline const SparseMatrixType& nestedExpression() const { return m_matrix; } |
270 | inline SparseMatrixType& nestedExpression() { return m_matrix; } |
271 | Index startRow() const { return IsRowMajor ? m_outerStart : 0; } |
272 | Index startCol() const { return IsRowMajor ? 0 : m_outerStart; } |
273 | Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } |
274 | Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } |
275 | |
276 | protected: |
277 | |
278 | typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix; |
279 | Index m_outerStart; |
280 | const internal::variable_if_dynamic<Index, OuterSize> m_outerSize; |
281 | |
282 | }; |
283 | |
284 | } // namespace internal |
285 | |
286 | template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols> |
287 | class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse> |
288 | : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols> |
289 | { |
290 | public: |
291 | typedef _StorageIndex StorageIndex; |
292 | typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType; |
293 | typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base; |
294 | inline BlockImpl(SparseMatrixType& xpr, Index i) |
295 | : Base(xpr, i) |
296 | {} |
297 | |
298 | inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) |
299 | : Base(xpr, startRow, startCol, blockRows, blockCols) |
300 | {} |
301 | |
302 | using Base::operator=; |
303 | }; |
304 | |
305 | template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols> |
306 | class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse> |
307 | : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols> |
308 | { |
309 | public: |
310 | typedef _StorageIndex StorageIndex; |
311 | typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType; |
312 | typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base; |
313 | inline BlockImpl(SparseMatrixType& xpr, Index i) |
314 | : Base(xpr, i) |
315 | {} |
316 | |
317 | inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) |
318 | : Base(xpr, startRow, startCol, blockRows, blockCols) |
319 | {} |
320 | |
321 | using Base::operator=; |
322 | private: |
323 | template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i); |
324 | template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr); |
325 | }; |
326 | |
327 | //---------- |
328 | |
329 | /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this |
330 | * is col-major (resp. row-major). |
331 | */ |
332 | template<typename Derived> |
333 | typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) |
334 | { return InnerVectorReturnType(derived(), outer); } |
335 | |
336 | /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this |
337 | * is col-major (resp. row-major). Read-only. |
338 | */ |
339 | template<typename Derived> |
340 | const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const |
341 | { return ConstInnerVectorReturnType(derived(), outer); } |
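
// For instance (a sketch; \c A is an illustrative column-major SparseMatrix):
//   A.innerVector(j);   // the j-th column of A (it would be the j-th row if A were row-major)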
342 | |
/** \returns the \a outerSize columns (resp. rows) of the matrix \c *this starting at
  * \a outerStart, if \c *this is col-major (resp. row-major).
  */
346 | template<typename Derived> |
347 | typename SparseMatrixBase<Derived>::InnerVectorsReturnType |
348 | SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) |
349 | { |
350 | return Block<Derived,Dynamic,Dynamic,true>(derived(), |
351 | IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, |
352 | IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); |
353 | |
354 | } |
355 | |
/** \returns the \a outerSize columns (resp. rows) of the matrix \c *this starting at
  * \a outerStart, if \c *this is col-major (resp. row-major). Read-only.
  */
359 | template<typename Derived> |
360 | const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType |
361 | SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const |
362 | { |
363 | return Block<const Derived,Dynamic,Dynamic,true>(derived(), |
364 | IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, |
365 | IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); |
366 | |
367 | } |
368 | |
369 | /** Generic implementation of sparse Block expression. |
  * Read-only.
371 | */ |
372 | template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> |
373 | class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse> |
374 | : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator |
375 | { |
376 | typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType; |
377 | typedef SparseMatrixBase<BlockType> Base; |
378 | using Base::convert_index; |
379 | public: |
380 | enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor }; |
381 | EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) |
382 | |
383 | typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested; |
384 | |
385 | /** Column or Row constructor |
386 | */ |
387 | inline BlockImpl(XprType& xpr, Index i) |
388 | : m_matrix(xpr), |
389 | m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0), |
390 | m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0), |
391 | m_blockRows(BlockRows==1 ? 1 : xpr.rows()), |
392 | m_blockCols(BlockCols==1 ? 1 : xpr.cols()) |
393 | {} |
394 | |
395 | /** Dynamic-size constructor |
396 | */ |
397 | inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) |
398 | : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols)) |
399 | {} |
400 | |
401 | inline Index rows() const { return m_blockRows.value(); } |
402 | inline Index cols() const { return m_blockCols.value(); } |
403 | |
404 | inline Scalar& coeffRef(Index row, Index col) |
405 | { |
406 | return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value()); |
407 | } |
408 | |
409 | inline const Scalar coeff(Index row, Index col) const |
410 | { |
411 | return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); |
412 | } |
413 | |
414 | inline Scalar& coeffRef(Index index) |
415 | { |
416 | return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), |
417 | m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); |
418 | } |
419 | |
420 | inline const Scalar coeff(Index index) const |
421 | { |
422 | return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), |
423 | m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); |
424 | } |
425 | |
426 | inline const XprType& nestedExpression() const { return m_matrix; } |
427 | inline XprType& nestedExpression() { return m_matrix; } |
428 | Index startRow() const { return m_startRow.value(); } |
429 | Index startCol() const { return m_startCol.value(); } |
430 | Index blockRows() const { return m_blockRows.value(); } |
431 | Index blockCols() const { return m_blockCols.value(); } |
432 | |
433 | protected: |
434 | // friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>; |
435 | friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >; |
436 | |
437 | Index nonZeros() const { return Dynamic; } |
438 | |
439 | typename internal::ref_selector<XprType>::non_const_type m_matrix; |
440 | const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow; |
441 | const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol; |
442 | const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows; |
443 | const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols; |
444 | |
445 | protected: |
446 | // Disable assignment with clear error message. |
447 | // Note that simply removing operator= yields compilation errors with ICC+MSVC |
448 | template<typename T> |
449 | BlockImpl& operator=(const T&) |
450 | { |
451 | EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY); |
452 | return *this; |
453 | } |
454 | |
455 | }; |
456 | |
457 | namespace internal { |
458 | |
459 | template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> |
460 | struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased > |
461 | : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> > |
462 | { |
463 | class InnerVectorInnerIterator; |
464 | class OuterVectorInnerIterator; |
465 | public: |
466 | typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType; |
467 | typedef typename XprType::StorageIndex StorageIndex; |
468 | typedef typename XprType::Scalar Scalar; |
469 | |
470 | enum { |
471 | IsRowMajor = XprType::IsRowMajor, |
472 | |
473 | OuterVector = (BlockCols==1 && ArgType::IsRowMajor) |
                  | // FIXME: '|' instead of '||' to silence GCC 4.4.0's spurious "suggest parentheses around &&" warning;
                    // revert to '||' as soon as that workaround is no longer needed.
476 | (BlockRows==1 && !ArgType::IsRowMajor), |
477 | |
478 | CoeffReadCost = evaluator<ArgType>::CoeffReadCost, |
479 | Flags = XprType::Flags |
480 | }; |
481 | |
482 | typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator; |
483 | |
484 | explicit unary_evaluator(const XprType& op) |
485 | : m_argImpl(op.nestedExpression()), m_block(op) |
486 | {} |
487 | |
488 | inline Index nonZerosEstimate() const { |
489 | Index nnz = m_block.nonZeros(); |
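    // nonZeros() returns Dynamic (a negative value) for generic blocks whose exact
    // count is not cheaply available; fall back to a proportional estimate.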
490 | if(nnz<0) |
491 | return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size(); |
492 | return nnz; |
493 | } |
494 | |
495 | protected: |
496 | typedef typename evaluator<ArgType>::InnerIterator EvalIterator; |
497 | |
498 | evaluator<ArgType> m_argImpl; |
499 | const XprType &m_block; |
500 | }; |
501 | |
502 | template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> |
503 | class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator |
504 | : public EvalIterator |
505 | { |
506 | enum { IsRowMajor = unary_evaluator::IsRowMajor }; |
507 | const XprType& m_block; |
508 | Index m_end; |
509 | public: |
510 | |
511 | EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer) |
512 | : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())), |
513 | m_block(aEval.m_block), |
514 | m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()) |
515 | { |
516 | while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) ) |
517 | EvalIterator::operator++(); |
518 | } |
519 | |
520 | inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); } |
521 | inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); } |
522 | inline Index row() const { return EvalIterator::row() - m_block.startRow(); } |
523 | inline Index col() const { return EvalIterator::col() - m_block.startCol(); } |
524 | |
525 | inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; } |
526 | }; |
527 | |
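// Iterator for blocks that are orthogonal to the storage order (see the OuterVector
// flag above): it walks the outer vectors in [start, end) and, in each of them,
// searches for the single inner index covered by the block. Its cost is therefore
// roughly proportional to the number of nonzeros of the traversed outer vectors.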
528 | template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> |
529 | class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator |
530 | { |
531 | enum { IsRowMajor = unary_evaluator::IsRowMajor }; |
532 | const unary_evaluator& m_eval; |
533 | Index m_outerPos; |
534 | const Index m_innerIndex; |
535 | Index m_end; |
536 | EvalIterator m_it; |
537 | public: |
538 | |
539 | EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer) |
540 | : m_eval(aEval), |
541 | m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ), |
542 | m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()), |
543 | m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()), |
544 | m_it(m_eval.m_argImpl, m_outerPos) |
545 | { |
546 | EIGEN_UNUSED_VARIABLE(outer); |
547 | eigen_assert(outer==0); |
548 | |
549 | while(m_it && m_it.index() < m_innerIndex) ++m_it; |
550 | if((!m_it) || (m_it.index()!=m_innerIndex)) |
551 | ++(*this); |
552 | } |
553 | |
554 | inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); } |
555 | inline Index outer() const { return 0; } |
556 | inline Index row() const { return IsRowMajor ? 0 : index(); } |
557 | inline Index col() const { return IsRowMajor ? index() : 0; } |
558 | |
559 | inline Scalar value() const { return m_it.value(); } |
560 | inline Scalar& valueRef() { return m_it.valueRef(); } |
561 | |
562 | inline OuterVectorInnerIterator& operator++() |
563 | { |
564 | // search next non-zero entry |
565 | while(++m_outerPos<m_end) |
566 | { |
567 | // Restart iterator at the next inner-vector: |
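      // (EvalIterator typically holds references and is not assignable, hence the
      // destroy-and-placement-new dance rather than a plain assignment.)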
568 | m_it.~EvalIterator(); |
569 | ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos); |
570 | // search for the key m_innerIndex in the current outer-vector |
571 | while(m_it && m_it.index() < m_innerIndex) ++m_it; |
572 | if(m_it && m_it.index()==m_innerIndex) break; |
573 | } |
574 | return *this; |
575 | } |
576 | |
577 | inline operator bool() const { return m_outerPos < m_end; } |
578 | }; |
579 | |
580 | template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols> |
581 | struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased> |
582 | : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > > |
583 | { |
584 | typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType; |
585 | typedef evaluator<SparseCompressedBase<XprType> > Base; |
586 | explicit unary_evaluator(const XprType &xpr) : Base(xpr) {} |
587 | }; |
588 | |
589 | template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols> |
590 | struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased> |
591 | : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > > |
592 | { |
593 | typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType; |
594 | typedef evaluator<SparseCompressedBase<XprType> > Base; |
595 | explicit unary_evaluator(const XprType &xpr) : Base(xpr) {} |
596 | }; |
597 | |
598 | } // end namespace internal |
599 | |
600 | |
601 | } // end namespace Eigen |
602 | |
603 | #endif // EIGEN_SPARSE_BLOCK_H |
604 | |