// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

/** \ingroup SparseCore_Module
  *
  * \class SparseMatrix
  *
  * \brief A versatile sparse matrix representation
  *
  * This class implements a more versatile variant of the common \em compressed row/column storage format.
  * Each column's (resp. row's) non-zeros are stored as pairs of value and associated row (resp. column) index.
  * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
  * space in between the non-zeros of two successive columns (resp. rows) such that insertion of new non-zeros
  * can be done with limited memory reallocation and copies.
  *
  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
  * compatible with many libraries.
  *
  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
  *                 is ColMajor or RowMajor. The default is 0 which means column-major.
  * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
  *
  * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
  * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
  * Code making use of \c SparseMatrix::Index will thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
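  *
  * A minimal usage sketch (illustrative; \c estimated_nnz_per_col is a hypothetical estimate of
  * the number of non-zeros per column):
  * \code
  * SparseMatrix<double> mat(1000,1000);                          // 1000x1000 column-major matrix
  * mat.reserve(VectorXi::Constant(1000, estimated_nnz_per_col)); // optional, but avoids reallocations
  * mat.insert(3,7) = 2.5;                                        // the (3,7) entry must not already exist
  * mat.coeffRef(3,7) += 1.0;                                     // updates it (would insert if absent)
  * mat.makeCompressed();                                         // pack into the standard compressed format
  * \endcode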
  */

namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _StorageIndex StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = LvalueBit
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  enum {
    Flags = 0
  };
};

} // end namespace internal

template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    using Base::convert_index;
    friend class SparseVector<_Scalar,0,_StorageIndex>;
  public:
    using Base::isCompressed;
    using Base::nonZeros;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    using Base::operator+=;
    using Base::operator-=;

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    typedef Diagonal<SparseMatrix> DiagonalReturnType;
    typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;


    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum {
      Options = _Options
    };

    typedef typename Base::IndexVector IndexVector;
    typedef typename Base::ScalarVector ScalarVector;
  protected:
    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    StorageIndex* m_outerIndex;
    StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed
    Storage m_data;

  public:

    /** \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
    inline Index outerSize() const { return m_outerSize; }

    /** \returns a const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
    /** \returns a non-const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline Scalar* valuePtr() { return m_data.valuePtr(); }

    /** \returns a const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    /** \returns a non-const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

    /** \returns a const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), innerIndexPtr() */
    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
    /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), innerIndexPtr() */
    inline StorageIndex* outerIndexPtr() { return m_outerIndex; }

    /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
    /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }

    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }

    /** \returns the value of the matrix at position \a row, \a col
      * This function returns Scalar(0) if the element is an explicit \em zero */
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
    }

    /** \returns a non-const reference to the value of the matrix at position \a row, \a col
      *
      * If the element does not exist then it is inserted via the insert(Index,Index) function
      * which itself turns the matrix into a non compressed form if that was not the case.
      *
      * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
      * function if the element does not already exist.
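      *
      * For instance, a minimal update-or-insert sketch (illustrative):
      * \code
      * SparseMatrix<double> A(10,10);
      * A.coeffRef(2,3) += 1.0; // inserts the (2,3) entry since it does not exist yet
      * \endcode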
      */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }

    /** \returns a reference to a new non-zero coefficient with coordinates \a row x \a col.
      * The non-zero coefficient must \b not already exist.
      *
      * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
      * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
      * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
      * inserted by increasing outer-indices.
      *
      * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
      * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
      *
      * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
      * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
      *
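      * A typical reserve-then-insert pattern, in the spirit of the tutorial (here \c n and the
      * per-column estimate \c nnz_j are placeholders):
      * \code
      * SparseMatrix<double> A(n,n);
      * A.reserve(VectorXi::Constant(n, nnz_j)); // room for about nnz_j non-zeros per column
      * for(...)
      * {
      *   // i,j = coordinates of a new non-zero
      *   A.insert(i,j) = v_ij;
      * }
      * A.makeCompressed(); // optional
      * \endcode
      *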
      */
    Scalar& insert(Index row, Index col);

  public:

    /** Removes all non zeros but keeps the allocated memory
      *
      * This function does not free the currently allocated memory. To release as much memory as possible,
      * call \code mat.data().squeeze(); \endcode after resizing it.
      *
      * \sa resize(Index,Index), data()
      */
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
    }

    /** Preallocates \a reserveSize non zeros.
      *
      * Precondition: the matrix must be in compressed mode. */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

    #ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
      *
      * This function turns the matrix into non-compressed mode.
      *
      * The type \c SizesType must expose the following interface:
      \code
      typedef value_type;
      const value_type& operator[](i) const;
      \endcode
      * for \c i in the [0,this->outerSize()[ range.
      * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
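      *
      * For instance (an illustrative sketch):
      * \code
      * mat.reserve(Eigen::VectorXi::Constant(mat.outerSize(), 6)); // about 6 non-zeros per inner vector
      * \endcode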
      */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
        typename
    #endif
        SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerNonZeros to hold the new starting points.
        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }

    }
  public:

    //--- low level purely coherent filling ---

    /** \internal
      * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
      * - the nonzero does not already exist
      * - the new coefficient is the last one according to the storage order
      *
      * Before filling a given inner vector you must call the startVec(Index) function.
      *
      * After an insertion session, you should call the finalize() function.
      *
      * \sa insert, insertBackByOuterInner, startVec */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \internal
      * \sa insertBack, startVec */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal
      * \warning use it only if you know what you are doing */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal
      * \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    /** \internal
      * Must be called after inserting a set of non zero entries using the low level compressed API.
      */
    inline void finalize()
    {
      if(isCompressed())
      {
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        // find the last filled column
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }

    //---

    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators,typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());

    //---

    /** \internal
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    /** Turns the matrix into the \em compressed format.
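      *
      * A small illustrative sketch:
      * \code
      * SparseMatrix<double> A(3,3);
      * A.insert(0,0) = 1; A.insert(2,1) = 2; // A may now be in uncompressed mode
      * A.makeCompressed();                   // packs the buffers; innerNonZeroPtr() becomes null
      * \endcode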
      */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }

    /** Turns the matrix into uncompressed mode */
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }

    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
      * The functor type \a KeepFunc must implement the following function:
      * \code
      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
      * \endcode
      * \sa prune(Scalar,RealScalar)
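      *
      * For instance, with a C++11 lambda keeping only the entries of the strict lower triangle
      * (an illustrative sketch):
      * \code
      * mat.prune([](const Index& row, const Index& col, const double&) { return row > col; });
      * \endcode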
      */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }

    /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
      *
      * If the sizes of the matrix are decreased, then the matrix is turned to \b uncompressed-mode
      * and the storage of the out of bounds coefficients is kept and reserved.
      * Call makeCompressed() to pack the entries and squeeze extra memory.
      *
      * \sa reserve(), setZero(), makeCompressed()
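      *
      * For instance (illustrative): growing keeps existing entries, while shrinking drops the
      * out-of-range ones:
      * \code
      * SparseMatrix<double> A(5,5);
      * A.insert(4,4) = 1.0;
      * A.conservativeResize(3,8); // the (4,4) entry is dropped, the new columns are empty
      * \endcode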
      */
    void conservativeResize(Index rows, Index cols)
    {
      // No change
      if (this->rows() == rows && this->cols() == cols) return;

      // If one dimension is null, then there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

      // Deals with inner non zeros
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }

      // Change the m_innerNonZeros in case of a decrease of inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // Re-allocate outer index structure if necessary
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }

    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
      *
      * This function does not free the currently allocated memory. To release as much memory as possible,
      * call \code mat.data().squeeze(); \endcode after resizing it.
      *
      * \sa reserve(), setZero()
      */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }

    /** \internal
      * Resize the nonzero vector to \a size */
    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

    /** \returns a const expression of the diagonal coefficients. */
    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

    /** \returns a read-write expression of the diagonal coefficients.
      * \warning If the diagonal entries are written, then all diagonal
      * entries \b must already exist, otherwise an assertion will be raised.
      */
    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }

    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
      if (needToTranspose)
        *this = other.derived();
      else
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        internal::call_assignment_no_alias(*this, other.derived());
      }
    }

    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }

    /** Copy constructor (it performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** Swaps the content of two sparse matrices of the same type.
      * This is a fast operation that simply swaps the underlying pointers and parameters. */
    inline void swap(SparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    /** Sets *this to the identity matrix.
      * This function also turns the matrix into compressed mode, and drops any reserved memory. */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
      Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
      Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }
    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
#endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
        {
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        }
        else
        {
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k) {
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            }
            for (; k<m.m_outerIndex[i+1]; ++k) {
              s << "(_,_) ";
            }
          }
        }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_outerIndex[i] << " ";
        }
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i) {
            s << m.m_innerNonZeros[i] << " ";
          }
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    /** Destructor */
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

    /** Overloaded for performance */
    Scalar sum() const;

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

protected:

    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

    /** \internal
      * A vector object that is equal to 0 everywhere except for a value \a v at position \a i */
    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

public:
    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = Scalar(0));
    }

private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};

namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the nnz per inner-vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3:
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

}


/** Fills the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
  *
  * A \em triplet is a tuple (i,j,value) defining a non-zero element.
  * The input list of triplets does not have to be sorted, and it can contain duplicated elements.
  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
  * This is an \em O(n) operation, with \em n the number of triplet elements.
  * The initial contents of \c *this are destroyed.
  * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
  * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
  *
  * The \a InputIterators value_type must provide the following interface:
  * \code
  * Scalar value() const; // the value
  * Index row() const;    // the row index i
  * Index col() const;    // the column index j
  * \endcode
  * See for instance the Eigen::Triplet template class.
  *
  * Here is a typical usage example:
  * \code
    typedef Triplet<double> T;
    std::vector<T> tripletList;
    tripletList.reserve(estimation_of_entries);
    for(...)
    {
      // ...
      tripletList.push_back(T(i,j,v_ij));
    }
    SparseMatrixType m(rows,cols);
    m.setFromTriplets(tripletList.begin(), tripletList.end());
    // m is ready to go!
  * \endcode
  *
  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
  * be explicitly stored into a std::vector for instance.
  */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}

/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
  * \code
  * value = dup_func(OldValue, NewValue)
  * \endcode
  * Here is a C++11 example keeping the latest entry only:
  * \code
  * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
  * \endcode
  */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}

/** \internal */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we have already met this entry => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two passes algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1
    // FIXME the above copy could be merged with that pass
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}

template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner-vectors to the end of the pre-allocated space
      // and allocate the entire free-space to the first inner-vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner-vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner-vectors to end of the used-space
    // and allocate the entire free-space to the current inner-vector.
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner-vector is packed to the end
  // and the current inner-vector's end matches the used-space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = 0);
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible to random un-compressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data, to reduce multiple reallocations
      // we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integer overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor reallocs when the matrix is almost filled
      //   2) avoid allocating too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops wrong guess.
      // let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vec
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift data of last vecs:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

}

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H