1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6//
7// This Source Code Form is subject to the terms of the Mozilla
8// Public License v. 2.0. If a copy of the MPL was not distributed
9// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
11#ifndef EIGEN_XPRHELPER_H
12#define EIGEN_XPRHELPER_H
13
14// just a workaround because GCC seems to not really like empty structs
15// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
16// so currently we simply disable this optimization for gcc 4.3
#if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
  // Declares trivial default and copy constructors for an (otherwise empty)
  // struct X, to help GCC generate better code for empty functor objects.
  #define EIGEN_EMPTY_STRUCT_CTOR(X) \
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
#else
  // Expands to nothing on gcc 4.3 (see strict-aliasing FIXME above).
  #define EIGEN_EMPTY_STRUCT_CTOR(X)
#endif
24
25namespace Eigen {
26
27namespace internal {
28
29template<typename IndexDest, typename IndexSrc>
30EIGEN_DEVICE_FUNC
31inline IndexDest convert_index(const IndexSrc& idx) {
32 // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
33 eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value to big for target type");
34 return IndexDest(idx);
35}
36
37
// promote_scalar_arg is a helper used in operations between an expression and a scalar, like:
//     expression * scalar
// Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression.
// The IsSupported template parameter must be provided by the caller as: internal::has_ReturnType<ScalarBinaryOpTraits<ExprScalar,T,op> >::value using the proper order for ExprScalar and T.
// Then the logic is as follows:
//  - if the operation is natively supported as defined by IsSupported, then the scalar type is not promoted, and T is returned.
//  - otherwise, NumTraits<ExprScalar>::Literal is returned if T is implicitly convertible to NumTraits<ExprScalar>::Literal AND this does not imply a float-to-integer conversion.
//  - otherwise, ExprScalar is returned if T is implicitly convertible to ExprScalar AND this does not imply a float-to-integer conversion.
//  - in all other cases, the promoted type is not defined, and the respective operation is thus invalid and not available (SFINAE).
// Primary template: the IsSupported parameter selects between the natively
// supported fast path (below) and the recursive promotion machinery.
template<typename ExprScalar,typename T, bool IsSupported>
struct promote_scalar_arg;

// Natively supported operation: T is used as-is, no promotion.
template<typename S,typename T>
struct promote_scalar_arg<S,T,true>
{
  typedef T type;
};

// Recursively check safe conversion to PromotedType, and then ExprScalar if they are different.
// IsSafe rules out implicit real-to-integer conversions.
template<typename ExprScalar,typename T,typename PromotedType,
  bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value,
  bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>
struct promote_scalar_arg_unsupported;

// Start recursion with NumTraits<ExprScalar>::Literal
template<typename S,typename T>
struct promote_scalar_arg<S,T,false> : promote_scalar_arg_unsupported<S,T,typename NumTraits<S>::Literal> {};

// We found a match!
template<typename S,typename T, typename PromotedType>
struct promote_scalar_arg_unsupported<S,T,PromotedType,true,true>
{
  typedef PromotedType type;
};

// No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different,
// so let's try to promote to ExprScalar
template<typename ExprScalar,typename T, typename PromotedType>
struct promote_scalar_arg_unsupported<ExprScalar,T,PromotedType,false,true>
   : promote_scalar_arg_unsupported<ExprScalar,T,ExprScalar>
{};

// Unsafe real-to-integer, let's stop.
template<typename S,typename T, typename PromotedType, bool ConvertibleToLiteral>
struct promote_scalar_arg_unsupported<S,T,PromotedType,ConvertibleToLiteral,false> {};

// T is not even convertible to ExprScalar, let's stop.
// (no nested "type" typedef: SFINAE removes the corresponding overload)
template<typename S,typename T>
struct promote_scalar_arg_unsupported<S,T,S,false,true> {};
87
// Classes inheriting no_assignment_operator don't generate a default operator=.
// (pre-C++11 idiom: the copy-assignment operator is declared private and left undefined)
class no_assignment_operator
{
  private:
    no_assignment_operator& operator=(const no_assignment_operator&);
};
94
/** \internal return the index type with the largest number of bits */
template<typename I1, typename I2>
struct promote_index_type
{
  // On equal sizes, I1 wins (the condition below is a strict less-than).
  typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
};
101
/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
  * can be accessed using value() and setValue().
  * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
  */
template<typename T, int Value> class variable_if_dynamic
{
  public:
    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
    // The runtime argument must match the compile-time Value (debug-checked only).
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
    // setValue is a no-op: the value is fixed at compile time.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
};
114
// Dynamic case: the value is stored in a member and mutable at runtime.
template<typename T> class variable_if_dynamic<T, Dynamic>
{
    T m_value;
    // Default construction is forbidden: a runtime value must be provided.
    EIGEN_DEVICE_FUNC variable_if_dynamic() { eigen_assert(false); }
  public:
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value) : m_value(value) {}
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
};
124
/** \internal like variable_if_dynamic but for DynamicIndex
  */
template<typename T, int Value> class variable_if_dynamicindex
{
  public:
    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
    // The runtime argument must match the compile-time Value (debug-checked only).
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
    // setValue is a no-op: the value is fixed at compile time.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
};
135
136template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
137{
138 T m_value;
139 EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }
140 public:
141 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {}
142 EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; }
143 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
144};
145
// Default cost model and capabilities for functors; specialized per functor elsewhere.
template<typename T> struct functor_traits
{
  enum
  {
    Cost = 10,               // rough per-coefficient evaluation cost
    PacketAccess = false,    // no vectorized packetOp() by default
    IsRepeatable = false     // calling twice with same args may differ (e.g., random)
  };
};
155
// Defined in the architecture-specific PacketMath headers.
template<typename T> struct packet_traits;

// Default traits for a "packet" that is just a scalar: size 1, no alignment.
template<typename T> struct unpacket_traits
{
  typedef T type;
  typedef T half;
  enum
  {
    size = 1,
    alignment = 1
  };
};
168
// Recursion stops when the size is dynamic, the packet evenly divides Size,
// or no smaller half-packet exists (half == packet itself).
template<int Size, typename PacketType,
         bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
struct find_best_packet_helper;

template< int Size, typename PacketType>
struct find_best_packet_helper<Size,PacketType,true>
{
  typedef PacketType type;
};

// Otherwise recurse with the half-size packet.
template<int Size, typename PacketType>
struct find_best_packet_helper<Size,PacketType,false>
{
  typedef typename find_best_packet_helper<Size,typename unpacket_traits<PacketType>::half>::type type;
};

// Entry point: best packet type for scalar T and compile-time size Size.
template<typename T, int Size>
struct find_best_packet
{
  typedef typename find_best_packet_helper<Size,typename packet_traits<T>::type>::type type;
};
190
#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
// Finds the largest alignment <= AlignmentBytes that evenly divides ArrayBytes,
// halving the candidate until it matches or drops below EIGEN_MIN_ALIGN_BYTES.
template<int ArrayBytes, int AlignmentBytes,
         bool Match = bool((ArrayBytes%AlignmentBytes)==0),
         bool TryHalf = bool(EIGEN_MIN_ALIGN_BYTES<AlignmentBytes) >
struct compute_default_alignment_helper
{
  enum { value = 0 };
};

template<int ArrayBytes, int AlignmentBytes, bool TryHalf>
struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, true, TryHalf> // Match
{
  enum { value = AlignmentBytes };
};

template<int ArrayBytes, int AlignmentBytes>
struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, false, true> // Try-half
{
  // current packet too large, try with a half-packet
  enum { value = compute_default_alignment_helper<ArrayBytes, AlignmentBytes/2>::value };
};
#else
// If static alignment is disabled, no need to bother.
// This also avoids a division by zero in "bool Match = bool((ArrayBytes%AlignmentBytes)==0)"
template<int ArrayBytes, int AlignmentBytes>
struct compute_default_alignment_helper
{
  enum { value = 0 };
};
#endif

// Fixed-size case: alignment derives from the total byte size of the array.
template<typename T, int Size> struct compute_default_alignment {
  enum { value = compute_default_alignment_helper<Size*sizeof(T),EIGEN_MAX_STATIC_ALIGN_BYTES>::value };
};

// Dynamic-size case: heap buffers get the maximum supported alignment.
template<typename T> struct compute_default_alignment<T,Dynamic> {
  enum { value = EIGEN_MAX_ALIGN_BYTES };
};
229
// Builds a Matrix type whose storage order is coherent with its vector shape:
// row vectors are forced RowMajor, column vectors ColMajor.
template<typename _Scalar, int _Rows, int _Cols,
         int _Options = AutoAlign |
                          ( (_Rows==1 && _Cols!=1) ? RowMajor
                          : (_Cols==1 && _Rows!=1) ? ColMajor
                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
         int _MaxRows = _Rows,
         int _MaxCols = _Cols
> class make_proper_matrix_type
{
    enum {
      IsColVector = _Cols==1 && _Rows!=1,
      IsRowVector = _Rows==1 && _Cols!=1,
      // Override the storage-order bit when the shape dictates it.
      Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
              : IsRowVector ? (_Options | RowMajor) & ~ColMajor
              : _Options
    };
  public:
    typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
};
249
// Computes the compile-time Flags of a plain Matrix/Array type.
template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
class compute_matrix_flags
{
    enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 };
  public:
    // FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<>
    // and then propagate this information to the evaluator's flags.
    // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage.
    enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit };
};
260
// Total number of coefficients, or Dynamic if either dimension is dynamic.
template<int _Rows, int _Cols> struct size_at_compile_time
{
  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
};

// Same, taking the dimensions from an expression's traits.
template<typename XprType> struct size_of_xpr_at_compile_time
{
  enum { ret = size_at_compile_time<traits<XprType>::RowsAtCompileTime,traits<XprType>::ColsAtCompileTime>::ret };
};
270
/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
 * whereas eval is a const reference in the case of a matrix
 */

template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
template<typename T, typename BaseClassType, int Flags> struct plain_matrix_type_dense;
// Dense case: dispatch on the expression kind (Matrix vs Array) and flags.
template<typename T> struct plain_matrix_type<T,Dense>
{
  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, traits<T>::Flags>::type type;
};
// Diagonal case: the expression already knows its plain object type.
template<typename T> struct plain_matrix_type<T,DiagonalShape>
{
  typedef typename T::PlainObject type;
};

// Matrix expression: build a plain Matrix with matching sizes and storage order.
template<typename T, int Flags> struct plain_matrix_type_dense<T,MatrixXpr,Flags>
{
  typedef Matrix<typename traits<T>::Scalar,
                traits<T>::RowsAtCompileTime,
                traits<T>::ColsAtCompileTime,
                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
                traits<T>::MaxRowsAtCompileTime,
                traits<T>::MaxColsAtCompileTime
          > type;
};

// Array expression: same, but as a plain Array.
template<typename T, int Flags> struct plain_matrix_type_dense<T,ArrayXpr,Flags>
{
  typedef Array<typename traits<T>::Scalar,
                traits<T>::RowsAtCompileTime,
                traits<T>::ColsAtCompileTime,
                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
                traits<T>::MaxRowsAtCompileTime,
                traits<T>::MaxColsAtCompileTime
          > type;
};
307
308/* eval : the return type of eval(). For matrices, this is just a const reference
309 * in order to avoid a useless copy
310 */
311
312template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
313
314template<typename T> struct eval<T,Dense>
315{
316 typedef typename plain_matrix_type<T>::type type;
317// typedef typename T::PlainObject type;
318// typedef T::Matrix<typename traits<T>::Scalar,
319// traits<T>::RowsAtCompileTime,
320// traits<T>::ColsAtCompileTime,
321// AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
322// traits<T>::MaxRowsAtCompileTime,
323// traits<T>::MaxColsAtCompileTime
324// > type;
325};
326
// Diagonal expressions evaluate into their plain matrix type.
template<typename T> struct eval<T,DiagonalShape>
{
  typedef typename plain_matrix_type<T>::type type;
};

// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
{
  typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
};

// likewise for plain arrays
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
{
  typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
};
344
345
/* similar to plain_matrix_type, but using the evaluator's Flags */
template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_object_eval;

template<typename T>
struct plain_object_eval<T,Dense>
{
  // Note: evaluator<T>::Flags (not traits<T>::Flags) decides the storage order here.
  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, evaluator<T>::Flags>::type type;
};
354
355
/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
 */
template<typename T> struct plain_matrix_type_column_major
{
  enum { Rows = traits<T>::RowsAtCompileTime,
         Cols = traits<T>::ColsAtCompileTime,
         MaxRows = traits<T>::MaxRowsAtCompileTime,
         MaxCols = traits<T>::MaxColsAtCompileTime
  };
  typedef Matrix<typename traits<T>::Scalar,
                Rows,
                Cols,
                // a row-vector is necessarily row-major; any other shape can be column-major
                (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
                MaxRows,
                MaxCols
          > type;
};
373
/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
 */
template<typename T> struct plain_matrix_type_row_major
{
  enum { Rows = traits<T>::RowsAtCompileTime,
         Cols = traits<T>::ColsAtCompileTime,
         MaxRows = traits<T>::MaxRowsAtCompileTime,
         MaxCols = traits<T>::MaxColsAtCompileTime
  };
  typedef Matrix<typename traits<T>::Scalar,
                Rows,
                Cols,
                // a column-vector is necessarily column-major; any other shape can be row-major
                (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
                MaxRows,
                MaxCols
          > type;
};
391
/** \internal The reference selector for template expressions. The idea is that we don't
  * need to use references for expressions since they are light weight proxy
  * objects which should generate no copying overhead. */
template <typename T>
struct ref_selector
{
  // Nest by const reference if the type requests it (NestByRefBit), else by value.
  typedef typename conditional<
    bool(traits<T>::Flags & NestByRefBit),
    T const&,
    const T
  >::type type;

  // Same selection without the const qualifier, for writable nesting.
  typedef typename conditional<
    bool(traits<T>::Flags & NestByRefBit),
    T &,
    T
  >::type non_const_type;
};
410
/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
template<typename T1, typename T2>
struct transfer_constness
{
  typedef typename conditional<
    bool(internal::is_const<T1>::value),
    typename internal::add_const_on_value_type<T2>::type,
    T2
  >::type type;
};
421
422
// However, we still need a mechanism to detect whether an expression which is evaluated multiple times
// has to be evaluated into a temporary.
// That's the purpose of this new nested_eval helper:
/** \internal Determines how a given expression should be nested when evaluated multiple times.
  * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
  * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is, or
  * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
  * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
  * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
  *
  * \tparam T the type of the expression being nested.
  * \tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
  * \tparam PlainObject the type of the temporary if needed.
  */
template<typename T, int n, typename PlainObject = typename plain_object_eval<T>::type> struct nested_eval
{
  enum {
    ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
    CoeffReadCost = evaluator<T>::CoeffReadCost,  // NOTE What if an evaluator evaluates itself into a temporary?
                                                  //      Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
                                                  //      This situation is already taken care of by the EvalBeforeNestingBit flag, which is turned ON
                                                  //      for all evaluators creating a temporary. This flag is then propagated by the parent evaluators.
                                                  //      Another solution could be to count the number of temps?
    NAsInteger = n == Dynamic ? HugeCost : n,
    // cost with a temporary: build it once (n+1 reads) then read it cheaply
    CostEval   = (NAsInteger+1) * ScalarReadCost + CoeffReadCost,
    // cost without a temporary: re-evaluate the expression n times
    CostNoEval = NAsInteger * CoeffReadCost,
    Evaluate   = (int(evaluator<T>::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval))
  };

  typedef typename conditional<Evaluate, PlainObject, typename ref_selector<T>::type>::type type;
};
454
/** \internal Strips constness from a pointer (helper for legacy non-const-correct paths). */
template<typename T>
EIGEN_DEVICE_FUNC
inline T* const_cast_ptr(const T* ptr)
{
  return const_cast<T*>(ptr);
}
461
// Selects the proper dense base class (MatrixBase or ArrayBase) for Derived.
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
struct dense_xpr_base
{
  /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
};

template<typename Derived>
struct dense_xpr_base<Derived, MatrixXpr>
{
  typedef MatrixBase<Derived> type;
};

template<typename Derived>
struct dense_xpr_base<Derived, ArrayXpr>
{
  typedef ArrayBase<Derived> type;
};

// Generalization of dense_xpr_base dispatching on the storage kind as well;
// other storage kinds (Sparse, ...) specialize this elsewhere.
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>
struct generic_xpr_base;

template<typename Derived, typename XprKind>
struct generic_xpr_base<Derived, XprKind, Dense>
{
  typedef typename dense_xpr_base<Derived,XprKind>::type type;
};
488
// Return type of MatrixBase::cast<NewType>(): when the scalar type does not
// actually change, return a const reference to avoid a useless wrapper.
template<typename XprType, typename CastType> struct cast_return_type
{
  typedef typename XprType::Scalar CurrentScalarType;
  typedef typename remove_all<CastType>::type _CastType;
  typedef typename _CastType::Scalar NewScalarType;
  typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
                              const XprType&,CastType>::type type;
};
497
// Combines two storage kinds; only identical kinds (modulo constness) are defined here.
template <typename A, typename B> struct promote_storage_type;

template <typename A> struct promote_storage_type<A,A>
{
  typedef A ret;
};
// constness is dropped in the promoted kind:
template <typename A> struct promote_storage_type<A, const A>
{
  typedef A ret;
};
template <typename A> struct promote_storage_type<const A, A>
{
  typedef A ret;
};
512
/** \internal Specify the "storage kind" of applying a coefficient-wise
  * binary operations between two expressions of kinds A and B respectively.
  * The template parameter Functor permits to specialize the resulting storage kind wrt to
  * the functor.
  * The default rules are as follows:
  * \code
  * A      op A      -> A
  * A      op dense  -> dense
  * dense  op B      -> dense
  * sparse op dense  -> sparse
  * dense  op sparse -> sparse
  * \endcode
  */
template <typename A, typename B, typename Functor> struct cwise_promote_storage_type;

template <typename A, typename Functor>                   struct cwise_promote_storage_type<A,A,Functor>              { typedef A      ret; };
template <typename Functor>                               struct cwise_promote_storage_type<Dense,Dense,Functor>      { typedef Dense  ret; };
template <typename A, typename Functor>                   struct cwise_promote_storage_type<A,Dense,Functor>          { typedef Dense  ret; };
template <typename B, typename Functor>                   struct cwise_promote_storage_type<Dense,B,Functor>          { typedef Dense  ret; };
template <typename Functor>                               struct cwise_promote_storage_type<Sparse,Dense,Functor>     { typedef Sparse ret; };
template <typename Functor>                               struct cwise_promote_storage_type<Dense,Sparse,Functor>     { typedef Sparse ret; };

// Picks the preferred storage order of a coefficient-wise binary result;
// the sparse operand's order wins, otherwise the left-hand side's order.
template <typename LhsKind, typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order {
  enum { value = LhsOrder };
};

template <typename LhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<LhsKind,Sparse,LhsOrder,RhsOrder> { enum { value = RhsOrder }; };
template <typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<Sparse,RhsKind,LhsOrder,RhsOrder> { enum { value = LhsOrder }; };
template <int Order>                                    struct cwise_promote_storage_order<Sparse,Sparse,Order,Order>        { enum { value = Order }; };
542
543
/** \internal Specify the "storage kind" of multiplying an expression of kind A with kind B.
  * The template parameter ProductTag permits to specialize the resulting storage kind wrt to
  * some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct.
  * The default rules are as follows:
  * \code
  *  K * K            -> K
  *  dense * K        -> dense
  *  K * dense        -> dense
  *  diag * K         -> K
  *  K * diag         -> K
  *  Perm * K         -> K
  * K * Perm          -> K
  * \endcode
  */
template <typename A, typename B, int ProductTag> struct product_promote_storage_type;

template <typename A, int ProductTag> struct product_promote_storage_type<A,                  A,                  ProductTag> { typedef A     ret;};
template <int ProductTag>             struct product_promote_storage_type<Dense,              Dense,              ProductTag> { typedef Dense ret;};
template <typename A, int ProductTag> struct product_promote_storage_type<A,                  Dense,              ProductTag> { typedef Dense ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<Dense,              B,                  ProductTag> { typedef Dense ret; };

// diagonal and permutation operands are "transparent": the other kind wins
template <typename A, int ProductTag> struct product_promote_storage_type<A,                  DiagonalShape,      ProductTag> { typedef A ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<DiagonalShape,      B,                  ProductTag> { typedef B ret; };
template <int ProductTag>             struct product_promote_storage_type<Dense,              DiagonalShape,      ProductTag> { typedef Dense ret; };
template <int ProductTag>             struct product_promote_storage_type<DiagonalShape,      Dense,              ProductTag> { typedef Dense ret; };

template <typename A, int ProductTag> struct product_promote_storage_type<A,                  PermutationStorage, ProductTag> { typedef A ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<PermutationStorage, B,                  ProductTag> { typedef B ret; };
template <int ProductTag>             struct product_promote_storage_type<Dense,              PermutationStorage, ProductTag> { typedef Dense ret; };
template <int ProductTag>             struct product_promote_storage_type<PermutationStorage, Dense,              ProductTag> { typedef Dense ret; };
574
/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
  * \tparam Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.
  */
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_row_type
{
  // a row is always stored row-major
  typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
  typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;

  // pick Matrix vs Array according to the expression kind
  typedef typename conditional<
    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
    MatrixRowType,
    ArrayRowType
  >::type type;
};
592
// Plain Matrix/Array type suited to store a column of ExpressionType.
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_col_type
{
  // a column is always stored column-major
  typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
  typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;

  // pick Matrix vs Array according to the expression kind
  typedef typename conditional<
    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
    MatrixColType,
    ArrayColType
  >::type type;
};
607
// Plain Matrix/Array type suited to store the diagonal of ExpressionType.
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_diag_type
{
  enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
         max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
  };
  typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
  typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;

  // pick Matrix vs Array according to the expression kind
  typedef typename conditional<
    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
    MatrixDiagType,
    ArrayDiagType
  >::type type;
};
623
// Type of a constant (CwiseNullaryOp) expression matching Expr's shape, kind,
// and storage order.
template<typename Expr,typename Scalar = typename Expr::Scalar>
struct plain_constant_type
{
  enum { Options = (traits<Expr>::Flags&RowMajorBit)?RowMajor:0 };

  typedef Array<Scalar,  traits<Expr>::RowsAtCompileTime,   traits<Expr>::ColsAtCompileTime,
                Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> array_type;

  typedef Matrix<Scalar,  traits<Expr>::RowsAtCompileTime,   traits<Expr>::ColsAtCompileTime,
                 Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> matrix_type;

  typedef CwiseNullaryOp<scalar_constant_op<Scalar>, const typename conditional<is_same< typename traits<Expr>::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type;
};
637
// An expression is writable iff it is non-const and advertises LvalueBit.
template<typename ExpressionType>
struct is_lvalue
{
  enum { value = (!bool(is_const<ExpressionType>::value)) &&
                 bool(traits<ExpressionType>::Flags & LvalueBit) };
};
644
// Compile-time detection of diagonal expression types.
template<typename T> struct is_diagonal
{ enum { ret = false }; };

template<typename T> struct is_diagonal<DiagonalBase<T> >
{ enum { ret = true }; };

template<typename T> struct is_diagonal<DiagonalWrapper<T> >
{ enum { ret = true }; };

template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
{ enum { ret = true }; };

// Shape resulting from combining two shapes (e.g. in products/views).
template<typename S1, typename S2> struct glue_shapes;
template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type; };
659
// Returns true if mat1 and mat2 alias the exact same memory layout
// (same data pointer and strides). Requires direct access on both sides.
template<typename T1, typename T2>
bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<has_direct_access<T1>::ret&&has_direct_access<T2>::ret, T1>::type * = 0)
{
  return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
}

// Without direct access the storage cannot be compared, so conservatively answer false.
template<typename T1, typename T2>
bool is_same_dense(const T1 &, const T2 &, typename enable_if<!(has_direct_access<T1>::ret&&has_direct_access<T2>::ret), T1>::type * = 0)
{
  return false;
}
671
672// Internal helper defining the cost of a scalar division for the type T.
673// The default heuristic can be specialized for each scalar type and architecture.
674template<typename T,bool Vectorized=false,typename EnaleIf = void>
675struct scalar_div_cost {
676 enum { value = 8*NumTraits<T>::MulCost };
677};
678
679template<typename T,bool Vectorized>
680struct scalar_div_cost<std::complex<T>, Vectorized> {
681 enum { value = 2*scalar_div_cost<T>::value
682 + 6*NumTraits<T>::MulCost
683 + 3*NumTraits<T>::AddCost
684 };
685};
686
687
688template<bool Vectorized>
689struct scalar_div_cost<signed long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 24 }; };
690template<bool Vectorized>
691struct scalar_div_cost<unsigned long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 21 }; };
692
693
#ifdef EIGEN_DEBUG_ASSIGN
// Debug-only helpers turning internal enum values into readable strings.
std::string demangle_traversal(int t)
{
  if(t==DefaultTraversal) return "DefaultTraversal";
  if(t==LinearTraversal) return "LinearTraversal";
  if(t==InnerVectorizedTraversal) return "InnerVectorizedTraversal";
  if(t==LinearVectorizedTraversal) return "LinearVectorizedTraversal";
  if(t==SliceVectorizedTraversal) return "SliceVectorizedTraversal";
  return "?";
}
std::string demangle_unrolling(int t)
{
  if(t==NoUnrolling) return "NoUnrolling";
  if(t==InnerUnrolling) return "InnerUnrolling";
  if(t==CompleteUnrolling) return "CompleteUnrolling";
  return "?";
}
// Renders a flags bitfield as a " | "-separated list of known flag names.
std::string demangle_flags(int f)
{
  std::string res;
  if(f&RowMajorBit) res += " | RowMajor";
  if(f&PacketAccessBit) res += " | Packet";
  if(f&LinearAccessBit) res += " | Linear";
  if(f&LvalueBit) res += " | Lvalue";
  if(f&DirectAccessBit) res += " | Direct";
  if(f&NestByRefBit) res += " | NestByRef";
  if(f&NoPreferredStorageOrderBit) res += " | NoPreferredStorageOrderBit";

  return res;
}
#endif
725
726} // end namespace internal
727
728
/** \class ScalarBinaryOpTraits
  * \ingroup Core_Module
  *
  * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.
  *
  * This class permits to control the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations.
  *
  * For instance, let \c U1, \c U2 and \c U3 be three user defined scalar types for which most operations between instances of \c U1 and \c U2 returns an \c U3.
  * You can let %Eigen know that by defining:
    \code
    template<typename BinaryOp>
    struct ScalarBinaryOpTraits<U1,U2,BinaryOp> { typedef U3 ReturnType;  };
    template<typename BinaryOp>
    struct ScalarBinaryOpTraits<U2,U1,BinaryOp> { typedef U3 ReturnType;  };
    \endcode
  * You can then explicitly disable some particular operations to get more explicit error messages:
    \code
    template<>
    struct ScalarBinaryOpTraits<U1,U2,internal::scalar_max_op<U1,U2> > {};
    \endcode
  * Or customize the return type for individual operations:
    \code
    template<>
    struct ScalarBinaryOpTraits<U1,U2,internal::scalar_sum_op<U1,U2> > { typedef U1 ReturnType; };
    \endcode
  *
  * By default, the following generic combinations are supported:
  <table class="manual">
  <tr><th>ScalarA</th><th>ScalarB</th><th>BinaryOp</th><th>ReturnType</th><th>Note</th></tr>
  <tr            ><td>\c T </td><td>\c T </td><td>\c * </td><td>\c T </td><td></td></tr>
  <tr class="alt"><td>\c NumTraits<T>::Real </td><td>\c T </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
  <tr            ><td>\c T </td><td>\c NumTraits<T>::Real </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
  </table>
  *
  * \sa CwiseBinaryOp
  */
template<typename ScalarA, typename ScalarB, typename BinaryOp=internal::scalar_product_op<ScalarA,ScalarB> >
struct ScalarBinaryOpTraits
#ifndef EIGEN_PARSED_BY_DOXYGEN
  // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.
  : internal::scalar_product_traits<ScalarA,ScalarB>
#endif // EIGEN_PARSED_BY_DOXYGEN
{};
772
// Same scalar type on both sides: trivially allowed.
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T,T,BinaryOp>
{
  typedef T ReturnType;
};

// Mixing a complex type T with its real type is allowed, returning T.
// The enable_if restricts these specializations to complex T only.
template <typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T, typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, BinaryOp>
{
  typedef T ReturnType;
};
template <typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, T, BinaryOp>
{
  typedef T ReturnType;
};
789
// For Matrix * Permutation
// (permutations have no scalar; void stands in for "no scalar type")
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T,void,BinaryOp>
{
  typedef T ReturnType;
};

// For Permutation * Matrix
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<void,T,BinaryOp>
{
  typedef T ReturnType;
};

// for Permutation*Permutation
template<typename BinaryOp>
struct ScalarBinaryOpTraits<void,void,BinaryOp>
{
  typedef void ReturnType;
};
810
// We require Lhs and Rhs to have "compatible" scalar types.
// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
// add together a float matrix and a double matrix.
// NOTE(review): the macro name misspells "COMPATIBILITY"; it is kept as-is for
// source compatibility with existing call sites.
#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
  EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
818
819} // end namespace Eigen
820
821#endif // EIGEN_XPRHELPER_H
822