// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \ingroup SparseCore_Module
  * \class SparseSelfAdjointView
  *
  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
  *
  * \param MatrixType the type of the sparse matrix storing the coefficients
  * \param Mode can be either \c #Lower or \c #Upper
  *
  * \sa SparseMatrixBase::selfadjointView()
  */
namespace internal {

template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

} // end namespace internal

template<typename MatrixType, unsigned int Mode_> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,Mode_> >
{
  public:

    enum {
      Mode = Mode_,
      TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
    };

    typedef EigenBase<SparseSelfAdjointView> Base;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
    typedef internal::remove_all_t<MatrixTypeNested> MatrixTypeNested_;

    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \internal \returns a reference to the nested matrix */
    const MatrixTypeNested_& matrix() const { return m_matrix; }
    std::remove_reference_t<MatrixTypeNested>& matrix() { return m_matrix; }

    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs. */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView, OtherDerived> operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs. */
    template<typename OtherDerived> friend
    Product<OtherDerived, SparseSelfAdjointView> operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView, OtherDerived> operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
    template<typename OtherDerived> friend
    Product<OtherDerived, SparseSelfAdjointView> operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
      *
      * \returns a reference to \c *this
      */
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

    /** \returns an expression of P H P^-1 */
    // TODO implement twists in a more evaluator friendly fashion
    SparseSymmetricPermutationProduct<MatrixTypeNested_,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<MatrixTypeNested_,Mode>(m_matrix, perm);
    }
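
    // A minimal usage sketch for twistedBy (illustrative, not from the original docs; A, P, B, n are assumed names):
    //   SparseMatrix<double> A(n,n);                    // only the Lower triangle of A is referenced
    //   PermutationMatrix<Dynamic,Dynamic,int> P(n);
    //   SparseMatrix<double> B(n,n);
    //   B = A.selfadjointView<Lower>().twistedBy(P);    // B = P * A * P^-1, expanded to a full matrix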

    template<typename SrcMatrixType,int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
    {
      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
    EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)

    template<typename SrcMatrixType,unsigned int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    void resize(Index rows, Index cols)
    {
      EIGEN_ONLY_USED_FOR_DEBUG(rows);
      EIGEN_ONLY_USED_FOR_DEBUG(cols);
      eigen_assert(rows == this->rows() && cols == this->cols()
                && "SparseSelfAdjointView::resize() does not actually allow one to resize.");
    }

  protected:

    MatrixTypeNested m_matrix;
    //mutable VectorI m_countPerRow;
    //mutable VectorI m_countPerCol;
  private:
    template<typename Dest> void evalTo(Dest &) const;
};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}
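
// A usage sketch for selfadjointView (illustrative, not from the original docs; A, x, y, n are assumed names):
//   SparseMatrix<double> A(n,n);            // fill only the lower triangle of A
//   VectorXd x(n), y(n);
//   y = A.selfadjointView<Lower>() * x;     // multiplies as if the full symmetric matrix were stored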

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix = tmp.template triangularView<Mode>();
  else
    m_matrix += alpha * tmp.template triangularView<Mode>();

  return *this;
}
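
// A rankUpdate usage sketch (illustrative, not from the original docs; A, u, n are assumed names):
//   SparseMatrix<double> A(n,n);
//   SparseVector<double> u(n);
//   A.selfadjointView<Lower>().rankUpdate(u, 2.0);   // A += 2 * u * u^*, only the stored lower triangle is written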

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;
};

struct SparseSelfAdjoint2Sparse {};

template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };

226 
227 template< typename DstXprType, typename SrcXprType, typename Functor>
228 struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
229 {
230  typedef typename DstXprType::StorageIndex StorageIndex;
231  typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;
232 
233  template<typename DestScalar,int StorageOrder>
234  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
235  {
236  internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
237  }
238 
239  // FIXME: the handling of += and -= in sparse matrices should be cleanup so that next two overloads could be reduced to:
240  template<typename DestScalar,int StorageOrder,typename AssignFunc>
241  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
242  {
243  SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
244  run(tmp, src, AssignOpType());
245  call_assignment_no_alias_no_transpose(dst, tmp, func);
246  }
247 
248  template<typename DestScalar,int StorageOrder>
249  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
250  const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
251  {
252  SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
253  run(tmp, src, AssignOpType());
254  dst += tmp;
255  }
256 
257  template<typename DestScalar,int StorageOrder>
258  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
259  const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
260  {
261  SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
262  run(tmp, src, AssignOpType());
263  dst -= tmp;
264  }
265 };
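
// This assignment kind is what makes the following work (illustrative, not from the original docs; A, B are assumed names):
//   SparseMatrix<double> B = A.selfadjointView<Lower>();   // expands the stored lower triangle into a full matrix
//   B += A.selfadjointView<Lower>();                       // += and -= go through a full temporary, as above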

} // end namespace internal

/***************************************************************************
* Implementation of sparse self-adjoint times dense matrix
***************************************************************************/

namespace internal {

template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef internal::remove_all_t<SparseLhsTypeNested> SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    ProcessFirstHalf =
      ((Mode&(Upper|Lower))==(Upper|Lower))
      || ( (Mode&Upper) && !LhsIsRowMajor)
      || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };

  SparseLhsTypeNested lhs_nested(lhs);
  LhsEval lhsEval(lhs_nested);

  // process one column of the rhs at a time
  for (Index k=0; k<rhs.cols(); ++k)
  {
    for (Index j=0; j<lhs.outerSize(); ++j)
    {
      LhsIterator i(lhsEval,j);
      // handle diagonal coeff
      if (ProcessSecondHalf)
      {
        while (i && i.index()<j) ++i;
        if(i && i.index()==j)
        {
          res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
          ++i;
        }
      }

      // premultiplied rhs for scatters
      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
      // accumulator for partial scalar product
      typename DenseResType::Scalar res_j(0);
      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
      {
        LhsScalar lhs_ij = i.value();
        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
        res_j += lhs_ij * rhs.coeff(i.index(),k);
        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
      }
      res.coeffRef(j,k) += alpha * res_j;

      // handle diagonal coeff
      if (ProcessFirstHalf && i && (i.index()==j))
        res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
    }
  }
}
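
// Note (added commentary): each stored off-diagonal entry is visited once but applied twice above:
// once through the accumulator res_j (its direct contribution to row j of the result) and once
// through the premultiplied rhs_j (the mirrored, conjugated contribution to row i), so the full
// selfadjoint matrix is applied while only one triangular half is traversed.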

template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename LhsView::MatrixTypeNested_ Lhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhsView.matrix());
    RhsNested rhsNested(rhs);

    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
  {
    typedef typename RhsView::MatrixTypeNested_ Rhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhsView.matrix());

    // transpose everything
    Transpose<Dest> dstT(dst);
    internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore

template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    internal::construct_at<Base>(this, m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
  }

protected:
  typename Rhs::PlainObject m_lhs;
  PlainObject m_result;
};

template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
  typedef Product<Lhs, RhsView, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
  {
    // use construct_at for consistency with the sibling evaluator above (behavior-identical to placement new)
    internal::construct_at<Base>(this, m_result);
    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
  }

protected:
  typename Lhs::PlainObject m_rhs;
  PlainObject m_result;
};

} // end namespace internal

/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==int(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // copy data
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==int(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}
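
// Added commentary: this is the kernel behind assignments of a selfadjoint view (optionally twisted by a
// permutation) to a full sparse matrix, e.g. B = A.selfadjointView<Lower>().twistedBy(P); it counts the
// destination entries in a first pass, then scatters each stored entry and its conjugated mirror.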

template<int SrcMode_,int DstMode_,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder==RowMajor ? (DstMode_==Upper ? Lower : Upper) : DstMode_,
    SrcMode = SrcOrder==RowMajor ? (SrcMode_==Upper ? Lower : Upper) : SrcMode_
  };

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}
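
// Added commentary: this kernel serves assignments between two triangular-stored views, e.g.
//   B.selfadjointView<Upper>() = A.selfadjointView<Lower>().twistedBy(P);
// only one triangular half is copied, with entries conjugated when the permutation moves them
// across the diagonal into the other half.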

} // end namespace internal

// TODO implement twists in a more evaluator friendly fashion

namespace internal {

template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};

} // end namespace internal

template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
    };
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef internal::remove_all_t<MatrixTypeNested> NestedExpression;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    const NestedExpression& matrix() const { return m_matrix; }
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;
};

namespace internal {

template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
  template<int Options>
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

  template<typename DestType,unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H