    template<typename InputIterators, typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
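    // collapseDuplicates() merges entries that share the same (outer, inner) position,
    // combining their values with dup_func; sumupDuplicates() above is the common case
    // where duplicates are simply summed.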
    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j); // body of insertByOuterInner(Index j, Index i)
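    // makeCompressed(): turn the matrix into the standard compressed (CSC/CSR) form.
    // Each inner vector is shifted left so that it starts right after the previous one,
    // then the per-vector counters m_innerNonZeros are freed since they become redundant.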
    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
    }
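    // uncompress(): the converse of makeCompressed(); allocate m_innerNonZeros and
    // derive each count from consecutive outer-index entries, so that elements can
    // later be inserted without recompressing the whole matrix.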
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
    }
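    // prune(reference, epsilon) drops entries that are much smaller than reference;
    // the generic overload below keeps exactly the entries for which
    // keep(row, col, value) returns true and compacts the remaining data in place.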
      prune(default_prunning_func(reference,epsilon));
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
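    // conservativeResize(rows, cols): resize while preserving the entries that still fit.
    // innerChange/outerChange are the signed size changes along the inner and outer
    // dimensions; the per-vector counters and the outer index table are adjusted accordingly.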
    void conservativeResize(Index rows, Index cols)
    {
      if (this->rows() == rows && this->cols() == cols) return;
      if (rows==0 || cols==0) return resize(rows,cols);
      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      Index newInnerSize = IsRowMajor ? cols : rows;
      // grow the per-vector counters to the new outer size
      if (m_innerNonZeros)
      {
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;
        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
        for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      // shrink the counts if the inner dimension decreased
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }
      m_innerSize = newInnerSize;
      // re-allocate the outer index structure if necessary
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = lastIdx;
      }
      m_outerSize += outerChange;
    }
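    // resize(rows, cols): destructive resize. All entries are dropped, the outer index
    // table is reallocated if the outer dimension changed, and everything is zeroed.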
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();
        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }
    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    { check_template_parameters(); resize(0, 0); }

    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    { check_template_parameters(); resize(rows, cols); }
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
                          YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
      if (needToTranspose)
        *this = other.derived();
      else
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        internal::call_assignment_no_alias(*this, other.derived());
      }
    }
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }
    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }
    // swap the content of *this and other without any copy
    inline void swap(SparseMatrix& other)
    {
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }
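    // setIdentity(): reset *this to the identity matrix (square matrices only),
    // stored in compressed form with exactly one entry per inner vector.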
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      std::free(m_innerNonZeros);
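    // Copy assignment: right-hand-side rvalues are simply swapped in. Otherwise the
    // storage is re-initialized and, when `other` is compressed, the outer index table
    // and the packed data are copied directly; the uncompressed case falls back to the
    // generic sparse assignment.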
    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
        swap(other.const_cast_derived());
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
          Base::operator=(other);
      }
      return *this;
    }
#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }

    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      s << "Nonzero entries:\n";
      if(m.isCompressed())
      {
        for (Index i=0; i<m.nonZeros(); ++i)
          s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
      }
      else
      {
        for (Index i=0; i<m.outerSize(); ++i)
        {
          Index p = m.m_outerIndex[i];
          Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
          Index k=p;
          for (; k<pe; ++k) {
            s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
          }
          for (; k<m.m_outerIndex[i+1]; ++k) {
            s << "(_,_) ";
          }
        }
      }
      s << std::endl;
      s << "Outer pointers:\n";
      for (Index i=0; i<m.outerSize(); ++i) {
        s << m.m_outerIndex[i] << " ";
      }
      s << " $" << std::endl;
      if(!m.isCompressed())
      {
        s << "Inner non zeros:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_innerNonZeros[i] << " ";
        }
        s << " $" << std::endl;
      }
      s << std::endl;
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }
#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif
    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
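    // SingletonVector emulates a read-only integer vector that is zero everywhere except
    // at a single index; it is used to reserve room for one extra element in exactly one
    // inner vector (see insertUncompressed, which grows a single vector via reserve()).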
    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
    };
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = Scalar(0));
    }
    struct IndexPosPair {
      IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
      Index i; Index p;
    };
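    // assignDiagonal() implements assignments of the form matrix.diagonal() = expr (or +=, -=):
    // existing diagonal coefficients are updated in place; missing ones are inserted, either
    // locally when the inner vector has spare room or, as a last resort, by rebuilding the
    // whole storage with the deferred entries collected in `newEntries`.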
    template<typename DiagXpr, typename Func>
    void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
    {
      Index n = diagXpr.size();

      const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
      if(overwrite)
      {
        if((this->rows()!=n) || (this->cols()!=n))
          this->resize(n, n);
      }

      if(m_data.size()==0 || overwrite)
      {
        typedef Array<StorageIndex,Dynamic,1> ArrayXI;
        this->makeCompressed();
        this->resizeNonZeros(n);
        Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
        Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
        Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
        values.setZero();
        internal::call_assignment_no_alias(values, diagXpr, assignFunc);
      }
      else
      {
        bool isComp = isCompressed();
        internal::evaluator<DiagXpr> diaEval(diagXpr);
        std::vector<IndexPosPair> newEntries;

        // 1 - try in-place update and record insertion failures
        for(Index i = 0; i<n; ++i)
        {
          internal::LowerBoundIndex lb = this->lower_bound(i,i);
          Index p = lb.value;
          if(lb.found) // the coefficient already exists
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
          {
            // uncompressed mode with local room to insert one element
            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
            m_innerNonZeros[i]++;
            m_data.value(p) = Scalar(0);
            m_data.index(p) = StorageIndex(i);
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else // no room: defer the insertion
            newEntries.push_back(IndexPosPair(i,p));
        }
        // 2 - insert the deferred entries while shifting the existing data
        Index n_entries = Index(newEntries.size());
        if(n_entries>0)
        {
          Storage newData(m_data.size()+n_entries);
          Index prev_p = 0;
          Index prev_i = 0;
          for(Index k=0; k<n_entries;++k)
          {
            Index i = newEntries[k].i;
            Index p = newEntries[k].p;
            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
            for(Index j=prev_i;j<i;++j)
              m_outerIndex[j+1] += k;
            if(!isComp)
              m_innerNonZeros[i]++;
            prev_p = p;
            prev_i = i;
            newData.value(p+k) = Scalar(0);
            newData.index(p+k) = StorageIndex(i);
            assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
          }
          internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
          internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
          for(Index j=prev_i+1;j<=m_outerSize;++j)
            m_outerIndex[j] += n_entries;
          m_data.swap(newData);
        }
      }
    }
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }
    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
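// internal::set_from_triplets() (defined in namespace internal) builds `mat` from an
// unsorted, possibly duplicated triplet list: it first fills a temporary matrix of the
// opposite storage order, merges duplicates with dup_func, and finally assigns it back,
// which implicitly sorts each inner vector.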
template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the number of entries per inner vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }
    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
    // pass 3: merge duplicates
    trMat.collapseDuplicates(dup_func);
    // pass 4: transposed copy -> implicit sorting
    mat = trMat;
  }
}
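// Typical use of setFromTriplets() (illustrative sketch only; `rows`, `cols` and the
// inserted values are placeholders, not taken from this file):
//
//   std::vector<Eigen::Triplet<double> > triplets;
//   triplets.push_back(Eigen::Triplet<double>(0, 0,  3.0));
//   triplets.push_back(Eigen::Triplet<double>(1, 2, -1.5));
//   triplets.push_back(Eigen::Triplet<double>(1, 2,  0.5));   // duplicate: summed by default
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.setFromTriplets(triplets.begin(), triplets.end());
//   // with an explicit duplicate policy (here: keep the last value):
//   A.setFromTriplets(triplets.begin(), triplets.end(),
//                     [](const double&, const double& b) { return b; });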
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
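// The code below is the body of SparseMatrix::insert(row, col): it creates a new
// coefficient equal to zero at (row, col) and returns a reference to it. A compressed
// matrix is first switched to uncompressed mode, then two cheap append-like paths are
// tried before delegating to insertUncompressed() for the general sorted insertion.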
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);
      // switch to uncompressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
      // pack all inner vectors at the end of the pre-allocated space
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // switch to uncompressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }
  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // first case: we are filling a new inner vector that is packed at the end
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);
    // pack the previous empty inner vectors to the end of the used space
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;
    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);
    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }
  // second case: the current inner vector ends at the used space and the next one is packed at the end
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);
    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    // move it to the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }
    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }
  if(m_data.size() != m_data.allocatedSize())
  {
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }
  return insertUncompressed(row,col);