MADNESS  version 0.9
gentensor.h
Go to the documentation of this file.
1 /*
2  This file is part of MADNESS.
3 
4  Copyright (C) 2007,2010 Oak Ridge National Laboratory
5 
6  This program is free software; you can redistribute it and/or modify
7  it under the terms of the GNU General Public License as published by
8  the Free Software Foundation; either version 2 of the License, or
9  (at your option) any later version.
10 
11  This program is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with this program; if not, write to the Free Software
18  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 
20  For more information please contact:
21 
22  Robert J. Harrison
23  Oak Ridge National Laboratory
24  One Bethel Valley Road
25  P.O. Box 2008, MS-6367
26 
27  email: harrisonrj@ornl.gov
28  tel: 865-241-3937
29  fax: 865-572-0680
30 
31  $Id$
32 */
33 
34 #ifndef GENTENSOR_H_
35 #define GENTENSOR_H_
36 
37 
114 #include <madness/tensor/tensor.h>
115 #include <madness/tensor/srconf.h>
117 #include <stdexcept>
118 
119 namespace madness {
120 
121  // a forward declaration
122  template <class T> class SliceGenTensor;
123  template <class T> class GenTensor;
124 
125 
127  struct TensorArgs {
128  double thresh;
130  TensorArgs() : thresh(-1.0), tt(TT_NONE) {}
131  TensorArgs(const double& thresh1, const TensorType& tt1)
132  : thresh(thresh1)
133  , tt(tt1) {
134  }
135  static std::string what_am_i(const TensorType& tt) {
136  if (tt==TT_2D) return "TT_2D";
137  if (tt==TT_FULL) return "TT_FULL";
138  return "unknown tensor type";
139  }
140  template <typename Archive>
141  void serialize(const Archive& ar) {
142  int i=int(tt);
143  ar & thresh & i;
144  tt=TensorType(i);
145  }
146  };
147 
148 
150  template<typename T>
151  bool has_zero_rank(const GenTensor<T>& g) {
152  return ((g.rank()==0) or (g.tensor_type()==TT_NONE));
153  }
154 
155 
156 #if !HAVE_GENTENSOR
157 
	/// emulation of the GenTensor interface when low-rank tensors are disabled:
	/// GenTensor is then a thin alias for a full Tensor
	template <typename T>
	class GenTensor : public Tensor<T> {

	public:

		GenTensor<T>() : Tensor<T>() {}

		// all representation arguments (targs, eps, tt) are accepted but ignored
		GenTensor<T>(const Tensor<T>& t1) : Tensor<T>(t1) {}
		GenTensor<T>(const Tensor<T>& t1, const TensorArgs& targs) : Tensor<T>(t1) {}
		GenTensor<T>(const Tensor<T>& t1, double eps, const TensorType tt) : Tensor<T>(t1) {}
		GenTensor<T>(const TensorType tt): Tensor<T>() {}
		GenTensor<T>(std::vector<long> v, const TensorType& tt) : Tensor<T>(v) {}
		GenTensor<T>(std::vector<long> v, const TensorArgs& targs) : Tensor<T>(v) {}
		GenTensor<T>(const SRConf<T>& sr1) : Tensor<T>() {MADNESS_EXCEPTION("no ctor with SRConf: use HAVE_GENTENSOR",1);}

		/// type conversion implies a deep copy
		template <class Q> operator GenTensor<Q>() const { // type conv => deep copy
			Tensor<Q> result = Tensor<Q>(this->_ndim,this->_dim,false);
			BINARY_OPTIMIZED_ITERATOR(Q, result, const T, (*this), *_p0 = (Q)(*_p1));
			return result;
		}


		// all "reconstruction" is trivial: *this already is the full tensor
		GenTensor<T> reconstruct_tensor() const {return *this;}
		GenTensor<T> full_tensor() const {return *this;}
		GenTensor<T>& full_tensor() {return *this;}
		GenTensor<T> full_tensor_copy() const {return *this;}
		GenTensor<T> full_tensor_copy() {return *this;}

		bool has_data() const {return this->size()>0;};
		bool has_no_data() const {return not has_data();};
		// sentinel -1 means "full rank / not applicable"
		long rank() const {return -1;}
		double svd_normf() const {return this->normf();}
		size_t real_size() const {return this->size();}

		// rank reduction and normalization are no-ops for full tensors
		void reduce_rank(const double& eps) {return;};
		void normalize() {return;}

		std::string what_am_i() const {return "GenTensor, aliased to Tensor";};
		TensorType tensor_type() const {return TT_FULL;}

		/// SVD accumulation degenerates to a plain addition here
		void add_SVD(const GenTensor<T>& rhs, const double& eps) {*this+=rhs;}

		// no separated representation is available without HAVE_GENTENSOR
		SRConf<T> config() const {MADNESS_EXCEPTION("no SRConf in complex GenTensor",1);}
		SRConf<T> get_configs(const int& start, const int& end) const {MADNESS_EXCEPTION("no SRConf in complex GenTensor",1);}

		/// general transform, delegating to the Tensor base class
		template<typename Q>
		GenTensor<T> general_transform(const Tensor<Q> c[]) const {

			return madness::general_transform(*static_cast<const Tensor<T>*>(this),c);
		}


		/// no rank-reduction safety factor applicable; sentinel -1
		static double fac_reduce() {return -1.0;};

	};
215 
216  template <class T>
217  GenTensor<T> reduce(std::list<GenTensor<T> >& addends, double eps, bool are_optimal=false) {
218  typedef typename std::list<GenTensor<T> >::iterator iterT;
219  GenTensor<T> result=copy(addends.front());
220  for (iterT it=++addends.begin(); it!=addends.end(); ++it) {
221  result+=*it;
222  }
223  return result;
224  }
225 
227 
229  template <class T>
230  GenTensor<T> outer(const GenTensor<T>& left, const GenTensor<T>& right) {
231  return madness::outer(static_cast<Tensor<T> >(left),static_cast<Tensor<T> >(right));
232  }
233 
235 
237  template <class T>
238  GenTensor<T> outer_low_rank(const Tensor<T>& left, const Tensor<T>& right) {
239  return madness::outer(left,right);
240  }
241 
242 
	namespace archive {
		/// serialize a GenTensor (here: a plain Tensor): size, type id, then
		/// dims and raw coefficients
		template <class Archive, typename T>
		struct ArchiveStoreImpl< Archive, GenTensor<T> > {
			static void store(const Archive& s, const GenTensor<T>& t) {
				if (t.iscontiguous()) {
					s & t.size() & t.id();
					if (t.size()) s & t.ndim() & wrap(t.dims(),TENSOR_MAXDIM) & wrap(t.ptr(),t.size());
				}
				else {
					// non-contiguous: store a contiguous deep copy instead
					s & copy(t);
				}
			};
		};


		/// deserialize a GenTensor; must mirror the order written by store()
		template <class Archive, typename T>
		struct ArchiveLoadImpl< Archive, GenTensor<T> > {
			static void load(const Archive& s, GenTensor<T>& t) {
				long sz = 0l, id =0l;
				s & sz & id;
				if (id != t.id()) throw "type mismatch deserializing a tensor";
				if (sz) {
					long _ndim=0l, _dim[TENSOR_MAXDIM];
					s & _ndim & wrap(_dim,TENSOR_MAXDIM);
					// allocate uninitialized storage, then read the coefficients into it
					t = Tensor<T>(_ndim, _dim, false);
					if (sz != t.size()) throw "size mismatch deserializing a tensor";
					s & wrap(t.ptr(), t.size());
				}
				else {
					t = Tensor<T>();
				}
			};
		};

	}
280 
281 #else
282 
283 
285 
	/// low-rank representation of a tensor, held as an SVD-like separated
	/// representation (SRConf) behind a shared pointer; a null pointer means
	/// "no data"
	template<typename T>
	class GenTensor {

	private:

		friend class SliceGenTensor<T>;

		typedef SRConf<T> configT;
		typedef Tensor<T> tensorT;
		typedef GenTensor<T> gentensorT;
		typedef std::shared_ptr<configT> sr_ptr;

		/// the data, possibly shared between several GenTensors
		sr_ptr _ptr;

		/// machine precision assumed by the rank reduction
		static double machinePrecision() {return 1.e-14;}

		/// safety factor applied to rank-reduction thresholds
		static double facReduce() {return 1.e-3;}

	public:

		/// default ctor: empty, no data
		GenTensor() : _ptr() {
		}

//		GenTensor(const GenTensor<T>& rhs) : _ptr(rhs._ptr) { // DON'T DO THIS: USE_COUNT BLOWS UP
		/// copy ctor: shallow -- shares the SRConf if rhs holds data
		GenTensor(const GenTensor<T>& rhs) : _ptr() {
			if (rhs.has_data()) _ptr=rhs._ptr;
		};

		/// ctor with dimensions; all dimensions must have equal length
		GenTensor(const std::vector<long>& dim, const TensorType tt) : _ptr() {

			// check consistency
			const long ndim=dim.size();
			const long maxk=dim[0];
			for (long idim=0; idim<ndim; idim++) {
				// NOTE(review): compares dim[0] with itself; presumably
				// maxk==dim[idim] was intended -- verify upstream
				MADNESS_ASSERT(maxk==dim[0]);
			}

			_ptr=sr_ptr(new configT(dim.size(),dim[0],tt));

		}

		/// ctor with dimensions and TensorArgs; all dimensions must be equal
		GenTensor(const std::vector<long>& dim, const TensorArgs& targs) : _ptr() {

			// check consistency
			const long ndim=dim.size();
			const long maxk=dim[0];
			for (long idim=0; idim<ndim; idim++) {
				// NOTE(review): same self-comparison as above
				MADNESS_ASSERT(maxk==dim[0]);
			}

			_ptr=sr_ptr(new configT(dim.size(),dim[0],targs.tt));

		}

		/// ctor with representation type, order k and number of dimensions
		GenTensor(const TensorType& tt, const unsigned int& k, const unsigned int& dim) {
			_ptr=sr_ptr(new configT(dim,k,tt));
		}

		/// ctor with a full Tensor, approximated according to targs
		GenTensor(const Tensor<T>& rhs, const TensorArgs& targs) {

			// fast return if possible
			if (not rhs.has_data()) {
				_ptr.reset();
				return;
			}

			// all dimensions must have equal length
			MADNESS_ASSERT(rhs.ndim()>0);
			for (long idim=0; idim<rhs.ndim(); idim++) {
				MADNESS_ASSERT(rhs.dim(0)==rhs.dim(idim));
			}

			_ptr=sr_ptr(new configT(rhs.ndim(),rhs.dim(0),targs.tt));

			// direct reduction on the polynomial values on the Tensor
			TensorType ttype=tensor_type();
			if (ttype==TT_2D) {

#if 1
				Tensor<T> U,VT;
				Tensor< typename Tensor<T>::scalar_type > s;

				// add some extra dimensions if this is supposedly NS form:
				// separate sum and wavelet coeffs
				if (rhs.dim(0)%2==0) {
					std::vector<long> dims(rhs.ndim()*2);
					for (int i=0; i<rhs.ndim(); ++i) {
						int k=rhs.dim(i);
						dims[2*i]=k/2;
						dims[2*i+1]=2;
					}
					TensorTrain<T> tt(rhs,targs.thresh*facReduce(),dims);
					// fuse sum and wavelet coeffs back together
					for (int i=0; i<rhs.ndim(); ++i) tt.fusedim(i);
					// fuse dimensions into particles 1 and 2
					tt.two_mode_representation(U,VT,s);

				} else {
					TensorTrain<T> tt(rhs,targs.thresh*facReduce());
					tt.two_mode_representation(U,VT,s);
				}

				const long r=VT.dim(0);
				const long nd=VT.ndim();
				if (r==0) {
					// numerically zero: keep an empty (rank-0) configuration
					_ptr=sr_ptr(new configT(dim(),get_k(),ttype));
				} else {
					MADNESS_ASSERT(U.dim(nd-1)==r);
					Tensor<T> UU=U.reshape(U.size()/r,r);
					_ptr=sr_ptr(new configT(s, copy(transpose(UU)), VT.reshape(r,VT.size()/r),
							dim(), get_k()));
					this->normalize();
				}
#else
				// adapt form of values
				MADNESS_ASSERT(rhs.iscontiguous());
				std::vector<long> d(_ptr->dim_eff(),_ptr->kVec());
				Tensor<T> values_eff=rhs.reshape(d);

				this->computeSVD(targs.thresh,values_eff);
#endif
			} else if (ttype==TT_FULL){
				_ptr.reset(new configT(copy(rhs)));
			} else {
				MADNESS_EXCEPTION("unknown tensor type in GenTensor(tensorT,targs)",0);
			}
		}

		/// ctor with a full Tensor, accuracy thresh, representation type tt
		GenTensor(const Tensor<T>& rhs, const double& thresh, const TensorType& tt) {
			*this=gentensorT(rhs,TensorArgs(thresh,tt));
		}

		/// ctor with a SliceGenTensor: deep copy of the referenced slice
		GenTensor(const SliceGenTensor<T>& rhs) : _ptr() {
			*this=rhs;
		}

		/// dtor
		~GenTensor() {
			this->clear();
		}

		/// shallow assignment: shares the SRConf
		gentensorT& operator=(const gentensorT& rhs) {
			if (this != &rhs) _ptr=rhs._ptr;
			return *this;
		}

		/// deep assignment from a slice, via copy_slice
		GenTensor& operator=(const SliceGenTensor<T>& rhs) {
			*this=rhs._refGT.copy_slice(rhs._s);
			return *this;
		}

		/// deep copy
		friend gentensorT copy(const gentensorT& rhs) {
			if (rhs._ptr) return gentensorT(copy(*rhs._ptr));
			return gentensorT();
		}


		/// type conversion is not implemented for low-rank tensors
		template <class Q> operator GenTensor<Q>() const { // type conv => deep copy
			GenTensor<Q> result;
			MADNESS_EXCEPTION("no type conversion in GenTensor -- use NO_GENTENSOR",1);
			return result;
		}

		/// return a subset [start,end] of the separated configurations
		const GenTensor get_configs(const int& start, const int& end) const {
			return gentensorT(config().get_configs(start,end));
		}

		/// general slicing, shallow; for temporary use only!
		SliceGenTensor<T> operator()(const std::vector<Slice>& s) {
			return SliceGenTensor<T>(*this,s);
		}

		/// general slicing, for const objects
		const SliceGenTensor<T> operator()(const std::vector<Slice>& s) const {
			return SliceGenTensor<T>(*this,s);
		}

		/// assigning a number is deliberately forbidden
		// NOTE(review): no return statement after the exception; relies on
		// MADNESS_EXCEPTION not returning
		GenTensor& operator=(const T& fac) {
			MADNESS_EXCEPTION("no assignment with a number for GenTensor",0);
		}

	private:
		/// disabled: converting a plain Tensor requires explicit TensorArgs
		GenTensor(const Tensor<T>& t) {
			print("in wrong constructor");
		}
	public:

		/// ctor with an SRConf: deep copy of the configuration
		explicit GenTensor(const SRConf<T>& config) : _ptr(new configT(config)) {
		}

	private:
		/// return a deep copy of the slice s of this
		gentensorT copy_slice(const std::vector<Slice>& s) const {

			// fast return if possible
			if (this->has_no_data()) {
				int k_new=s[0].end-s[0].start+1;
				return gentensorT (this->tensor_type(),k_new,this->dim());
			}

			// consistency check
			MADNESS_ASSERT(s.size()==this->dim());
			MADNESS_ASSERT(s[0].step==1);

			// fast return for full rank tensors
			if (tensor_type()==TT_FULL) {
				tensorT a=copy(full_tensor()(s));
				return gentensorT(configT(a));
			}

			MADNESS_ASSERT(_ptr->has_structure());
//			_ptr->make_structure();

			// get dimensions
			const TensorType tt=this->tensor_type();
			const int merged_dim=this->_ptr->dim_per_vector();
			const int dim_eff=this->_ptr->dim_eff();
			const int rank=this->rank();
			int k_new=s[0].end-s[0].start+1;
			// negative end counts from the back (python-style slicing)
			if (s[0].end<0) k_new+=this->get_k();

			// get and reshape the vectors, slice and re-reshape again;
			// this is shallow
			const gentensorT& sr=*this;

			std::vector<Tensor<T> > vectors(dim_eff,Tensor<T>());

			for (int idim=0; idim<dim_eff; idim++) {

				// assignment from/to slice is deep-copy
				if (merged_dim==1) {
					if (rank>0) {
						vectors[idim]=copy(sr._ptr->ref_vector(idim)(Slice(0,rank-1),s[idim]));
					} else {
						// rank 0: keep an empty vector of the sliced extent
						vectors[idim]=Tensor<T>(0,s[idim].end-s[idim].start+1);
					}
				} else if (merged_dim==2) {
					if (rank>0) {
						vectors[idim]=copy(sr._ptr->ref_vector(idim)(Slice(0,rank-1),s[2*idim],s[2*idim+1]));
					} else {
						vectors[idim]=tensorT(0,s[2*idim].end-s[2*idim].start+1,
								s[2*idim+1].end-s[2*idim+1].start+1);
					}
				} else if (merged_dim==3) {
					if (rank>0) {
						vectors[idim]=copy(sr._ptr->ref_vector(idim)(Slice(0,rank-1),
								s[3*idim],s[3*idim+1],s[3*idim+2]));
					} else {
						vectors[idim]=tensorT(0,s[3*idim].end-s[3*idim].start+1,
								s[3*idim+1].end-s[3*idim+1].start+1,
								s[3*idim+2].end-s[3*idim+2].start+1);

					}
				} else MADNESS_EXCEPTION("unknown number of dimensions in GenTensor::copy_slice()",0);
			}

			// work-around for rank==0
			Tensor<double> weights;
			if (rank>0) {
				weights=copy(this->_ptr->weights_(Slice(0,rank-1)));
			} else {
				weights=Tensor<double>(int(0));
			}
			const configT conf(weights,vectors,this->dim(),k_new,tt);

			return gentensorT(conf);

		}

	public:

		/// access the full-tensor representation; asserts TT_FULL
		const tensorT& full_tensor() const {
			MADNESS_ASSERT(tensor_type()==TT_FULL);
			return _ptr->ref_vector(0);
		}

		/// access the full-tensor representation; asserts TT_FULL
		tensorT& full_tensor() {
			MADNESS_ASSERT(tensor_type()==TT_FULL);
			return _ptr->ref_vector(0);
		}

		/// inplace addition
		gentensorT& operator+=(const gentensorT& rhs) {

			if (rhs.has_no_data()) return *this;
			if (this->has_no_data()) {
				*this=copy(rhs);
				return *this;
			}
			this->gaxpy(1.0,rhs,1.0);
			return *this;
		}

		/// inplace subtraction
		gentensorT& operator-=(const gentensorT& rhs) {

			if (rhs.has_no_data()) return *this;
			if (this->has_no_data()) {
				// NOTE(review): copies rhs without negation; empty -= rhs
				// presumably should yield -rhs -- verify against callers
				*this=copy(rhs);
				return *this;
			}
			this->gaxpy(1.0,rhs,-1.0);
			return *this;
		}

		/// inplace addition of a sliced rhs onto the full extent of this
		gentensorT& operator+=(const SliceGenTensor<T>& rhs) {
			const std::vector<Slice> s(this->ndim(),Slice(0,get_k()-1,1));
			this->_ptr->inplace_add(*rhs._refGT._ptr,s,rhs._s,1.0,1.0);
			return *this;
		}

		/// inplace subtraction of a sliced rhs from the full extent of this
		gentensorT& operator-=(const SliceGenTensor<T>& rhs) {
			const std::vector<Slice> s(this->ndim(),Slice(0,get_k()-1,1));
			this->_ptr->inplace_add(*rhs._refGT._ptr,s,rhs._s,1.0,-1.0);
			return *this;
		}

		/// multiplication with a scalar; returns a deep copy
		template<typename Q>
		GenTensor<TENSOR_RESULT_TYPE(T,Q)> operator*(const Q& x) const {
			GenTensor<TENSOR_RESULT_TYPE(T,Q)> result(copy(*this));
			result.scale(x);
			return result;
		}

		/// inplace generalized saxpy: *this = alpha*(*this) + beta*rhs
		gentensorT& gaxpy(const T alpha, const gentensorT& rhs, const T beta) {
			MADNESS_ASSERT(this->tensor_type()==rhs.tensor_type());

			if (tensor_type()==TT_FULL) {
				full_tensor().gaxpy(alpha,rhs.full_tensor(),beta);
				return *this;
			}
			if (not (alpha==1.0)) this->scale(alpha);
			// low-rank case: append beta*rhs to this (ranks simply add up)
			rhs.append(*this,beta);
			return *this;
		}

		/// inplace scaling with a scalar
		template<typename Q>
		GenTensor<TENSOR_RESULT_TYPE(T,Q)>& scale(const Q& dfac) {
			if (!_ptr) return *this;
			if (tensor_type()==TT_FULL) {
				full_tensor().scale(dfac);
			} else {
				_ptr->scale(dfac);
			}
			return *this;
		};

		/// normalize the separated representation (TT_2D only; no-op otherwise)
		void normalize() {
			if (tensor_type()==TT_2D) _ptr->normalize();
		};

		/// fill with random values; r is the rank for low-rank representations
		void fillrandom(const int r=1) {
			if (tensor_type()==TT_FULL) full_tensor().fillrandom();
			else _ptr->fillWithRandom(r);
		}

		/// true if a configuration is held (even a rank-0 one)
		bool has_data() const {
			if (_ptr) return true;
			return false;
		}

		bool has_no_data() const {return (!has_data());}

		/// the current separation rank; 0 if empty
		long rank() const {
			if (_ptr) return _ptr->rank();
			else return 0;
		};

		/// the number of dimensions (requires data)
		unsigned int dim() const {return _ptr->dim();};

		/// the length of dimension i; all dimensions have equal length k
		long dim(const int& i) const {return _ptr->get_k();};

		/// the number of dimensions; -1 if empty
		long ndim() const {
			if (_ptr) return _ptr->dim();
			return -1;
		};

		/// the polynomial order k (edge length of the cube)
		unsigned int get_k() const {return _ptr->get_k();};

		/// the representation type; TT_NONE if empty
		TensorType tensor_type() const {
			if (_ptr) return _ptr->type();
			return TT_NONE;
		};

		/// human-readable name of the representation type (requires data)
		std::string what_am_i() const {return TensorArgs::what_am_i(_ptr->type());};

		/// the number of coefficients stored
		size_t size() const {
			if (_ptr) return _ptr->nCoeff();
			return 0;
		};

		/// the number of bytes actually used
		size_t real_size() const {
			if (_ptr) return _ptr->real_size()+sizeof(*this);
			return 0;
		};

		/// the Frobenius norm
		double normf() const {
			if (has_no_data()) return 0.0;
			return config().normf();
		};

		/// the Frobenius norm, computed within the SVD representation if possible
		double svd_normf() const {
			if (has_no_data()) return 0.0;
			if (tensor_type()==TT_2D) return config().svd_normf();
			return config().normf();
		};

		/// the trace <*this|rhs>; real types only (delegates to trace_conj)
		T trace(const GenTensor<T>& rhs) const {
			return this->trace_conj(rhs);
		}

		/// the trace <*this|rhs> with a GenTensor rhs; real types only
		template<typename Q>
		TENSOR_RESULT_TYPE(T,Q) trace_conj(const GenTensor<Q>& rhs) const {
			if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in GenTensor, sorry",1);
			if (TensorTypeData<Q>::iscomplex) MADNESS_EXCEPTION("no complex trace in GenTensor, sorry",1);

			typedef TENSOR_RESULT_TYPE(T,Q) resultT;
			// fast return if possible
			if ((this->rank()==0) or (rhs.rank()==0)) return resultT(0.0);

			MADNESS_ASSERT(compatible(*this,rhs));
			MADNESS_ASSERT(this->tensor_type()==rhs.tensor_type());

			return overlap(*(this->_ptr),*rhs._ptr);
		}

		/// the trace <*this|rhs> with a full Tensor rhs; real types only
		template<typename Q>
		TENSOR_RESULT_TYPE(T,Q) trace_conj(const Tensor<Q>& rhs) const {
			if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in GenTensor, sorry",1);
			if (TensorTypeData<Q>::iscomplex) MADNESS_EXCEPTION("no complex trace in GenTensor, sorry",1);

			typedef TENSOR_RESULT_TYPE(T,Q) resultT;
			// fast return if possible
			if ((this->rank()==0)) return resultT(0.0);

			// fast return if this is a full tensor
			// otherwise reconstruct this to a full tensor, since it's presumably
			// faster than turning the full tensor rhs into a low-rank approximation
			if (this->tensor_type()==TT_FULL) return full_tensor().trace_conj(rhs);
			else return full_tensor_copy().trace_conj(rhs);

		}

		/// element-wise multiplication: not implemented
		// NOTE(review): asserts and then falls off the end without returning
		GenTensor<T>& emul(const GenTensor<T>& t) {
			print("no GenTensor<T>::emul yet");
			MADNESS_ASSERT(0);
		}

		/// return a deep copy of this, reconstructed to full representation
		Tensor<T> full_tensor_copy() const {
			const TensorType tt=tensor_type();
			if (tt==TT_NONE) return Tensor<T>();
			else if (tt==TT_2D) return this->reconstruct_tensor();
			else if (tt==TT_FULL) {
				return copy(full_tensor());
			} else {
				print(TensorArgs::what_am_i(tt),"unknown tensor type");
				MADNESS_ASSERT(0);
			}
		}

		/// reduce the separation rank of this to accuracy eps
		void reduce_rank(const double& eps) {

			if (rank()==0) return;

			// direct reduction on the polynomial values on the Tensor
			if (tensor_type()==TT_FULL or tensor_type()==TT_NONE) {
				return;
			} else if (this->tensor_type()==TT_2D) {
				config().divide_and_conquer_reduce(eps*facReduce());
			} else {
				MADNESS_EXCEPTION("unknown tensor type in GenTensor::reduceRank()",0);
			}
			MADNESS_ASSERT(this->_ptr->has_structure() or this->rank()==0);
		}

		/// print weights and coefficient vectors of the separated representation
		void printCoeff(const std::string title) const {
			print("printing SepRep",title);
			print(_ptr->weights_);
			for (unsigned int idim=0; idim<this->_ptr->dim_eff(); idim++) {
				print("coefficients for dimension",idim);
				print(_ptr->vector_[idim]);
			}
		}

		/// reconstruct this to a full-rank Tensor
		Tensor<T> reconstruct_tensor() const {

			/*
			 * reconstruct the tensor first to the configurational dimension,
			 * then to the real dimension
			 */

			// fast return for full rank tensors
			if (tensor_type()==TT_FULL) return full_tensor();

			// for convenience
			const unsigned int conf_dim=this->_ptr->dim_eff();
			const unsigned int conf_k=this->_ptr->kVec();	// possibly k,k*k,..
			const long rank=this->rank();
			long d[TENSOR_MAXDIM];

			// fast return if possible
			if (rank==0) {
				// reshape the tensor to the really required one
				const long k=this->get_k();
				const long dim=this->dim();
				for (long i=0; i<dim; i++) d[i] = k;

				return Tensor<T> (dim,d,true);
			}


			// set up result Tensor (in configurational dimensions)
			for (long i=0; i<conf_dim; i++) d[i] = conf_k;
			tensorT s(conf_dim,d,true);

			// flatten this
			gentensorT sr=*this;

			// and a scratch Tensor
			Tensor<T> scr(rank);
			Tensor<T> scr1(rank);
			Tensor<T> scr2(rank);

			if (conf_dim==1) {

				for (unsigned int i0=0; i0<conf_k; i0++) {
					scr=sr._ptr->weights_;
					// scr.emul(F[0][i0]);
					T buffer=scr.sum();
					s(i0)=buffer;
				}

			} else if (conf_dim==2) {


				// tensorT weight_matrix(rank,rank);
				// for (unsigned int r=0; r<rank; r++) {
				// weight_matrix(r,r)=this->weight(r);
				// }
				// s=inner(weight_matrix,sr._ptr->refVector(0));
				// s=inner(s,sr._ptr->refVector(1),0,0);
//				tensorT sscr=copy(sr._ptr->ref_vector(0)(sr._ptr->c0()));
				// scale the first vector with the weights, then contract
				tensorT sscr=copy(sr._ptr->flat_vector(0));
				for (unsigned int r=0; r<rank; r++) {
					const double w=_ptr->weights(r);
					for (unsigned int k=0; k<conf_k; k++) {
						sscr(r,k)*=w;
					}
				}
				inner_result(sscr,sr._ptr->flat_vector(1),0,0,s);


			} else if (conf_dim==3) {

				// NOTE(review): unconditionally throws; the loop below is dead code
				MADNESS_EXCEPTION("flat/structure issue",1);
				for (unsigned int i0=0; i0<conf_k; i0++) {
					scr=copy(sr._ptr->weights_(Slice(0,sr.rank()-1)));
					scr.emul(sr._ptr->ref_vector(0)(Slice(0,rank-1),i0));
					for (unsigned int i1=0; i1<conf_k; i1++) {
						scr1=copy(scr);
						scr1.emul(sr._ptr->ref_vector(1)(Slice(0,rank-1),i1));
						for (unsigned int i2=0; i2<conf_k; i2++) {
							scr2=copy(scr1);
							scr2.emul(sr._ptr->ref_vector(2)(Slice(0,rank-1),i2));
							s(i0,i1,i2)=scr2.sum();
						}
					}
				}
			} else {
				print("only config_dim=1,2,3 in GenTensor::reconstructTensor");
				MADNESS_ASSERT(0);
			}


			// reshape the tensor to the really required one
			const long k=this->get_k();
			const long dim=this->dim();
			for (long i=0; i<dim; i++) d[i] = k;

			Tensor<T> s2=s.reshape(dim,d);
			return s2;
		}

		/// append this to rhs: rhs += fac*(*this), concatenating ranks
		void append(gentensorT& rhs, const T fac=1.0) const {
			rhs.config().append(*this->_ptr,fac);
		}

		/// add rhs within the SVD representation, reducing the rank on the fly
		void add_SVD(const gentensorT& rhs, const double& thresh) {
			if (rhs.has_no_data()) return;
			if (has_no_data()) {
				*this=rhs;
				return;
			}
			if (tensor_type()==TT_FULL or tensor_type()==TT_NONE) {
				this->full_tensor()+=rhs.full_tensor();
				return;
			}
			config().add_SVD(rhs.config(),thresh*facReduce());
		}

		/// check compatibility: same type, same k, same dimension
		friend bool compatible(const gentensorT& rhs, const gentensorT& lhs) {
			return ((rhs.tensor_type()==lhs.tensor_type()) and (rhs.get_k()==lhs.get_k())
					and (rhs.dim()==lhs.dim()));
		};

		/// transform all dimensions with the same matrix c
		gentensorT transform(const Tensor<T> c) const {
//			_ptr->make_structure();
			if (has_no_data()) return gentensorT();
			MADNESS_ASSERT(_ptr->has_structure());
			return gentensorT (this->_ptr->transform(c));
		}

		/// transform each dimension idim with its own matrix c[idim]
		template<typename Q>
		gentensorT general_transform(const Tensor<Q> c[]) const {
//			this->_ptr->make_structure();
			if (has_no_data()) return gentensorT();
			MADNESS_ASSERT(_ptr->has_structure());
			return gentensorT (this->config().general_transform(c));
		}

		/// inner product along one dimension (axis) with matrix c
		gentensorT transform_dir(const Tensor<T>& c, const int& axis) const {
//			this->_ptr->make_structure();
			MADNESS_ASSERT(_ptr->has_structure());
			return GenTensor<T>(this->_ptr->transform_dir(c,axis));
		}

		/// const reference to the underlying separated representation
		const SRConf<T>& config() const {return *_ptr;}

		/// reference to the underlying separated representation
		SRConf<T>& config() {return *_ptr;}

		/// the safety factor for rank reduction
		static double fac_reduce() {return facReduce();};

	private:

		/// release the data
		void clear() {_ptr.reset();};

		/// inplace add: this(lhs_s) = alpha*this(lhs_s) + beta*rhs(rhs_s)
		void inplace_add(const gentensorT& rhs, const std::vector<Slice>& lhs_s,
				const std::vector<Slice>& rhs_s, const double alpha, const double beta) {

			// fast return if possible
			if (rhs.has_no_data() or rhs.rank()==0) return;

			if (this->has_data()) MADNESS_ASSERT(this->tensor_type()==rhs.tensor_type());

			// no fast return possible!!!
			//			if (this->rank()==0) {
			//				// this is a deep copy
			//				*this=rhs(rhs_s);
			//				this->scale(beta);
			//				return;
			//			}

			if (tensor_type()==TT_FULL) {
				full_tensor()(lhs_s).gaxpy(alpha,rhs.full_tensor()(rhs_s),beta);
			} else {
//				rhs._ptr->make_structure();
//				_ptr->make_structure();
				MADNESS_ASSERT(_ptr->has_structure());
				MADNESS_ASSERT(rhs._ptr->has_structure());
				this->_ptr->inplace_add(*rhs._ptr,lhs_s,rhs_s, alpha, beta);
			}
		}

		/// compute the SVD of the effective 2D values and keep the singular
		/// vectors whose singular values exceed the threshold
		void computeSVD(const double& eps,const Tensor<T>& values_eff) {

			// SVD works only with matrices (2D)
			MADNESS_ASSERT(values_eff.ndim()==2);
			MADNESS_ASSERT(this->tensor_type()==TT_2D);

			// fast return if possible
			if (values_eff.normf()<eps*facReduce()) {
				_ptr=sr_ptr(new configT(_ptr->dim(),_ptr->get_k(),tensor_type()));
				return;
			}

			// output from svd
			Tensor<T> U;
			Tensor<T> VT;
			Tensor< typename Tensor<T>::scalar_type > s;

			svd(values_eff,U,s,VT);

			// find the maximal singular value that's supposed to contribute
			// singular values are ordered (largest first)
			const double thresh=eps*facReduce();
			long i=SRConf<T>::max_sigma(thresh,s.dim(0),s);

			// convert SVD output to our convention
			if (i>=0) {
				// copy to have contiguous and tailored singular vectors
				_ptr=sr_ptr(new configT(copy(s(Slice(0,i))), copy(transpose(U(_,Slice(0,i)))),
						copy(VT(Slice(0,i),_)), dim(), get_k()));
				MADNESS_ASSERT(this->_ptr->get_k()==this->_ptr->vector_[0].dim(1));
				MADNESS_ASSERT(this->_ptr->rank()==this->_ptr->vector_[0].dim(0));
				MADNESS_ASSERT(this->_ptr->rank()==this->_ptr->weights_.dim(0));

			} else {
				// nothing above threshold: keep an empty (rank-0) configuration
				_ptr=sr_ptr(new configT(dim(),get_k(),tensor_type()));
			}
		}
	};
1056 
1057 
1058 
	/// implements a temporary(!) proxy for a slice of a GenTensor;
	/// supports g(s) += rhs, g(s) -= rhs and g(s) = 0.0, but not assignment
	template <typename T>
	class SliceGenTensor {

	private:
		friend class GenTensor<T>;

		GenTensor<T>& _refGT;		// the referenced GenTensor
		std::vector<Slice> _s;		// the slices, one per dimension

		// all ctors are private, only accessible by GenTensor

		/// default ctor
		SliceGenTensor<T> () {}

		/// ctor bound to a GenTensor; shallow (casts away gt's constness)
		SliceGenTensor<T> (const GenTensor<T>& gt, const std::vector<Slice>& s)
				: _refGT(const_cast<GenTensor<T>& >(gt))
				, _s(s) {}

	public:

		/// assignment as in g(s)=g1: deliberately forbidden
		SliceGenTensor<T>& operator=(const GenTensor<T>& rhs) {
			print("You don't want to assign to a SliceGenTensor; use operator+= instead");
			MADNESS_ASSERT(0);
			return *this;
		};

		/// assignment as in g(s)=g1(s): deliberately forbidden
		SliceGenTensor<T>& operator=(const SliceGenTensor<T>& rhs) {
			print("You don't want to assign to a SliceGenTensor; use operator+= instead");
			MADNESS_ASSERT(0);
			return *this;
		};

		/// inplace addition as in g(s)+=g1 (g1 covers its full extent)
		SliceGenTensor<T>& operator+=(const GenTensor<T>& rhs) {
			std::vector<Slice> s(this->_refGT.ndim(),Slice(_));
			_refGT.inplace_add(rhs,this->_s,s,1.0,1.0);
			return *this;
		}

		/// inplace subtraction as in g(s)-=g1
		SliceGenTensor<T>& operator-=(const GenTensor<T>& rhs) {
			std::vector<Slice> s(this->_refGT.ndim(),Slice(_));
			_refGT.inplace_add(rhs,this->_s,s,1.0,-1.0);
			return *this;
		}

		/// inplace addition of two slices as in g(s)+=g1(s1)
		SliceGenTensor<T>& operator+=(const SliceGenTensor<T>& rhs) {
			_refGT.inplace_add(GenTensor<T>(*rhs._refGT._ptr),this->_s,rhs._s,1.0,1.0);
			return *this;
		}

		/// zeroing as in g(s)=0.0; only the value zero is allowed
		SliceGenTensor<T>& operator=(const T& number) {
			MADNESS_ASSERT(number==T(0.0));
			if (this->_refGT.tensor_type()==TT_FULL) {
				_refGT.full_tensor()(_s)=0.0;
			} else {
				// low-rank case: subtract the slice from itself
				const GenTensor<T>& tmp=(this->_refGT);
				_refGT.inplace_add(tmp,_s,_s,1.0,-1.0);
			}
			return *this;
		}

		/// for compatibility with tensor: deep copy of the slice
		friend GenTensor<T> copy(const SliceGenTensor<T>& rhs) {
			if (rhs._refGT.has_data()) return GenTensor<T>(rhs);
			return GenTensor<T>();
		}

	};
1134 
1135 
1141  template <class T, class Q>
1142  GenTensor< TENSOR_RESULT_TYPE(T,Q) > transform(const GenTensor<Q>& t, const Tensor<T>& c) {
1143  return t.transform(c);
1144  }
1145 
1147  struct RankReduceWrapper {
1148  double eps;
1149  RankReduceWrapper(const double& e) : eps(e) {}
1150 
1151  template<typename T>
1152  void operator()(GenTensor<T>& g) const {
1153  g.reduce_rank(eps);
1154  }
1155  };
1156 
1158  template<typename T>
1159  bool compare_rank(const GenTensor<T>& g1, const GenTensor<T>& g2) {
1160  return g1.rank()<g2.rank();
1161  }
1162 
1163 
1165 
1172  template<typename T>
1173  GenTensor<T> reduce(std::list<GenTensor<T> >& addends, double eps, bool are_optimal=false) {
1174 
1175  // fast return
1176  if (addends.size()==0) return GenTensor<T>();
1177 
1178  typedef typename std::list<GenTensor<T> >::iterator iterT;
1179 
1180  // make error relative
1181  eps=eps/addends.size();
1182 
1183  // if the addends are not in SVD format do that now so that we can call add_svd later
1184  if (not are_optimal) {
1185  RankReduceWrapper f(eps);
1186  std::for_each(addends.begin(),addends.end(),f);
1187  }
1188 
1189  // remove zero ranks and sort the list according to the gentensor's ranks
1190  addends.remove_if(has_zero_rank<T>);
1191  if (addends.size()==0) return GenTensor<T>();
1192  addends.sort(compare_rank<T>);
1193 
1194  // do the additions
1195  GenTensor<T> result=copy(addends.front());
1196  for (iterT it=++addends.begin(); it!=addends.end(); ++it) {
1197  GenTensor<T>& rhs=*it;
1198  MADNESS_ASSERT(&rhs!=&result);
1199  result.add_SVD(rhs,eps);
1200  }
1201  addends.clear();
1202 
1203  return result;
1204  }
1205 
1207 
1216  template <class T, class Q>
1217  GenTensor<TENSOR_RESULT_TYPE(T,Q)> transform_dir(const GenTensor<Q>& t, const Tensor<T>& c,
1218  int axis) {
1219 
1220  return t.transform_dir(c,axis);
1221  }
1222 
1223  template<typename T>
1224  static inline
1225  std::ostream& operator<<(std::ostream& s, const GenTensor<T>& g) {
1226  std::string str="GenTensor has no data";
1227  if (g.has_no_data()) {
1228  std::string str="GenTensor has no data";
1229  s << str.c_str() ;
1230  } else {
1231  str="GenTensor has data";
1232  s << str.c_str() << g.config();
1233  }
1234  return s;
1235  }
1236 
	namespace archive {
		/// serialize a GenTensor: an existence flag, then (if present) the SRConf
		template <class Archive, typename T>
		struct ArchiveStoreImpl< Archive, GenTensor<T> > {

			friend class GenTensor<T>;
			static void store(const Archive& ar, const GenTensor<T>& t) {
				bool exist=t.has_data();
				ar & exist;
				if (exist) ar & t.config();
			};
		};


		/// deserialize a GenTensor; must mirror the order written by store()
		template <class Archive, typename T>
		struct ArchiveLoadImpl< Archive, GenTensor<T> > {

			friend class GenTensor<T>;
			static void load(const Archive& ar, GenTensor<T>& t) {
				// check for pointer existence
				bool exist=false;
				ar & exist;
				if (exist) {
					SRConf<T> conf;
					ar & conf;
					//t.config()=conf;
					// the SRConf ctor of GenTensor performs a deep copy
					t=GenTensor<T>(conf);
				}
			};
		};
	};
1271 
1273  template <class T, class Q>
1274  GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer(const GenTensor<T>& lhs2, const GenTensor<Q>& rhs2) {
1275  return outer_low_rank(lhs2.full_tensor(),rhs2.full_tensor());
1276  }
1277 
	/// build a rank-1 GenTensor from the outer product of two full tensors

	/// both factors must be full cubes of the same edge length k and equal size;
	/// the result holds the two (flattened) factors with unit weight
	template <class T, class Q>
	GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer_low_rank(const Tensor<T>& lhs2, const Tensor<Q>& rhs2) {

		typedef TENSOR_RESULT_TYPE(T,Q) resultT;

		// srconf is shallow, do deep copy here
		const Tensor<T> lhs=copy(lhs2);
		const Tensor<Q> rhs=copy(rhs2);

		const long k=lhs.dim(0);
		const long dim=lhs.ndim()+rhs.ndim();
		// verify both factors are cubes of edge length k with equal size
		long size=1;
		for (int i=0; i<lhs.ndim(); ++i) size*=k;
		MADNESS_ASSERT(size==lhs.size());
		MADNESS_ASSERT(size==rhs.size());
		MADNESS_ASSERT(lhs.size()==rhs.size());

		// a single separated term with unit weight
		Tensor<double> weights(1);
		weights=1.0;

		SRConf<resultT> srconf(weights,lhs.reshape(1,lhs.size()),rhs.reshape(1,rhs.size()),dim,k);
		GenTensor<resultT> coeff(srconf);
		coeff.normalize();
		return coeff;
	}
1304 
1305  #endif /* HAVE_GENTENSOR */
1306 
1308  template<typename T>
1309  void change_tensor_type(GenTensor<T>& t, const TensorArgs& targs) {
1310 
1311  // fast return if possible
1312  const TensorType current_type=t.tensor_type();
1313  if (current_type==targs.tt) return;
1314  if (t.has_no_data()) return;
1315 
1316  // for now
1317  MADNESS_ASSERT(targs.tt==TT_FULL or targs.tt==TT_2D);
1318  MADNESS_ASSERT(current_type==TT_FULL or current_type==TT_2D);
1319 
1320  GenTensor<T> result;
1321  if (targs.tt==TT_FULL) {
1322  result=GenTensor<T>(t.full_tensor_copy(),targs);
1323  } else if (targs.tt==TT_2D) {
1324  MADNESS_ASSERT(current_type==TT_FULL);
1325  result=GenTensor<T>(t.full_tensor(),targs);
1326  }
1327 
1328  t=result;
1329 
1330  }
1331 
1333 
1341  template <class T, class Q>
1343  return t.general_transform(c);
1344  }
1345 
1346  template <class T>
1347  GenTensor<T> general_transform(const GenTensor<T>& t, const Tensor<T> c[]) {
1348  return t.general_transform(c);
1349  }
1350 
1352  template <typename T, typename Q>
1353  typename IsSupported < TensorTypeData<Q>, GenTensor<T> >::type
1354  operator*(const Q& x, const GenTensor<T>& t) {
1355  return t*x;
1356  }
1357 
1358 
1359 
1360 }
1361 
1362 #endif /* GENTENSOR_H_ */
SRConf< T > config() const
Definition: gentensor.h:201
Definition: shared_ptr_bits.h:38
SRConf< T > get_configs(const int &start, const int &end) const
Definition: gentensor.h:202
NDIM const Function< R, NDIM > & g
Definition: mra.h:2179
GenTensor< T > full_tensor_copy() const
Definition: gentensor.h:184
void gaxpy(World &world, Q alpha, std::vector< Function< T, NDIM > > &a, Q beta, const std::vector< Function< R, NDIM > > &b, bool fence=true)
Generalized A*X+Y for vectors of functions -— a[i] = alpha*a[i] + beta*b[i].
Definition: vmra.h:680
TensorType
low rank representations of tensors (see gentensor.h)
Definition: tensor.h:275
long ndim() const
Returns the number of dimensions in the tensor.
Definition: basetensor.h:144
void inner_result(const Tensor< T > &left, const Tensor< Q > &right, long k0, long k1, Tensor< TENSOR_RESULT_TYPE(T, Q) > &result)
Accumulate inner product into user provided, contiguous, correctly sized result tensor.
Definition: tensor.h:2204
IsSupported< TensorTypeData< Q >, GenTensor< T > >::type operator*(const Q &x, const GenTensor< T > &t)
The class defines tensor op scalar ... here define scalar op tensor.
Definition: gentensor.h:1354
static void load(const Archive &s, GenTensor< T > &t)
Definition: gentensor.h:262
bool has_no_data() const
Definition: gentensor.h:188
archive_array< T > wrap(const T *, unsigned int)
Factory function to wrap dynamically allocated pointer as typed archive_array.
Definition: archive.h:820
GenTensor< T > outer(const GenTensor< T > &left, const GenTensor< T > &right)
Outer product ... result(i,j,...,p,q,...) = left(i,k,...)*right(p,q,...)
Definition: gentensor.h:230
double svd_normf() const
Definition: gentensor.h:190
TensorArgs()
Definition: gentensor.h:130
::std::string string
Definition: gtest-port.h:872
void load(World &world, real_function_6d &f, std::string filename)
Definition: helium_exact.cc:367
std::string what_am_i() const
Definition: gentensor.h:196
#define BINARY_OPTIMIZED_ITERATOR(X, x, Y, y, exp)
Definition: tensor_macros.h:701
NDIM & f
Definition: mra.h:2179
long dim(int i) const
Returns the size of dmension i.
Definition: basetensor.h:147
bool has_data() const
Definition: gentensor.h:187
void serialize(const Archive &ar)
Definition: gentensor.h:141
void add_SVD(const GenTensor< T > &rhs, const double &eps)
Definition: gentensor.h:199
const long * dims() const
Returns the array of tensor dimensions.
Definition: basetensor.h:153
Default store of a thingy via serialize(ar,t)
Definition: archive.h:708
const double beta
Definition: gygi_soltion.cc:63
Defines and implements most of Tensor.
#define TENSOR_RESULT_TYPE(L, R)
This macro simplifies access to TensorResultType.
Definition: type_data.h:209
static int max_sigma(const double &thresh, const int &rank, const Tensor< double > &w)
Definition: srconf.h:125
GenTensor< T > general_transform(const Tensor< Q > c[]) const
Definition: gentensor.h:205
static double fac_reduce()
return the additional safety for rank reduction
Definition: gentensor.h:212
double thresh
Definition: gentensor.h:128
GenTensor< T > reduce(std::list< GenTensor< T > > &addends, double eps, bool are_optimal=false)
Definition: gentensor.h:217
GenTensor< T > full_tensor_copy()
Definition: gentensor.h:185
const T1 &f1 return GTEST_2_TUPLE_() T(f0, f1)
Definition: tensor.h:275
void scale(World &world, std::vector< Function< T, NDIM > > &v, const std::vector< Q > &factors, bool fence=true)
Scales inplace a vector of functions by distinct values.
Definition: vmra.h:290
void change_tensor_type(GenTensor< T > &t, const TensorArgs &targs)
change representation to targ.tt
Definition: gentensor.h:1309
Definition: srconf.h:64
static void store(const Archive &s, const GenTensor< T > &t)
Definition: gentensor.h:247
Function< T, NDIM > copy(const Function< T, NDIM > &f, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool fence=true)
Create a new copy of the function with different distribution and optional fence. ...
Definition: mra.h:1835
long _ndim
Number of dimensions (-1=invalid; 0=scalar; >0=tensor)
Definition: basetensor.h:94
Tensor< TENSOR_RESULT_TYPE(T, Q)> transform_dir(const Tensor< T > &t, const Tensor< Q > &c, int axis)
Transforms one dimension of the tensor t by the matrix c, returns new contiguous tensor.
Definition: tensor.h:1929
FLOAT a(int j, FLOAT z)
Definition: y1.cc:86
#define TENSOR_MAXDIM
Definition: tensor_macros.h:194
TensorArgs(const double &thresh1, const TensorType &tt1)
Definition: gentensor.h:131
void reduce_rank(const double &eps)
Definition: gentensor.h:193
Default load of a thingy via serialize(ar,t)
Definition: archive.h:718
Tensor< double > tensorT
Definition: chem/distpm.cc:13
TensorArgs holds the arguments for creating a LowRankTensor.
Definition: gentensor.h:127
Definition: tensor.h:275
void svd(const Tensor< T > &a, Tensor< T > &U, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &VT)
Compute the singluar value decomposition of an n-by-m matrix using *gesvd.
Definition: lapack.cc:273
TensorType tensor_type() const
Definition: gentensor.h:197
GenTensor()
Definition: gentensor.h:163
GenTensor< T > full_tensor() const
Definition: gentensor.h:182
GenTensor< T > outer_low_rank(const Tensor< T > &left, const Tensor< T > &right)
Outer product ... result(i,j,...,p,q,...) = left(i,k,...)*right(p,q,...)
Definition: gentensor.h:238
GenTensor< TENSOR_RESULT_TYPE(T, Q)> general_transform(const GenTensor< T > &t, const Tensor< Q > c[])
Transform all dimensions of the tensor t by distinct matrices c.
Definition: gentensor.h:1342
long size() const
Returns the number of elements in the tensor.
Definition: basetensor.h:138
Definition: gentensor.h:122
void normalize()
Definition: gentensor.h:194
bool has_zero_rank(const GenTensor< T > &g)
true if GenTensor has zero rank or no data
Definition: gentensor.h:151
GenTensor< T > & full_tensor()
Definition: gentensor.h:183
long _dim[TENSOR_MAXDIM]
Size of each dimension.
Definition: basetensor.h:96
size_t real_size() const
Definition: gentensor.h:191
Tensor< T > transpose(const Tensor< T > &t)
Returns a new deep copy of the transpose of the input tensor.
Definition: tensor.h:1945
void print(const A &a)
Print a single item to std::cout terminating with new line.
Definition: print.h:122
TensorType tt
Definition: gentensor.h:129
#define MADNESS_EXCEPTION(msg, value)
Definition: worldexc.h:88
double tt1
Definition: lapack.cc:66
Definition: gentensor.h:123
Definition: tensor.h:275
Holds machinery to set up Functions/FuncImpls using various Factories and Interfaces.
Definition: chem/atomutil.cc:45
const double c
Definition: gfit.cc:200
std::vector< Function< TENSOR_RESULT_TYPE(T, R), NDIM > > transform(World &world, const std::vector< Function< T, NDIM > > &v, const DistributedMatrix< R > &c, bool fence=true)
Definition: chem/SCF.cc:86
handles the low-level details of a separated representation tensor
static std::string what_am_i(const TensorType &tt)
Definition: gentensor.h:135
Defines and implements the tensor train decomposition as described in I.V. Oseledets, Siam J. Sci. Comput. 33, 2295 (2011).
GenTensor< T > reconstruct_tensor() const
Definition: gentensor.h:181
long rank() const
Definition: gentensor.h:189