MADNESS  version 0.9
srconf.h
Go to the documentation of this file.
1 /*
2  This file is part of MADNESS.
3 
4  Copyright (C) 2007,2010 Oak Ridge National Laboratory
5 
6  This program is free software; you can redistribute it and/or modify
7  it under the terms of the GNU General Public License as published by
8  the Free Software Foundation; either version 2 of the License, or
9  (at your option) any later version.
10 
11  This program is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with this program; if not, write to the Free Software
18  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 
20  For more information please contact:
21 
22  Robert J. Harrison
23  Oak Ridge National Laboratory
24  One Bethel Valley Road
25  P.O. Box 2008, MS-6367
26 
27  email: harrisonrj@ornl.gov
28  tel: 865-241-3937
29  fax: 865-572-0680
30 
31  $Id$
32 */
33 
36 
37 #ifndef SRCONF_H_
38 #define SRCONF_H_
39 
40 //#define BENCH 0
41 
42 #include <madness/world/print.h>
43 #include <madness/tensor/tensor.h>
44 #include <madness/tensor/clapack.h>
46 #include <list>
47 
48 namespace madness {
49 
50 
51 
52 #ifdef BENCH
53  static double time_[30];
54 #endif
55 
56 
57  template <class T> class GenTensor;
58 
63  template <typename T>
64  class SRConf {
65  friend class GenTensor<T>;
66 
68  static unsigned int compute_nvec(const TensorType& tt) {
69  if (tt==TT_FULL) return 1;
70  if (tt==TT_2D) return 2;
71  print("unknown TensorType",tt);
72  MADNESS_ASSERT(0);
73  }
74 
    /// the scalar (real) type underlying T
    typedef typename Tensor<T>::scalar_type scalar_type;
    public:

#ifdef BENCH
    static double& time(int i) {return time_[i];}
#endif

    typedef Tensor<T> tensorT;

    /// when true, add_SVD checks right-orthonormality of both arguments
    static const bool check_orthonormality=false;

    /// the number of dimensions (the order) of the represented tensor
    unsigned int dim_;

    /// one weight per configuration; asserted to match vector_[i].dim(0)
    Tensor< typename Tensor<T>::scalar_type > weights_;

    /// one tensor per effective dimension, each holding all configurations
    /// (first index is the configuration/rank index)
    std::vector<tensorT> vector_;

    /// the current rank; -1 denotes the full-rank (TT_FULL) representation
    long rank_;

    /// the polynomial order k per physical dimension
    unsigned int maxk_;

    /// slices selecting the first rank_ configurations of each vector
    std::vector<Slice> s_;
110 
113 
114 
115  public:
116 
125  static int max_sigma(const double& thresh, const int& rank, const Tensor<double>& w) {
126 
127  // find the maximal singular value that's supposed to contribute
128  // singular values are ordered (largest first)
129  double residual=0.0;
130  long i;
131  for (i=rank-1; i>=0; i--) {
132  residual+=w(i)*w(i);
133  if (residual>thresh*thresh) break;
134  }
135  return i;
136  }
137 
138 
    /// default ctor: an empty configuration of unknown dimension and type
    SRConf() : dim_(0), rank_(0), maxk_(0), s_(), tensortype_(TT_NONE) {
    };

    /// ctor for an empty configuration of given dimension, order and type

    /// @param[in] dim  number of physical dimensions (must be a multiple of nvec)
    /// @param[in] k    polynomial order per dimension
    /// @param[in] tt   tensor representation type
    SRConf(const unsigned int& dim, const unsigned int& k, const TensorType& tt)
        : dim_(dim)
        , rank_(0)
        , maxk_(k)
        , s_()
        , tensortype_(tt) {

        // make sure dim is integer multiple of requested TT
        const long nvec=compute_nvec(tt);
        MADNESS_ASSERT(dim%nvec==0);

        // construct empty vector
        weights_=Tensor<double>(int(0));
        vector_=std::vector<Tensor<T> > (nvec);

        if (tt==TT_FULL) {
            // full representation: a single dim-dimensional tensor of order k
            vector_[0]=tensorT(std::vector<long>(dim,k));
            rank_=-1;   // rank is meaningless for TT_FULL
        } else {
            for (unsigned int idim=0; idim<nvec; idim++) vector_[idim]=Tensor<T>(0,kVec());
        }
        make_structure();
        MADNESS_ASSERT(has_structure());
    }

    /// copy ctor; shallow copy via operator= (tensors share data)
    SRConf(const SRConf& rhs) {
        *this=rhs;
        MADNESS_ASSERT(has_structure());
    }

    /// ctor from weights and effective vectors; shallow copies of the arguments

    /// @param[in] weights  one weight per configuration (length = rank)
    /// @param[in] vectors  one tensor per effective dimension
    /// @param[in] dim      number of physical dimensions
    /// @param[in] maxk     polynomial order per dimension
    /// @param[in] tt       tensor representation type
    SRConf(const Tensor<double>& weights, const std::vector<Tensor<T> >& vectors,
            const unsigned int& dim, const unsigned int maxk, const TensorType& tt)
        : dim_(dim)
        , maxk_(maxk)
        , tensortype_(tt) {

        // consistency check
        MADNESS_ASSERT(vectors.size()>0);
        MADNESS_ASSERT(weights.ndim()==1 and weights.dim(0)==vectors[0].dim(0));

        // compute dimension
        unsigned int nvec=compute_nvec(tt);
        MADNESS_ASSERT(vectors.size()==nvec);
        MADNESS_ASSERT(dim%nvec==0);

        rank_=weights.dim(0);
        weights_=weights;
        vector_=std::vector<Tensor<T> > (long(vectors.size()));
        for (unsigned int idim=0; idim<vectors.size(); idim++) {
            vector_[idim]=vectors[idim];
        }
        make_slices();
        MADNESS_ASSERT(has_structure());
    }

    /// ctor for the full-rank (TT_FULL) representation; shallow copy of vector1
    SRConf(const tensorT& vector1)
        : dim_(vector1.ndim())
        , weights_(Tensor<double>())
        , rank_(-1)   // -1 marks the full representation
        , maxk_(vector1.dim(0))
        , tensortype_(TT_FULL) {

        vector_.resize(1);
        vector_[0]=vector1;
        MADNESS_ASSERT(has_structure());
    }

    /// ctor for the 2-way (TT_2D) representation; shallow copies of the arguments

    /// @param[in] weights  one weight per configuration
    /// @param[in] vector1  (rank,k^d/2) tensor of first factors
    /// @param[in] vector2  (rank,k^d/2) tensor of second factors
    /// @param[in] dim      number of physical dimensions
    /// @param[in] maxk     polynomial order per dimension
    SRConf(const Tensor<double>& weights, const tensorT& vector1, const tensorT& vector2,
            const unsigned int& dim, const unsigned int maxk)
        : dim_(dim)
        , maxk_(maxk)
        , tensortype_(TT_2D) {

        MADNESS_ASSERT(weights.ndim()==1);
        MADNESS_ASSERT(vector1.ndim()==2);
        MADNESS_ASSERT(vector2.ndim()==2);
        MADNESS_ASSERT(weights.dim(0)==vector1.dim(0));
        MADNESS_ASSERT(vector2.dim(0)==vector1.dim(0));
        vector_.resize(2);
        vector_[0]=vector1;
        vector_[1]=vector2;
        weights_=weights;
        rank_=weights.dim(0);
        make_structure();
        make_slices();
        MADNESS_ASSERT(has_structure());
    }
235 
    /// assignment operator; shallow assignment of weights and vectors
    SRConf& operator=(const SRConf& rhs) {

        // check for self-assignment
        if (&rhs==this) return *this;

        // these always hold
        dim_=rhs.dim_;
        tensortype_=rhs.tensortype_;
        maxk_=rhs.maxk_;
        s_=rhs.s_;

        if (rhs.has_no_data()) {
            // construct empty vector
            weights_=Tensor<double>(0);
            vector_=std::vector<Tensor<T> > (rhs.dim_eff());
            rank_=0;
            for (unsigned int idim=0; idim<dim_eff(); idim++) vector_[idim]=Tensor<T>(0,long(this->kVec()));
            make_structure();


        } else if (rhs.type()==TT_FULL) {
            // full representation: single tensor, no weights, rank -1
            weights_=Tensor<double>();
            rank_=-1;
            vector_.resize(1);
            vector_[0]=rhs.ref_vector(0);

        } else {
            // assign vectors; shallow copy
            vector_.resize(rhs.vector_.size());
            for (unsigned int i=0; i<rhs.vector_.size(); i++) {
                vector_[i]=rhs.vector_[i];
            }

            // shallow copy
            weights_=(rhs.weights_);
            rank_=rhs.rank();

            // consistency check
            for (unsigned int idim=0; idim<dim_eff(); idim++) {
                MADNESS_ASSERT(weights_.dim(0)==vector_[idim].dim(0));
            }
        }
        MADNESS_ASSERT(has_structure());
        return *this;
    }

    /// return a shallow SRConf holding the configurations [start,end] (inclusive)

    /// @param[in] start  first configuration to include
    /// @param[in] end    last configuration to include
    const SRConf get_configs(const int& start, const int& end) const {

        MADNESS_ASSERT((start>=0) and (end<=rank()));
        MADNESS_ASSERT(s_.size()>1);
        const long nvec=dim_eff();
        const long dim_pv_eff=s_.size()-1;    // #dim per vector minus rank-dim

        Slice s(start,end);
        std::vector<tensorT> v(nvec);

        // slice vectors; the first index is always the rank index
        if (dim_pv_eff==1) {
            for (long i=0; i<nvec; i++) v[i]=ref_vector(i)(s,_);
        } else if (dim_pv_eff==2) {
            for (long i=0; i<nvec; i++) v[i]=ref_vector(i)(s,_,_);
        } else if (dim_pv_eff==3) {
            for (long i=0; i<nvec; i++) v[i]=ref_vector(i)(s,_,_,_);
        } else {
            MADNESS_EXCEPTION("faulty dim_pv in SRConf::get_configs",0);
        }

        SRConf<T> result(weights_(s),v,dim(),get_k(),type());
        MADNESS_ASSERT(result.has_structure());
        return result;
    }

    /// dtor; tensors are reference counted, nothing to release explicitly
    ~SRConf() {}

    /// serialize; the TensorType is transported as a plain int
    template <typename Archive>
    void serialize(Archive& ar) {
        int i=int(tensortype_);
        ar & dim_ & weights_ & vector_ & rank_ & maxk_ & i;
        tensortype_=TensorType(i);
        // slices are not serialized -- rebuild them from the rank
        make_slices();
        MADNESS_ASSERT(has_structure());
    }

    /// return the tensor representation type of this configuration
    TensorType type() const {return tensortype_;};

    /// does this SRConf hold any data (a non-empty full tensor, or rank>0)
    bool has_data() const {
        if (tensortype_==TT_FULL) return (vector_.size()>0 and vector_[0].has_data());
        return rank()>0;
    }
331 
332  private:
333 
    /// does this SRConf hold no data
    bool has_no_data() const {return !has_data();}

    /// reserve room for at least r configurations

    /// Grows weights_ and all vectors (plus a headroom of 3 to avoid
    /// repeated incremental reallocation); existing configurations are
    /// copied over and the logical rank_ stays unchanged.
    /// @param[in] r  requested minimal capacity (>= current rank)
    void reserve(long r) {

        // this should at least hold the current information
        MADNESS_ASSERT(r>=this->rank());
        MADNESS_ASSERT(has_data() or vector_.size()>0);

        // fast return if possible
        // nothing to be done
        if (r==0) return;
        // already large enough?
        if (this->vector_[0].dim(0)>=r) return;

        // to avoid incremental increase of the rank
        r+=3;

        // for convenience
        const long rank=this->rank();
        const long kvec=this->kVec();
        // reallocation works on the flat (r,kvec) shape
        const bool had_structure=this->has_structure();
        if (had_structure) this->undo_structure();

        // transfer weights
        Tensor<scalar_type> newWeights(r);
        if (rank>0) newWeights(Slice(0,rank-1))=weights_(Slice(0,rank-1));
        std::swap(weights_,newWeights);

        // transfer vectors
        for (unsigned int idim=0; idim<this->dim_eff(); idim++) {

            tensorT newVector(r,kvec);
            if (rank>0) newVector(this->c0())=vector_[idim](this->c0());
            std::swap(vector_[idim],newVector);

        }
        MADNESS_ASSERT(weights_.dim(0)==vector_[0].dim(0));
        if (had_structure) this->make_structure(true);
        MADNESS_ASSERT(has_structure());

    }

    /// return the slices pointing to the current (first rank_) configurations
    const std::vector<Slice>& c0() const {
        MADNESS_ASSERT(s_.size()>0);
        return s_;
    }

    /// rank-reduce by divide and conquer

    /// Splits this into two halves, reduces each recursively with a
    /// tightened threshold (thresh*0.5), then merges the results via
    /// add_SVD; small chunks (rank <= 8) are reduced directly.
    /// @param[in] thresh  truncation threshold
    void divide_and_conquer_reduce(const double& thresh) {

        if (has_no_data()) return;
        if (rank()==1) {
            normalize();
            return;
        }

        // divide the SRConf into two
        const long chunksize=8;
        if (rank()>chunksize) {
            SRConf<T> chunk1=this->get_configs(0,rank()/2);
            SRConf<T> chunk2=this->get_configs(rank()/2+1,rank()-1);
            chunk1.divide_and_conquer_reduce(thresh*0.5);
            chunk2.divide_and_conquer_reduce(thresh*0.5);

            // collect the two SRConfs
            *this=chunk1;
            this->add_SVD(chunk2,thresh);

        } else {

            // and reduce the rank
            this->orthonormalize(thresh);
        }
        MADNESS_ASSERT(has_structure());
    }
412 
413  public:
    /// orthonormalize this and reduce the rank via ortho3

    /// Normalizes, flattens both vectors to (rank,kVec) and hands them to
    /// ortho3, which returns the rank-reduced factors; the new rank is the
    /// size of the resulting weight vector.
    /// @param[in] thresh  truncation threshold passed on to ortho3
    void orthonormalize(const double& thresh) {

        if (type()==TT_FULL) return;
        if (has_no_data()) return;
        if (rank()==1) {
            normalize();
            return;
        }
#ifdef BENCH
        double cpu0=wall_time();
#endif
//            vector_[0]=copy(vector_[0](c0()));
//            vector_[1]=copy(vector_[1](c0()));
//            weights_=weights_(Slice(0,rank()-1));
        normalize();
#ifdef BENCH
        double cpu1=wall_time();
#endif
//            this->undo_structure();
        weights_=weights_(Slice(0,rank()-1));
//            ortho3(vector_[0],vector_[1],weights_,thresh);
        tensorT v0=flat_vector(0);
        tensorT v1=flat_vector(1);
#ifdef BENCH
        double cpu2=wall_time();
#endif
        ortho3(v0,v1,weights_,thresh);
        std::swap(vector_[0],v0);
        std::swap(vector_[1],v1);
#ifdef BENCH
        double cpu3=wall_time();
#endif
        rank_=weights_.size();
        MADNESS_ASSERT(rank_>=0);
        this->make_structure();
        make_slices();
        MADNESS_ASSERT(has_structure());
#ifdef BENCH
        double cpu4=wall_time();
        SRConf<T>::time(21)+=cpu1-cpu0;
        SRConf<T>::time(22)+=cpu2-cpu1;
        SRConf<T>::time(23)+=cpu3-cpu2;
        SRConf<T>::time(24)+=cpu4-cpu3;
        SRConf<T>::time(20)+=cpu4-cpu0;
#endif

    }
462 
463  private:
465 
    /// append rhs to this, scaling rhs' weights by fac; no rank reduction

    /// @param[in] rhs  the configuration to append (unchanged)
    /// @param[in] fac  scale factor applied to rhs' weights
    void append(const SRConf<T>& rhs, const double fac=1.0) {

        // fast return if possible
        if (rhs.has_no_data()) return;
        if (this->has_no_data()) {
            *this=copy(rhs);
            this->scale(fac);
            return;
        }

        const long newRank=this->rank()+rhs.rank();
        const long lhsRank=this->rank();
        const long rhsRank=rhs.rank();
        reserve(newRank);

        // assign weights
        this->weights_(Slice(lhsRank,newRank-1))=rhs.weights_(Slice(0,rhsRank-1))*fac;
        std::vector<Slice> s(dim_per_vector()+1,_);
        s[0]=Slice(lhsRank,newRank-1);

        // assign vectors: copy rhs' configurations behind the existing ones
        for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
//                vector_[idim](Slice(lhsRank,newRank-1),_)=rhs.vector_[idim](rhs.c0());
            vector_[idim](s)=rhs.vector_[idim](rhs.c0());
        }

        rank_=newRank;
        make_slices();
        MADNESS_ASSERT(has_structure());

    }

    /// complex scale factors are not supported
    void append(const SRConf<T>& rhs, const double_complex fac=1.0) {
        MADNESS_EXCEPTION("no complex in SRConf",1);
    }

    /// add rhs to this and rank-reduce the result via ortho5

    /// Both arguments are treated as right-orthonormal by ortho5; this is
    /// verified only when check_orthonormality is enabled.
    /// @param[in] rhs     the configuration to add (unchanged)
    /// @param[in] thresh  truncation threshold passed on to ortho5
    void add_SVD(const SRConf<T>& rhs, const double& thresh) {
#ifdef BENCH
        double cpu0=wall_time();
#endif
        if (rhs.has_no_data()) return;
        if (has_no_data()) {
            *this=rhs;
            return;
        }

        if (check_orthonormality) check_right_orthonormality();
        if (check_orthonormality) rhs.check_right_orthonormality();

        // ortho5 works on the flat (rank,kVec) shape
        this->undo_structure();
        ortho5(ref_vector(0),ref_vector(1),weights_,
                rhs.flat_vector(0),rhs.flat_vector(1),rhs.weights_,thresh);
        rank_=weights_.size();
        make_structure();
        make_slices();
        MADNESS_ASSERT(has_structure());
#ifdef BENCH
        double cpu1=wall_time();
        time(25)+=cpu1-cpu0;
#endif
    }
529 
531 
    /// add rhs to this on given slices: lhs(lhs_s) += alpha*lhs(lhs_s) stays,
    /// beta*rhs(rhs_s) is appended as new configurations

    /// @param[in] rhs2   the configuration to add (unchanged)
    /// @param[in] lhs_s  slices into this (one per physical dimension)
    /// @param[in] rhs_s  slices into rhs (one per physical dimension)
    /// @param[in] alpha  scale factor for this (must be 1.0, see assert)
    /// @param[in] beta   scale factor for rhs' weights
    void inplace_add(const SRConf<T>& rhs2, std::vector<Slice> lhs_s,
            std::vector<Slice> rhs_s, const double alpha, const double beta) {

        // fast return if possible; no fast return for this.rank()==0
        // since we might work with slices!
        if (rhs2.has_no_data()) return;

        // fast return for full rank tensors
        if (type()==TT_FULL) {
            vector_[0](lhs_s)+=rhs2.vector_[0](rhs_s);
            return;
        }

        // unflatten this and rhs; shallow wrt vector_
        SRConf<T>& lhs=*this;
        const SRConf<T>& rhs=rhs2;
        if (lhs.has_no_data()) lhs.make_structure(true);
        MADNESS_ASSERT(lhs.has_structure() or (lhs.has_no_data()));
        MADNESS_ASSERT(rhs.has_structure());

        // conflicts with lhs_s ??
        MADNESS_ASSERT(alpha==1.0);

        // for convenience
        const long lhsRank=lhs.rank();
        const long rhsRank=rhs.rank();
        const long newRank=lhs.rank()+rhs.rank();

        const long rhs_k=rhs.get_k();
        const long lhs_k=lhs.get_k();

        const long dim_pv=lhs.dim_per_vector();

        // adapt slices for use: resolve negative end indices and check shapes
        for (unsigned int idim=0; idim<lhs.dim(); idim++) {
            if (lhs_s[idim].end<0) lhs_s[idim].end+=lhs_k;
            if (rhs_s[idim].end<0) rhs_s[idim].end+=rhs_k;
            // make sure slices conform
            MADNESS_ASSERT((lhs_s[idim].end-lhs_s[idim].start) == (rhs_s[idim].end-rhs_s[idim].start));
            // make sure lhs can actually hold rhs(s)
            MADNESS_ASSERT(lhs_k>=(rhs_s[idim].end-rhs_s[idim].start+1));
        }

        lhs.reserve(newRank);

        // assign weights, and include factors alpha and beta
        if (alpha!=1.0) lhs.scale(alpha);
        lhs.weights_(Slice(lhsRank,newRank-1))=rhs.weights_(Slice(0,rhsRank-1))*beta;


        // assign vectors
        for (unsigned int idim=0; idim<lhs.dim_eff(); idim++) {

            // insert rhs at the right place (first index is the rank index)
            if (dim_pv==1) {
                lhs.ref_vector(idim)(Slice(lhsRank,newRank-1),lhs_s[idim])=
                        rhs.ref_vector(idim)(Slice(0,rhsRank-1),rhs_s[idim]);

            } else if (dim_pv==2) {
                lhs.ref_vector(idim)(Slice(lhsRank,newRank-1),lhs_s[2*idim],lhs_s[2*idim+1])=
                        rhs.ref_vector(idim)(Slice(0,rhsRank-1),rhs_s[2*idim],rhs_s[2*idim+1]);

            } else if (dim_pv==3) {
                lhs.ref_vector(idim)(Slice(lhsRank,newRank-1),lhs_s[3*idim],lhs_s[3*idim+1],lhs_s[3*idim+2])=
                        rhs.ref_vector(idim)(Slice(0,rhsRank-1),rhs_s[3*idim],rhs_s[3*idim+1],rhs_s[3*idim+2]);

            } else {
                MADNESS_EXCEPTION("extend dim_pv in srconf::inplace_add",0);
            }
        }

        lhs.rank_=newRank;
        lhs.make_slices();
        MADNESS_ASSERT(has_structure());
    }

    /// deep copy of rhs (the returned SRConf shares no data with rhs)
    friend SRConf<T> copy(const SRConf<T>& rhs) {

        // if rhs is non-existent simply construct a new SRConf
        if (rhs.has_no_data()) return SRConf<T>(rhs.dim(),rhs.get_k(),rhs.type());

        if (rhs.type()==TT_FULL) return SRConf<T>(copy(rhs.ref_vector(0)));

        // pass a copy of the weights and vectors of rhs to ctor
        std::vector<tensorT> vector(rhs.dim_eff());
        for (unsigned int idim=0; idim<rhs.dim_eff(); idim++)
            vector[idim]=copy(rhs.ref_vector(idim)(rhs.c0()));

        return SRConf<T>(copy(rhs.weights_(Slice(0,rhs.rank()-1))),vector,rhs.dim(),rhs.get_k(),rhs.type());
    }
625 
    /// rebuild s_ so it selects the first rank_ configurations of each vector
    void make_slices() {
        if (type()==TT_FULL) return;
        if (this->has_no_data()) {
            s_.clear();
        } else {
            // first dim is the rank
            if (vector_[0].ndim()>TENSOR_MAXDIM) {
                print(*this);
                MADNESS_EXCEPTION("serializing failed",0);
            }
            s_.resize(vector_[0].ndim());
            s_[0]=Slice(0,this->rank()-1);
            for (int i=1; i<vector_[0].ndim(); i++) {
                s_[i] = Slice(_);
            }
        }
    }


    /// reshape the vectors from flat (r,k^dpv) to structured (r,k,..,k)

    /// @param[in] force  reshape even if this holds no data
    void make_structure(bool force=false) {

        // fast return if rank is zero
        if ((not force) and this->has_no_data()) return;
        if (type()==TT_FULL) return;

        const int dim_pv=this->dim_per_vector();
        MADNESS_ASSERT(dim_pv>0 and dim_pv<=3);
        int rr=weights_.dim(0);    // not the rank!
        if (weights_.size()==0) rr=0;
        const int k=this->get_k();

        // reshape the vectors and adapt the Slices
        for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
            if (dim_pv==2) this->vector_[idim]=vector_[idim].reshape(rr,k,k);
            if (dim_pv==3) this->vector_[idim]=vector_[idim].reshape(rr,k,k,k);
        }

        this->make_slices();

    }

    /// reshape the vectors from structured (r,k,..,k) to flat (r,k^dpv)

    /// @param[in] force  reshape even if this holds no data
    void undo_structure(bool force=false) {

        // fast return if rank is zero
        if ((not force) and this->has_no_data()) return;
        if (type()==TT_FULL) return;

        const int dim_pv=this->dim_per_vector();
        MADNESS_ASSERT(dim_pv>0 and dim_pv<=3);
        int rr=weights_.dim(0);    // not the rank!
        if (weights_.size()==0) rr=0;
        const int kvec=this->kVec();

        for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
            this->vector_[idim]=this->vector_[idim].reshape(rr,kvec);
        }

        this->make_slices();
    }
686 
687  public:
    /// return a (writable) reference to the idim-th vector
    Tensor<T>& ref_vector(const unsigned int& idim) {
        return vector_[idim];
    }

    /// return a const reference to the idim-th vector
    const Tensor<T>& ref_vector(const unsigned int& idim) const {
        return vector_[idim];
    }

    private:
    /// return the first rank_ configurations of vector idim, flattened to (rank,kVec)
    const Tensor<T> flat_vector(const unsigned int& idim) const {
        MADNESS_ASSERT(rank()>0);
        return vector_[idim](c0()).reshape(rank(),kVec());
    }

    /// return the first rank_ configurations of vector idim, flattened to (rank,kVec)
    Tensor<T> flat_vector(const unsigned int& idim) {
        MADNESS_ASSERT(rank()>0);
        return vector_[idim](c0()).reshape(rank(),kVec());
    }

    /// fill this SRConf with random numbers at the given rank (for testing)

    /// @param[in] rank  the rank this SRConf should have afterwards
    void fillWithRandom(const long& rank=1) {

        rank_=rank;

        // assign; note that Slice(0,_) is inclusive
        weights_=Tensor<double>(rank);
        weights_=1.0;

        for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
            vector_[idim]=Tensor<T>(rank_,this->kVec());
            vector_[idim].fillrandom();
        }

        this->normalize();
        for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
            vector_[idim].scale(madness::RandomValue<T>()*scalar_type(10.0));
        }
        weights_(Slice(0,this->rank()-1)).fillrandom().scale(10.0);
        make_slices();
        MADNESS_ASSERT(has_structure());
    }
733 
    /// normalize each configuration of each vector; the norms go into weights_

    /// After this call every configuration of every vector has unit norm
    /// (or is zeroed if its norm was below 1e-13); the product of the norms
    /// is absorbed into the corresponding weight.
    void normalize() {

        if (type()==TT_FULL) return;
        if (rank()==0) return;
        MADNESS_ASSERT(has_structure());

        // for convenience
        const unsigned int rank=this->rank();
        std::vector<Slice> s(dim_per_vector()+1,_);
//            std::vector<Slice> s(2,_);

        // we calculate the norm sum_i < F^r_i | F^r_i > for each dimension for each r

        // loop over all configurations
        for (unsigned int r=0; r<rank; r++) {
            s[0]=Slice(r,r);
            // loop over all dimensions
            for (unsigned int idim=0; idim<dim_eff(); idim++) {

//                    Tensor<T> config=this->ref_vector(idim)(s);
//                    const double norm=config.normf();
//                    const double fac=norm;
//                    double oofac=1.0/fac;
//                    if (fac<1.e-13) oofac=0.0;
//                    weights_(r)*=fac;
//                    config.scale(oofac);

//                    const double norm=this->ref_vector(idim)(s).normf();
                const double norm=this->vector_[idim](s).normf();
                const double fac=norm;
                double oofac=1.0/fac;
                // guard against division by (near) zero
                if (fac<1.e-13) oofac=0.0;
                weights_(r)*=fac;
//                    this->ref_vector(idim)(s).scale(oofac);
//                    this->flat_vector(idim)(s).scale(oofac);
                vector_[idim](s).scale(oofac);

            }
        }
        MADNESS_ASSERT(has_structure());
    }

    /// check if the second (right) factors are orthonormal: t1 t1^T == identity
    bool check_right_orthonormality() const {

        // fast return if possible
        if (rank()==0) return true;

        MADNESS_ASSERT(type()==TT_2D);

        const tensorT t1=ref_vector(1)(c0()).reshape(rank(),kVec());
        tensorT S=inner(t1,t1,1,1);
        for (int i=0; i<S.dim(0); i++) S(i,i)-=1.0;

        // error per matrix element
        double norm=S.normf();
        double small=sqrt(norm*norm/S.size());
        return (small<1.e-13);
    }

    /// are the vectors in flat (rank,kVec) shape?
    bool is_flat() const {
        return (vector_[0].ndim()==2);
    }
    public:
    /// are the vectors in structured (rank,k,..,k) shape (or TT_FULL/empty)?
    bool has_structure() const {
        return (type()==TT_FULL or has_no_data() or vector_[0].dim(1)==this->get_k());
    }
804 
805  private:
    /// return the number of physical dimensions
    unsigned int dim() const {return dim_;}

    /// return the number of vectors (effective/merged dimensions)
    unsigned int dim_eff() const {return vector_.size();}

    /// return the current rank (-1 for TT_FULL)
    long rank() const {return rank_;};

    /// return the polynomial order k per physical dimension
    unsigned int get_k() const {return maxk_;};

    /// return the number of coefficients of one configuration in one vector

    /// i.e. k^dim_per_vector, computed by repeated multiplication
    long kVec() const {
        const int dimpv=this->dim_per_vector();
        int kv=1;
        for (int i=0; i<dimpv; ++i) kv*=this->get_k();
//            const int kv1= pow(this->get_k(),this->dim_per_vector());
//            MADNESS_ASSERT(kv==kv1);
        return kv;
    }

    public:
    /// return the number of physical dimensions merged into each vector
    int dim_per_vector() const {
        const int nvec=vector_.size();
        const int dim=this->dim();
        MADNESS_ASSERT(dim%nvec==0);
        return dim/nvec;
    }

    /// return the weight of configuration i (no bounds check beyond Tensor's)
    double weights(const unsigned int& i) const {return weights_(i);};

    private:
    /// return the number of logical coefficients this conf represents
    unsigned int nCoeff() const {
        if (type()==TT_FULL) return ref_vector(0).size();
        return this->dim_eff()*this->kVec()*this->rank();
    };

    /// return an estimate of this object's memory footprint in bytes
    size_t real_size() const {
        size_t n=0;
        for (size_t i=0; i<vector_.size(); ++i) {
            n+=vector_[i].size()*sizeof(T) + sizeof(tensorT);
        }
        n+=weights_.size()*sizeof(double) + sizeof(Tensor<double>);
        n+=sizeof(*this);
        n+=s_.size()*sizeof(Slice);
        return n;
    }
858 
    /// Frobenius inner product of two SRConfs

    /// sum over all configuration pairs of the product of weights and
    /// per-dimension vector overlaps; for TT_FULL it is the plain trace.
    template<typename Q>
    friend TENSOR_RESULT_TYPE(T,Q) overlap(const SRConf<T>& rhs, const SRConf<Q>& lhs) {

        // fast return if either rank is 0
        if ((lhs.has_no_data()) or (rhs.has_no_data())) return 0.0;

        /*
         * the structure of an SRConf is (r,k) or (r,k',k), with
         * r the slowest index; the overlap is therefore simply calculated
         * as the matrix multiplication rhs*lhs^T
         */

        // some checks
        MADNESS_ASSERT(rhs.dim()==lhs.dim());
        MADNESS_ASSERT(rhs.dim()>0);

        typedef TENSOR_RESULT_TYPE(T,Q) resultT;

        if (rhs.type()==TT_FULL) {
            return rhs.ref_vector(0).trace(lhs.ref_vector(0));
        }

        const unsigned int dim_eff=rhs.dim_eff();

        // get the weight matrix
        Tensor<resultT> weightMatrix=outer(lhs.weights_(Slice(0,lhs.rank()-1)),
                rhs.weights_(Slice(0,rhs.rank()-1)));

        // calculate the overlap matrices for each dimension at a time
        for (unsigned int idim=0; idim<dim_eff; idim++) {
            // NOTE(review): lhs is SRConf<Q> but lhs2 is declared Tensor<T> --
            // this relies on Q==T or an implicit conversion; confirm intended
            const Tensor<T> lhs2=lhs.flat_vector(idim);
            const Tensor<Q> rhs2=rhs.flat_vector(idim);
            Tensor<resultT> ovlp(lhs.rank(),rhs.rank());
            inner_result(lhs2,rhs2,-1,-1,ovlp);

            // multiply all overlap matrices with the weight matrix
            weightMatrix.emul(ovlp);
        }

        //    return weightMatrix;
        const TENSOR_RESULT_TYPE(T,Q) overlap=weightMatrix.sum();
        return overlap;
    }

    /// 2-norm computed from the weights alone (requires TT_2D)
    typename TensorTypeData<T>::float_scalar_type svd_normf() const {
        if (has_no_data()) return 0.0;
        MADNESS_ASSERT(type()==TT_2D);
        return weights_(Slice(0,rank()-1)).normf();
    }

    /// Frobenius norm of this, via the full self-overlap (real T only)
    typename TensorTypeData<T>::float_scalar_type normf() const {

        // fast return if possible
        if (has_no_data()) return 0.0;
        if (type()==TT_FULL) return ref_vector(0).normf();

        // some checks
        MADNESS_ASSERT(dim()>0);
        MADNESS_ASSERT(not TensorTypeData<T>::iscomplex);

        // get the weight matrix
        Tensor<T> weightMatrix=outer(weights_(Slice(0,rank()-1)),weights_(Slice(0,rank()-1)));

        // calculate the overlap matrices for each dimension at a time
        for (unsigned int idim=0; idim<dim_eff(); idim++) {
            const Tensor<T> vec=flat_vector(idim);
            Tensor<T> ovlp(rank(),rank());
            inner_result(vec,vec,-1,-1,ovlp);

            // multiply all overlap matrices with the weight matrix
            weightMatrix.emul(ovlp);
        }

        typedef typename TensorTypeData<T>::float_scalar_type resultT;
        const resultT overlap=std::abs(weightMatrix.sum());
        return sqrt(overlap);
    }
939 
    /// scale this by a factor (applied to the weights only)
    void scale(const double& fac) {weights_.scale(fac);};

    /// complex scaling is not supported
    void scale(const double_complex& fac) {
        MADNESS_EXCEPTION("no complex scaling in SRConf",1);
    }

    /// are two configurations compatible (same dim and dims per vector)
    friend bool compatible(const SRConf& lhs, const SRConf& rhs) {
        return ((lhs.dim()==rhs.dim()) and (lhs.dim_per_vector()==rhs.dim_per_vector()));
    }

    /// apply the same transformation matrix c to every dimension

    /// @param[in] c  the (k,k') transformation matrix
    /// @return a new, transformed SRConf; this is unchanged
    SRConf<T> transform(const Tensor<T>& c) const {

        // fast return if possible
        if (this->has_no_data()) {
            return copy(*this);
        }

        // fast return for full rank tensor
        if (type()==TT_FULL) {
            return SRConf<T> (madness::transform(this->vector_[0],c));
        }

        // copying shrinks the vectors to (r,k,k,..)
        SRConf<T> result=copy(*this);

        // make sure this is not flattened
        MADNESS_ASSERT(this->has_structure());

        // these two loops go over all physical dimensions (dim = dim_eff * merged_dim)
        for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
            for (unsigned int jdim=1; jdim<this->ref_vector(idim).ndim(); jdim++) {

                // note: tricky ordering (jdim is missing): this is actually correct!
                // note: no slicing necessary, since we've copied this to result (incl shrinking)
//                    result.refVector_struct(idim)=madness::inner(result.refVector_struct(idim),c,1,0);
                result.ref_vector(idim)=madness::inner(result.ref_vector(idim),c,1,0);

            }
        }
        MADNESS_ASSERT(result.has_structure());
        return result;
    }

    /// apply a different transformation matrix to each physical dimension

    /// @param[in] c  array of transformation matrices, one per dimension
    /// @return a new, transformed SRConf; this is unchanged
    template<typename Q>
    SRConf<TENSOR_RESULT_TYPE(T,Q) > general_transform(const Tensor<Q> c[]) const {

        // fast return if possible
        if (this->has_no_data()) return SRConf<T>(copy(*this));
        if (type()==TT_FULL) {
            return SRConf<T> (madness::general_transform(this->vector_[0],c));
        }

        // copying shrinks the vectors to (r,k,k,..)
        SRConf<T> result=copy(*this);

        // make sure this is not flattened
        if (not this->has_structure()) {
            print("no structure!");
        }
        MADNESS_ASSERT(this->has_structure());

        long i=0;
        // these two loops go over all physical dimensions (dim = dim_eff * merged_dim)
        for (unsigned int idim=0; idim<this->dim_eff(); idim++) {
            for (unsigned int jdim=1; jdim<this->ref_vector(idim).ndim(); jdim++) {

                // note tricky ordering (jdim is missing): this is actually correct!
                // note: no slicing necessary, since we've copied this to result (incl shrinking)
                result.ref_vector(idim)=madness::inner(result.ref_vector(idim),c[i],1,0);
                i++;

            }
        }
        MADNESS_ASSERT(result.has_structure());
        return result;
    }

    /// apply the transformation matrix c along one physical dimension only

    /// @param[in] c     the transformation matrix (must be 2-d)
    /// @param[in] axis  the physical dimension to transform
    /// @return a new, transformed SRConf; this is unchanged
    SRConf<T> transform_dir(const Tensor<T>& c, const int& axis) const {

        if (this->has_no_data()) {
            return SRConf<T>(copy(*this));
        }

        // fast return for full rank tensor
        if (type()==TT_FULL) {
            return SRConf<T> (madness::transform_dir(this->vector_[0],c,axis));
        }

        // copying shrinks the vectors to (r,k,k,..)
        SRConf<T> result=copy(*this);

        // only a matrix is allowed for c
        MADNESS_ASSERT(c.ndim()==2);

        // make sure this is not flattened
        MADNESS_ASSERT(this->has_structure());

        // compute idim for accessing the vector_, and the dimension inside vector_
        // the +1 on jdim for the rank
        const long idim=axis/this->dim_per_vector();
        const long jdim=axis%this->dim_per_vector()+1;

        // note: no slicing necessary, since we've copied this to result (incl shrinking)
        result.ref_vector(idim)=madness::transform_dir(this->ref_vector(idim),c,jdim);
        MADNESS_ASSERT(result.has_structure());

        return result;
    }
1060 
1061  };
1062 
1064 
    /// orthonormalize and rank-reduce an SVD-like representation x*diag(w)*y^T

    /// Diagonalizes the Gram matrices of x and y, builds the weighted core
    /// matrix M in the eigenbases, SVDs M, and keeps only the singular
    /// values whose discarded squared tail stays below thresh^2.
    /// @param[in,out] x        (rank,k) tensor of first factors; replaced by the reduced set
    /// @param[in,out] y        (rank,k) tensor of second factors; replaced by the reduced set
    /// @param[in,out] weights  the rank weights; replaced by the kept singular values
    /// @param[in] thresh       truncation threshold
    template<typename T>
    void ortho3(Tensor<T>& x, Tensor<T>& y, Tensor<double>& weights, const double& thresh) {

#ifdef BENCH
        double cpu0=wall_time();
#endif
        typedef Tensor<T> tensorT;

        const long rank=x.dim(0);
        const double w_max=weights.absmax()*rank;    // max Frobenius norm


        // overlap of 1 and 2
        tensorT S1=inner(x,x,1,1);
        tensorT S2=inner(y,y,1,1);    // 0.5 / 2.1
#ifdef BENCH
        double cpu1=wall_time();
        SRConf<T>::time(1)+=(cpu1-cpu0);
#endif

//        print("norm(S1)",S1.normf());
//        print("norm(S2)",S2.normf());

        // diagonalize
        tensorT U1, U2;
        Tensor<double> e1, e2;
        syev(S1,U1,e1);
        syev(S2,U2,e2);    // 2.3 / 4.0
#ifdef BENCH
        double cpu3=wall_time();
        SRConf<T>::time(3)+=cpu3-cpu1;
#endif

        const double e1_max=e1.absmax();
        const double e2_max=e2.absmax();

        // fast return if possible: everything is below the threshold
        if ((e1_max*w_max<thresh) or (e2_max*w_max<thresh)) {
            x.clear();
            y.clear();
            weights.clear();
            return;
        }

        // remove small negative eigenvalues
        e1.screen(1.e-13);
        e2.screen(1.e-13);
        Tensor<double> sqrt_e1(rank), sqrt_e2(rank);


        // shrink U1, U2: drop eigenvectors with negligible eigenvalues
        int lo1=0;
        int lo2=0;
        for (unsigned int r=0; r<rank; r++) {
            if (e1(r)*w_max<thresh) lo1=r+1;
            if (e2(r)*w_max<thresh) lo2=r+1;
            sqrt_e1(r)=sqrt(std::abs(e1(r)));
            sqrt_e2(r)=sqrt(std::abs(e2(r)));
        }

        U1=U1(Slice(_),Slice(lo1,-1));
        U2=U2(Slice(_),Slice(lo2,-1));
        sqrt_e1=sqrt_e1(Slice(lo1,-1));
        sqrt_e2=sqrt_e2(Slice(lo2,-1));
        unsigned int rank1=rank-lo1;
        unsigned int rank2=rank-lo2;    // 0.0 / 0.0


        MADNESS_ASSERT(sqrt_e1.size()==rank1);
        MADNESS_ASSERT(sqrt_e2.size()==rank2);
#ifdef BENCH
        double cpu4=wall_time();
        SRConf<T>::time(4)+=cpu4-cpu3;
#endif


        // set up overlap M; include X+
        tensorT M(rank1,rank2);
        for (unsigned int i=0; i<rank1; i++) {
            for (unsigned int j=0; j<rank2; j++) {
                for (unsigned int r=0; r<rank; r++) {
                    M(i,j)+=U1(r,i)*sqrt_e1(i)*weights(r)*U2(r,j) * sqrt_e2(j);
//                    M(i,j)+=U1(r,i)*weights(r)*U2(r,j);
                }
            }
        }


        // include X-: scale the eigenvector columns by 1/sqrt(e)
        for (unsigned int r=0; r<rank1; r++) {
            double fac=1.0/sqrt_e1(r);
            for (unsigned int t=0; t<rank; t++) {
                U1(t,r)*=fac;
//                if (sqrt_e1(r)<thresh) throw;
            }
        }

        for (unsigned int r=0; r<rank2; r++) {
            double fac=1.0/sqrt_e2(r);
            for (unsigned int t=0; t<rank; t++) {
                U2(t,r)*=fac;
//                if (sqrt_e2(r)<thresh) throw;
            }
        }    // 0.2 / 0.6
#ifdef BENCH
        double cpu5=wall_time();
        SRConf<T>::time(5)+=cpu5-cpu4;
#endif

        // decompose M
        tensorT Up,VTp;
        Tensor<double> Sp;
        svd(M,Up,Sp,VTp);    // 1.5 / 3.0
#ifdef BENCH
        double cpu6=wall_time();
        SRConf<T>::time(6)+=cpu6-cpu5;
#endif

        // make transformation matrices
        Up=inner(Up,U1,0,1);
        VTp=inner(VTp,U2,1,1);



//        // find the maximal singular value that's supposed to contribute
//        // singular values are ordered (largest first)
//        double residual=0.0;
//        long i;
//        for (i=Sp.dim(0)-1; i>=0; i--) {
//            residual+=Sp(i)*Sp(i);
//            if (residual>thresh*thresh) break;
//        }
        long i=SRConf<T>::max_sigma(thresh,Sp.dim(0),Sp);

#ifdef BENCH
        double cpu7=wall_time();
        SRConf<T>::time(7)+=cpu7-cpu6;
#endif

//        i=std::min(i,long(0));

        Up=Up(Slice(0,i),Slice(_));
        VTp=VTp(Slice(0,i),Slice(_));


        // convert SVD output to our convention
        if (i>=0) {

            // transform 1 and 2
            x=inner(Up,x,1,0);
            y=inner(VTp,y,1,0);    // 0.5 / 2.5
            weights=Sp(Slice(0,i));

        } else {
            // nothing contributes: the result is rank 0
            x.clear();
            y.clear();
            weights.clear();
        }
#ifdef BENCH
        double cpu8=wall_time();
        SRConf<T>::time(8)+=cpu8-cpu7;
        SRConf<T>::time(0)+=cpu8-cpu0;
#endif
        return;
    }
1241 
1243 
 /// specialized version of ortho3: orthonormalize the sum of two SVD-like
 /// configurations (x1,y1,w1) + (x2,y2,w2) in place, overwriting the first.
 ///
 /// The vectors *within* each configuration are assumed orthonormal (see the
 /// comment below: only the cross overlaps <x1|x2> and <y1|y2> are computed);
 /// on exit x1,y1 hold the orthonormalized, rank-reduced vectors and w1 the
 /// new weights. If the whole sum is negligible wrt thresh, x1/y1/w1 are
 /// cleared (empty result).
 ///
 /// @param[in,out] x1,y1,w1  first configuration; overwritten with the result
 /// @param[in]     x2,y2,w2  second configuration (read-only)
 /// @param[in]     thresh    truncation threshold for the final singular values
1251  template<typename T>
1252  void ortho5(Tensor<T>& x1, Tensor<T>& y1, Tensor<double>& w1,
1253  const Tensor<T>& x2, const Tensor<T>& y2, const Tensor<double>& w2,
1254  const double& thresh) {
1255 
1256 #ifdef BENCH
1257  double cpu0=wall_time();
1258 #endif
1259  typedef Tensor<T> tensorT;
1260 
 // ranks of the two input configurations; rank is the size of the combined problem
1261  const long rank1=x1.dim(0);
1262  const long rank2=x2.dim(0);
1263  const long rank=rank1+rank2;
1264 
1265  // for convenience: blocks of the matrices
1266  const Slice s0(0,rank1-1), s1(rank1,rank-1);
1267 
 // crude upper bound used to scale the screening thresholds below
1268  const double w_max=std::max(w1.absmax(),w2.absmax());
1269  const double norm_max=w_max*rank; // max Frobenius norm
1270 
1271  // the overlap between 1 and 2;
1272  // the overlap of 1 and 1, and 2 and 2 is assumed to be the identity matrix
1273  tensorT Sx12=inner(x1,x2,1,1);
1274  tensorT Sy12=inner(y1,y2,1,1);
1275 #ifdef BENCH
1276  double cpu1=wall_time();
1277  SRConf<T>::time(11)+=cpu1-cpu0;
1278 #endif
1279 
1280  tensorT Sx(rank,rank);
1281  tensorT Sy(rank,rank);
1282 
1283  // the identity matrix (half of it)
 // adding the transpose below restores the full 1.0 on the diagonal and
 // symmetrizes the off-diagonal cross-overlap blocks in one step
1284  for (long i=0; i<rank; i++) {
1285  Sx(i,i)=0.5;
1286  Sy(i,i)=0.5;
1287  }
1288  Sx(s0,s1)=Sx12;
1289  Sy(s0,s1)=Sy12;
1290  Sx+=transpose(Sx);
1291  Sy+=transpose(Sy);
1292 
1293  // overlap of 1 and 2
1294 // tensorT S1=inner(x,x,1,1);
1295 // tensorT S2=inner(y,y,1,1); // 0.5 / 2.1
1296 #ifdef BENCH
1297  double cpu2=wall_time();
1298  SRConf<T>::time(12)+=cpu2-cpu1;
1299 #endif
1300 
1301  // diagonalize
 // symmetric eigenproblem on each overlap; U1/U2 hold eigenvectors, e1/e2 eigenvalues
1302  tensorT U1, U2;
1303  Tensor<double> e1, e2;
1304  syev(Sx,U1,e1);
1305  syev(Sx,U2,e2); // 2.3 / 4.0
1306 #ifdef BENCH
1307  double cpu3=wall_time();
1308  SRConf<T>::time(13)+=cpu3-cpu2;
1309 #endif
1310 
1311 // print("norm(Sx)",Sx.normf());
1312 // print("norm(Sy)",Sy.normf());
1313 
1314  const double e1_max=e1.absmax();
1315  const double e2_max=e2.absmax();
1316 
1317  // fast return if possible
 // if even the largest eigenvalue cannot contribute above thresh the whole
 // sum is negligible and the result is empty
1318  if ((e1_max*norm_max<thresh) or (e2_max*norm_max<thresh)) {
1319  x1.clear();
1320  y1.clear();
1321  w1.clear();
1322  return;
1323  }
1324 
1325  // remove small negative eigenvalues
 // presumably screen() zeroes entries with magnitude below 1.e-13 -- confirm
 // against Tensor::screen semantics
1326  e1.screen(1.e-13);
1327  e2.screen(1.e-13);
1328  Tensor<double> sqrt_e1(rank), sqrt_e2(rank);
1329 
1330 
1331  // shrink U1, U2
 // lo1/lo2 end up one past the *last* below-threshold eigenvalue; discarding
 // Slice(0,lo-1) is only correct if eigenvalues are sorted ascending
 // (LAPACK *syev convention) -- NOTE(review): confirm syev ordering
 // NOTE(review): r (unsigned) is compared against rank (long); safe since
 // rank>=0, but a signed/unsigned warning is expected here
1332  int lo1=0;
1333  int lo2=0;
1334  for (unsigned int r=0; r<rank; r++) {
1335  if (e1(r)<thresh/norm_max) lo1=r+1;
1336  else sqrt_e1(r)=sqrt(std::abs(e1(r)));
1337  if (e2(r)<thresh/norm_max) lo2=r+1;
1338  else sqrt_e2(r)=sqrt(std::abs(e2(r)));
1339  }
1340 
 // keep only the columns belonging to retained eigenvalues
1341  U1=U1(Slice(_),Slice(lo1,-1));
1342  U2=U2(Slice(_),Slice(lo2,-1));
1343  sqrt_e1=sqrt_e1(Slice(lo1,-1));
1344  sqrt_e2=sqrt_e2(Slice(lo2,-1));
1345  unsigned int rank_x=rank-lo1;
1346  unsigned int rank_y=rank-lo2; // 0.0 / 0.0
1347 
1348 
1349 // MADNESS_ASSERT(sqrt_e1.size()==rank_x);
1350 // MADNESS_ASSERT(sqrt_e2.size()==rank_y);
1351 
1352  // set up overlap M; include X+
1353 
1354 // for (unsigned int i=0; i<rank_x; ++i) U(i,_)*=sqrt_e1(i);
1355 // for (unsigned int i=0; i<rank_y; ++i) U(i,_)*=sqrt_e2(i);
1356 
 // fold the weights of both configurations into a copy of U1: rows 0..rank1-1
 // are scaled by w1, rows rank1..rank-1 by w2
1357  tensorT UU1=copy(U1);
1358  for (unsigned int i=0; i<rank1; ++i) UU1(i,_)*=w1(i);
1359  for (unsigned int i=rank1; i<rank; ++i) UU1(i,_)*=w2(i-rank1);
1360 
 // M(i,j) = sqrt_e1(i) * [U1^T W U2](i,j) * sqrt_e2(j), realized via an
 // element-wise multiply with the outer product of the sqrt eigenvalues
1361  tensorT M=inner(UU1,U2,0,0);
1362  tensorT ee=outer(sqrt_e1,sqrt_e2);
1363  M.emul(ee);
1364 
1365  // include X-
 // scale the retained eigenvector columns by 1/sqrt(e); kept eigenvalues are
 // > thresh/norm_max, so no division by zero here
1366  for (unsigned int r=0; r<rank_x; r++) {
1367  double fac=1.0/sqrt_e1(r);
1368  U1(_,r)*=fac;
1369 // for (unsigned int t=0; t<rank; t++) {
1370 // U1(t,r)*=fac;
1372 // }
1373  }
1374 
1375  for (unsigned int r=0; r<rank_y; r++) {
1376  double fac=1.0/sqrt_e2(r);
1377  U2(_,r)*=fac;
1378 // for (unsigned int t=0; t<rank; t++) {
1379 // U2(t,r)*=fac;
1381 // }
1382  } // 0.2 / 0.6
1383 #ifdef BENCH
1384  double cpu4=wall_time();
1385  SRConf<T>::time(14)+=cpu4-cpu3;
1386 #endif
1387 
1388 
1389  // decompose M
1390  tensorT Up,VTp;
1391  Tensor<double> Sp;
1392  svd(M,Up,Sp,VTp); // 1.5 / 3.0
1393 #ifdef BENCH
1394  double cpu5=wall_time();
1395  SRConf<T>::time(15)+=cpu5-cpu4;
1396 #endif
1397 
1398  // make transformation matrices
1399  Up=inner(Up,U1,0,1);
1400  VTp=inner(VTp,U2,1,1);
1401 #ifdef BENCH
1402  double cpu6=wall_time();
1403  SRConf<T>::time(16)+=cpu6-cpu5;
1404 #endif
1405 
1406  // find the maximal singular value that's supposed to contribute
1407  // singular values are ordered (largest first)
 // accumulate the squared tail until it exceeds thresh^2; i is then the last
 // index to keep (i==-1 means everything is discarded)
1408  double residual=0.0;
1409  long i;
1410  for (i=Sp.dim(0)-1; i>=0; i--) {
1411  residual+=Sp(i)*Sp(i);
1412  if (residual>thresh*thresh) break;
1413  }
1414 
1415  // convert SVD output to our convention
1416  if (i>=0) {
1417 
1418  // make it contiguous
1419  tensorT Up1=transpose(Up(Slice(0,i),s0));
1420  tensorT Up2=transpose(Up(Slice(0,i),s1));
1421  tensorT VTp1=transpose(VTp(Slice(0,i),s0));
1422  tensorT VTp2=transpose(VTp(Slice(0,i),s1));
1423 
1424  // transform 1 and 2
 // inner_result accumulates configuration 2's contribution into the result
 // already holding configuration 1's part
1425  x1=inner(Up1,x1,0,0);
1426  inner_result(Up2,x2,0,0,x1);
1427  y1=inner(VTp1,y1,0,0);
1428  inner_result(VTp2,y2,0,0,y1);
1429  w1=Sp(Slice(0,i));
1430 
1431  } else {
 // nothing survived truncation: return the empty configuration
1432  x1.clear();
1433  y1.clear();
1434  w1.clear();
1435  }
1436 #ifdef BENCH
1437  double cpu7=wall_time();
1438  SRConf<T>::time(17)+=cpu7-cpu6;
1439  SRConf<T>::time(10)+=cpu7-cpu0;
1440 #endif
1441  return;
1442  }
1443 
1444  template<typename T>
1445  static inline
1446  std::ostream& operator<<(std::ostream& s, const SRConf<T>& sr) {
1447 
1448  s << "dim_ " << sr.dim_ << "\n";;
1449  s << "rank_ " << sr.rank_ << "\n";;
1450  s << "maxk_ " << sr.maxk_ << "\n";;
1451  s << "vector_.size()" << sr.vector_.size() << "\n";
1452  s << "has_data() " << sr.has_data() << "\n";
1453  s << "TensorType " << sr.type() << "\n\n";
1454  return s;
1455  }
1456 }
1457 
1458 #endif /* SRCONF_H_ */
~SRConf()
dtor
Definition: srconf.h:312
C++ prototypes for Fortran LAPACK with associated typedefs and macros.
long rank_
what is the rank of this
Definition: srconf.h:99
T norm(Vector< T, N > v)
Compute norm of a Vector.
Definition: array.h:447
std::complex< double > double_complex
Definition: lineplot.cc:16
SRConf(const unsigned int &dim, const unsigned int &k, const TensorType &tt)
ctor with dimensions for a vector configuration (tested)
Definition: srconf.h:144
SRConf(const Tensor< double > &weights, const std::vector< Tensor< T > > &vectors, const unsigned int &dim, const unsigned int maxk, const TensorType &tt)
ctor with provided weights and effective vectors; shallow copy
Definition: srconf.h:176
TensorType
low rank representations of tensors (see gentensor.h)
Definition: tensor.h:275
std::vector< tensorT > vector_
Definition: srconf.h:96
Tensor< T > tensorT
Definition: srconf.h:83
FLOAT e1(FLOAT z)
Definition: y1.cc:144
TensorType type() const
return the tensor type
Definition: srconf.h:324
unsigned int dim_
the number of dimensions (the order of the tensor)
Definition: srconf.h:89
void inner_result(const Tensor< T > &left, const Tensor< Q > &right, long k0, long k1, Tensor< TENSOR_RESULT_TYPE(T, Q) > &result)
Accumulate inner product into user provided, contiguous, correctly sized result tensor.
Definition: tensor.h:2204
SRConf()
default ctor
Definition: srconf.h:140
const mpreal dim(const mpreal &a, const mpreal &b, mp_rnd_t rnd_mode)
Definition: mpreal.h:2201
GenTensor< T > outer(const GenTensor< T > &left, const GenTensor< T > &right)
Outer product ... result(i,j,...,p,q,...) = left(i,j,...)*right(p,q,...)
Definition: gentensor.h:230
void serialize(Archive &ar)
Definition: srconf.h:315
void ortho5(Tensor< T > &x1, Tensor< T > &y1, Tensor< double > &w1, const Tensor< T > &x2, const Tensor< T > &y2, const Tensor< double > &w2, const double &thresh)
specialized version of ortho3
Definition: srconf.h:1252
const Tensor< T > & ref_vector(const unsigned int &idim) const
return reference to one of the vectors F
Definition: srconf.h:694
const double beta
Definition: gygi_soltion.cc:63
T inner(const vecfunc< T, NDIM > &a, const vecfunc< T, NDIM > &b)
the non-linear solver requires an inner product
Definition: nemo.h:112
SRConf(const SRConf &rhs)
copy ctor (tested); shallow copy
Definition: srconf.h:170
void syev(const Tensor< T > &A, Tensor< T > &V, Tensor< typename Tensor< T >::scalar_type > &e)
Real-symmetric or complex-Hermitian eigenproblem.
Definition: lapack.cc:478
TensorType tensortype_
how will this be represented
Definition: srconf.h:112
Tensor< T > & ref_vector(const unsigned int &idim)
return reference to one of the vectors F
Definition: srconf.h:689
Defines and implements most of Tensor.
#define TENSOR_RESULT_TYPE(L, R)
This macro simplifies access to TensorResultType.
Definition: type_data.h:209
static int max_sigma(const double &thresh, const int &rank, const Tensor< double > &w)
Definition: srconf.h:125
SRConf & operator=(const SRConf &rhs)
assignment operator (tested), shallow copy of vectors
Definition: srconf.h:237
const SRConf get_configs(const int &start, const int &end) const
Definition: srconf.h:285
const T1 &f1 return GTEST_2_TUPLE_() T(f0, f1)
Definition: tensor.h:275
#define max(a, b)
Definition: lda.h:53
Vector< T, 1 > vec(T x)
Your friendly neighborhood factory function.
Definition: array.h:456
void scale(World &world, std::vector< Function< T, NDIM > > &v, const std::vector< Q > &factors, bool fence=true)
Scales inplace a vector of functions by distinct values.
Definition: vmra.h:290
const int k
Definition: dielectric.cc:184
void ortho3(Tensor< T > &x, Tensor< T > &y, Tensor< double > &weights, const double &thresh)
sophisticated version of ortho2
Definition: srconf.h:1077
static const bool check_orthonormality
check orthonormality at low rank additions
Definition: srconf.h:86
Definition: srconf.h:64
friend bool compatible(const SRConf &lhs, const SRConf &rhs)
check compatibility
Definition: srconf.h:948
Prototypes for a partial interface from Tensor to LAPACK.
Tensor< TENSOR_RESULT_TYPE(T, Q)> transform_dir(const Tensor< T > &t, const Tensor< Q > &c, int axis)
Transforms one dimension of the tensor t by the matrix c, returns new contiguous tensor.
Definition: tensor.h:1929
#define TENSOR_MAXDIM
Definition: tensor_macros.h:194
Definition: tensor.h:275
void svd(const Tensor< T > &a, Tensor< T > &U, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &VT)
Compute the singular value decomposition of an n-by-m matrix using *gesvd.
Definition: lapack.cc:273
SRConf(const Tensor< double > &weights, const tensorT &vector1, const tensorT &vector2, const unsigned int &dim, const unsigned int maxk)
explicit ctor with two vectors (aka SVD), shallow
Definition: srconf.h:215
tensorT sqrt(const tensorT &s, double tol=1e-8)
Computes matrix square root (not used any more?)
Definition: DFcode/moldft.cc:446
double weights(const unsigned int &i) const
return the weight
Definition: srconf.h:838
bool has_data() const
does this have any data?
Definition: srconf.h:327
const mpreal sum(const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode)
Definition: mpreal.cc:241
int dim_per_vector() const
return the number of physical dimensions
Definition: srconf.h:830
GenTensor< TENSOR_RESULT_TYPE(T, Q)> general_transform(const GenTensor< T > &t, const Tensor< Q > c[])
Transform all dimensions of the tensor t by distinct matrices c.
Definition: gentensor.h:1342
friend SRConf< T > copy(const SRConf< T > &rhs)
deep copy of rhs, shrink
Definition: srconf.h:611
std::vector< Slice > s_
Definition: srconf.h:109
Tensor< T > transpose(const Tensor< T > &t)
Returns a new deep copy of the transpose of the input tensor.
Definition: tensor.h:1945
SRConf(const tensorT &vector1)
explicit ctor with one vector (aka full representation), shallow
Definition: srconf.h:202
unsigned int maxk_
Definition: srconf.h:105
void print(const A &a)
Print a single item to std::cout terminating with new line.
Definition: print.h:122
#define MADNESS_EXCEPTION(msg, value)
Definition: worldexc.h:88
bool has_structure() const
return if this has a tensor structure (has not been flattened)
Definition: srconf.h:801
Tensor< typename Tensor< T >::scalar_type > weights_
for each configuration the weight; length should be r
Definition: srconf.h:92
A slice defines a sub-range or patch of a dimension.
Definition: slice.h:103
Definition: gentensor.h:123
void orthonormalize(const double &thresh)
orthonormalize this
Definition: srconf.h:415
Definition: tensor.h:275
Holds machinery to set up Functions/FuncImpls using various Factories and Interfaces.
Definition: chem/atomutil.cc:45
const double c
Definition: gfit.cc:200
std::vector< Function< TENSOR_RESULT_TYPE(T, R), NDIM > > transform(World &world, const std::vector< Function< T, NDIM > > &v, const DistributedMatrix< R > &c, bool fence=true)
Definition: chem/SCF.cc:86
friend const SRConf< Q > &lhs if((lhs.has_no_data()) or(rhs.has_no_data())) return 0.0
void swap(mpfr::mpreal &x, mpfr::mpreal &y)
Definition: mpreal.h:3069
double wall_time()
Returns the wall time in seconds relative to arbitrary origin.
Definition: world.cc:248
void normalize(World &world, std::vector< Function< T, NDIM > > &v, bool fence=true)
Normalizes a vector of functions — v[i] = v[i].scale(1.0/v[i].norm2())
Definition: vmra.h:765