MADNESS  version 0.9
mra.h
Go to the documentation of this file.
1 /*
2  This file is part of MADNESS.
3 
4  Copyright (C) 2007,2010 Oak Ridge National Laboratory
5 
6  This program is free software; you can redistribute it and/or modify
7  it under the terms of the GNU General Public License as published by
8  the Free Software Foundation; either version 2 of the License, or
9  (at your option) any later version.
10 
11  This program is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with this program; if not, write to the Free Software
18  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 
20  For more information please contact:
21 
22  Robert J. Harrison
23  Oak Ridge National Laboratory
24  One Bethel Valley Road
25  P.O. Box 2008, MS-6367
26 
27  email: harrisonrj@ornl.gov
28  tel: 865-241-3937
29  fax: 865-572-0680
30 
31 
32  $Id$
33 
34 */
35 
36 
37 #ifndef MADNESS_MRA_MRA_H__INCLUDED
38 #define MADNESS_MRA_MRA_H__INCLUDED
39 
49 #include <madness/world/world.h>
50 #include <madness/misc/misc.h>
51 #include <madness/tensor/tensor.h>
52 
53 #define FUNCTION_INSTANTIATE_1
54 #define FUNCTION_INSTANTIATE_2
55 #define FUNCTION_INSTANTIATE_3
56 #if !defined(HAVE_IBMBGP) || !defined(HAVE_IBMBGQ)
57 #define FUNCTION_INSTANTIATE_4
58 #define FUNCTION_INSTANTIATE_5
59 #define FUNCTION_INSTANTIATE_6
60 #endif
61 
62 static const bool VERIFY_TREE = false; //true;
63 
64 
65 namespace madness {
66  void startup(World& world, int argc, char** argv);
67 }
68 
69 #include <madness/mra/key.h>
70 #include <madness/mra/twoscale.h>
71 #include <madness/mra/legendre.h>
72 #include <madness/mra/indexit.h>
73 #include <madness/world/parar.h>
74 #include <madness/world/worlddc.h>
77 #include <madness/mra/lbdeux.h>
78 #include <madness/mra/funcimpl.h>
79 
80 // some forward declarations
81 namespace madness {
82 
83  template<typename T, std::size_t NDIM>
84  class FunctionImpl;
85 
86  template<typename T, std::size_t NDIM>
87  class Function;
88 
89  template<typename T, std::size_t NDIM>
90  class FunctionNode;
91 
92  template<typename T, std::size_t NDIM>
93  class FunctionFactory;
94 
95  template<typename T, std::size_t NDIM>
96  class FunctionFunctorInterface;
97 
98  template<typename T, std::size_t NDIM>
99  struct leaf_op;
100 
101  template<typename T, std::size_t NDIM>
102  struct mul_leaf_op;
103 
104  template<typename T, std::size_t NDIM>
105  struct hartree_leaf_op;
106 
107  template<typename T, std::size_t NDIM, std::size_t LDIM, typename opT>
109 
110  template<typename T, std::size_t NDIM, typename opT>
111  struct op_leaf_op;
112 
113  template<typename T, std::size_t NDIM>
114  struct error_leaf_op;
115 
116 }
117 
118 
119 namespace madness {
122 
124  template <typename T, std::size_t NDIM>
126  // We make all of the content of Function and FunctionImpl
127  // public with the intent of avoiding the cumbersome forward
128  // and friend declarations. However, this open access should
129  // not be abused.
130 
131  private:
133 
134  public:
139 
/// Asserts that the function has an implementation attached.
/// Aborts via MADNESS_ASSERT if this function is default constructed (impl null).
inline void verify() const {
    MADNESS_ASSERT(impl);
}
144 
146  bool is_initialized() const {
147  return impl.get();
148  }
149 
151 
153  Function() : impl() {}
154 
155 
/// Constructor from a FunctionFactory specification; collective operation.
Function(const factoryT& factory)
    : impl(new FunctionImpl<T,NDIM>(factory)) {
}
161 
162 
165  : impl(f.impl) {
166  }
167 
168 
172  if (this != &f) impl = f.impl;
173  return *this;
174  }
175 
178 
180 
185  Future<T> eval(const coordT& xuser) const {
187  const double eps=1e-15;
188  verify();
190  coordT xsim;
191  user_to_sim(xuser,xsim);
192  // If on the boundary, move the point just inside the
193  // volume so that the evaluation logic does not fail
194  for (std::size_t d=0; d<NDIM; ++d) {
195  if (xsim[d] < -eps) {
196  MADNESS_EXCEPTION("eval: coordinate lower-bound error in dimension", d);
197  }
198  else if (xsim[d] < eps) {
199  xsim[d] = eps;
200  }
201 
202  if (xsim[d] > 1.0+eps) {
203  MADNESS_EXCEPTION("eval: coordinate upper-bound error in dimension", d);
204  }
205  else if (xsim[d] > 1.0-eps) {
206  xsim[d] = 1.0-eps;
207  }
208  }
209 
210  Future<T> result;
211  impl->eval(xsim, impl->key0(), result.remote_ref(impl->world));
212  return result;
213  }
214 
216 
219  std::pair<bool,T> eval_local_only(const Vector<double,NDIM>& xuser, Level maxlevel) const {
220  const double eps=1e-15;
221  verify();
223  coordT xsim;
224  user_to_sim(xuser,xsim);
225  // If on the boundary, move the point just inside the
226  // volume so that the evaluation logic does not fail
227  for (std::size_t d=0; d<NDIM; ++d) {
228  if (xsim[d] < -eps) {
229  MADNESS_EXCEPTION("eval: coordinate lower-bound error in dimension", d);
230  }
231  else if (xsim[d] < eps) {
232  xsim[d] = eps;
233  }
234 
235  if (xsim[d] > 1.0+eps) {
236  MADNESS_EXCEPTION("eval: coordinate upper-bound error in dimension", d);
237  }
238  else if (xsim[d] > 1.0-eps) {
239  xsim[d] = 1.0-eps;
240  }
241  }
242  return impl->eval_local_only(xsim,maxlevel);
243  }
244 
251  Future<Level> evaldepthpt(const coordT& xuser) const {
253  const double eps=1e-15;
254  verify();
256  coordT xsim;
257  user_to_sim(xuser,xsim);
258  // If on the boundary, move the point just inside the
259  // volume so that the evaluation logic does not fail
260  for (std::size_t d=0; d<NDIM; ++d) {
261  if (xsim[d] < -eps) {
262  MADNESS_EXCEPTION("eval: coordinate lower-bound error in dimension", d);
263  }
264  else if (xsim[d] < eps) {
265  xsim[d] = eps;
266  }
267 
268  if (xsim[d] > 1.0+eps) {
269  MADNESS_EXCEPTION("eval: coordinate upper-bound error in dimension", d);
270  }
271  else if (xsim[d] > 1.0-eps) {
272  xsim[d] = 1.0-eps;
273  }
274  }
275 
276  Future<Level> result;
277  impl->evaldepthpt(xsim, impl->key0(), result.remote_ref(impl->world));
278  return result;
279  }
280 
281 
283 
288  Future<long> evalR(const coordT& xuser) const {
290  const double eps=1e-15;
291  verify();
293  coordT xsim;
294  user_to_sim(xuser,xsim);
295  // If on the boundary, move the point just inside the
296  // volume so that the evaluation logic does not fail
297  for (std::size_t d=0; d<NDIM; ++d) {
298  if (xsim[d] < -eps) {
299  MADNESS_EXCEPTION("eval: coordinate lower-bound error in dimension", d);
300  }
301  else if (xsim[d] < eps) {
302  xsim[d] = eps;
303  }
304 
305  if (xsim[d] > 1.0+eps) {
306  MADNESS_EXCEPTION("eval: coordinate upper-bound error in dimension", d);
307  }
308  else if (xsim[d] > 1.0-eps) {
309  xsim[d] = 1.0-eps;
310  }
311  }
312 
313  Future<long> result;
314  impl->evalR(xsim, impl->key0(), result.remote_ref(impl->world));
315  return result;
316  }
317 
319 
322 
325 
/// Evaluates the function on a regular grid over a user-coordinate cell
/// (used for plotting); collective.
/// @param cell (NDIM,2) tensor of [lo,hi] per dimension in user coordinates
/// @param npt number of grid points per dimension
/// @param eval_refine passed through to impl's plot evaluation
/// @return tensor of values on the grid
Tensor<T> eval_cube(const Tensor<double>& cell,
                    const std::vector<long>& npt,
                    bool eval_refine = false) const {
    MADNESS_ASSERT(static_cast<std::size_t>(cell.dim(0))>=NDIM && cell.dim(1)==2 && npt.size()>=NDIM);
    const double eps=1e-14;
    verify();
    reconstruct();
    coordT simlo, simhi;
    for (std::size_t d=0; d<NDIM; ++d) {
        simlo[d] = cell(d,0);
        simhi[d] = cell(d,1);
    }
    user_to_sim(simlo, simlo);
    user_to_sim(simhi, simhi);

    // Move the bounding box infintesimally inside dyadic
    // points so that the evaluation logic does not fail
    for (std::size_t d=0; d<NDIM; ++d) {
        MADNESS_ASSERT(simhi[d] >= simlo[d]);
        MADNESS_ASSERT(simlo[d] >= 0.0);
        MADNESS_ASSERT(simhi[d] <= 1.0);

        double delta = eps*(simhi[d]-simlo[d]);
        simlo[d] += delta;
        simhi[d] -= 2*delta; // deliberate asymmetry
    }
    return impl->eval_plot_cube(simlo, simhi, npt, eval_refine);
}
358 
359 
361 
370  T operator()(const coordT& xuser) const {
372  verify();
373  if (is_compressed()) reconstruct();
374  T result;
375  if (impl->world.rank() == 0) result = eval(xuser).get();
376  impl->world.gop.broadcast(result);
377  //impl->world.gop.fence();
378  return result;
379  }
380 
382 
384  T operator()(double x, double y=0, double z=0, double xx=0, double yy=0, double zz=0) const {
385  coordT r;
386  r[0] = x;
387  if (NDIM>=2) r[1] = y;
388  if (NDIM>=3) r[2] = z;
389  if (NDIM>=4) r[3] = xx;
390  if (NDIM>=5) r[4] = yy;
391  if (NDIM>=6) r[5] = zz;
392  return (*this)(r);
393  }
394 
405  Level depthpt(const coordT& xuser) const {
407  verify();
408  if (is_compressed()) reconstruct();
409  Level result;
410  if (impl->world.rank() == 0) result = evaldepthpt(xuser).get();
411  impl->world.gop.broadcast(result);
412  //impl->world.gop.fence();
413  return result;
414  }
415 
417 
/// Returns the local contribution to the squared error against an exact
/// functor; the function must be reconstructed.
template <typename funcT>
double errsq_local(const funcT& func) const {
    verify();
    if (is_compressed()) MADNESS_EXCEPTION("Function:errsq_local:not reconstructed",0);
    return impl->errsq_local(func);
}
430 
431 
433 
438  template <typename funcT>
439  double err(const funcT& func) const {
441  verify();
442  if (VERIFY_TREE) verify_tree();
443  if (is_compressed()) reconstruct();
444  if (VERIFY_TREE) verify_tree();
445  double local = impl->errsq_local(func);
446  impl->world.gop.sum(local);
447  impl->world.gop.fence();
448  return sqrt(local);
449  }
450 
/// Debugging: checks the tree for consistency; no-op if uninitialized.
void verify_tree() const {
    if (impl) impl->verify_tree();
}
456 
457 
459 
461  bool is_compressed() const {
463  if (impl)
464  return impl->is_compressed();
465  else
466  return false;
467  }
468 
469 
471  std::size_t tree_size() const {
473  if (!impl) return 0;
474  return impl->tree_size();
475  }
476 
478  void print_size(const std::string name) const {
479  if (!impl) print("function",name,"not assigned yet");
480  impl->print_size(name);
481  }
482 
484  std::size_t max_depth() const {
486  if (!impl) return 0;
487  return impl->max_depth();
488  }
489 
490 
492  std::size_t max_local_depth() const {
494  if (!impl) return 0;
495  return impl->max_local_depth();
496  }
497 
498 
500  std::size_t max_nodes() const {
502  if (!impl) return 0;
503  return impl->max_nodes();
504  }
505 
507  std::size_t min_nodes() const {
509  if (!impl) return 0;
510  return impl->min_nodes();
511  }
512 
513 
515  std::size_t size() const {
517  if (!impl) return 0;
518  return impl->size();
519  }
520 
521 
523  bool autorefine() const {
525  if (!impl) return true;
526  return impl->get_autorefine();
527  }
528 
529 
531 
/// Sets the autorefine flag; collective, with optional global fence.
void set_autorefine(bool value, bool fence = true) {
    verify();
    impl->set_autorefine(value);
    if (fence) impl->world.gop.fence();
}
539 
540 
542  double thresh() const {
544  if (!impl) return 0.0;
545  return impl->get_thresh();
546  }
547 
548 
550 
/// Sets the truncation threshold; collective, with optional global fence.
void set_thresh(double value, bool fence = true) {
    verify();
    impl->set_thresh(value);
    if (fence) impl->world.gop.fence();
}
558 
559 
/// Returns the wavelet order (number of quadrature/polynomial terms).
int k() const {
    verify();
    return impl->get_k();
}
566 
567 
569 
/// Truncates small coefficients against tol (collective if fence).
/// @param tol truncation tolerance; 0.0 means use the function's threshold
/// @param fence whether to fence before returning
/// @return *this for chaining; no-op for an uninitialized function
Function<T,NDIM>& truncate(double tol = 0.0, bool fence = true) {
    if (!impl) return *this;
    verify();
//    if (!is_compressed()) compress();
    impl->truncate(tol,fence);
    if (VERIFY_TREE) verify_tree();
    return *this;
}
586 
587 
591  verify();
592  return impl;
593  }
594 
598  this->impl = impl;
599  }
600 
601 
603 
607  this->impl->set_functor(functor);
608  print("set functor in mra.h");
609  }
610 
611  bool is_on_demand() const {return this->impl->is_on_demand();}
612 
614 
616  template <typename R>
617  void set_impl(const Function<R,NDIM>& f, bool zero = true) {
618  impl = std::shared_ptr<implT>(new implT(*f.get_impl(), f.get_pmap(), zero));
619  }
620 
/// Returns the world this function lives in; the function must be initialized.
World& world() const {
    verify();
    return impl->world;
}
627 
628 
632  verify();
633  return impl->get_pmap();
634  }
635 
636 
638 
/// Returns the local contribution to the squared 2-norm (no communication).
double norm2sq_local() const {
    verify();
    return impl->norm2sq_local();
}
645 
646 
648 
/// Returns the global 2-norm (collective): sqrt of globally summed
/// local squared norms.
double norm2() const {
    verify();
    if (VERIFY_TREE) verify_tree();
    double local = impl->norm2sq_local();

    impl->world.gop.sum(local);
    impl->world.gop.fence();
    return sqrt(local);
}
660 
661 
/// Computes per-node norms over the tree (collective if fence).
/// Logically const: only cached norm data changes, hence the const_cast.
void norm_tree(bool fence = true) const {
    verify();
    if (VERIFY_TREE) verify_tree();
    if (is_compressed()) reconstruct();
    const_cast<Function<T,NDIM>*>(this)->impl->norm_tree(fence);
}
670 
671 
673 
/// Compresses the function (scaling-function -> wavelet basis); collective.
/// Logically const: the represented function is unchanged, only its
/// representation, hence the const_cast. No-op if already compressed or
/// uninitialized.
const Function<T,NDIM>& compress(bool fence = true) const {
    if (!impl || is_compressed()) return *this;
    if (VERIFY_TREE) verify_tree();
    impl->world.gop.fence();
    const_cast<Function<T,NDIM>*>(this)->impl->compress(false, false, false, fence);
    return *this;
}
691 
692 
694 
/// Converts to non-standard (redundant) compressed form; collective.
/// @param keepleaves if true the scaling-function leaf coefficients are retained
/// @param fence whether to fence before returning
void nonstandard(bool keepleaves, bool fence=true) {
    verify();
    if (impl->is_nonstandard()) return;
    if (VERIFY_TREE) verify_tree();
    if (is_compressed()) reconstruct();
    impl->compress(true, keepleaves, false, fence);
}
709 
711 
/// Converts from non-standard back to standard compressed form; collective.
void standard(bool fence = true) {
    verify();
    impl->standard(fence);
    if (fence && VERIFY_TREE) verify_tree();
}
725 
727 
/// Reconstructs the function (wavelet -> scaling-function basis); collective.
/// Logically const (representation change only), hence the const_cast.
/// No-op if uninitialized or already reconstructed.
void reconstruct(bool fence = true) const {
    if (!impl || !is_compressed()) return;
    const_cast<Function<T,NDIM>*>(this)->impl->reconstruct(fence);
    if (fence && VERIFY_TREE) verify_tree(); // Must be after in case nonstandard
}
743 
744 
/// Sums scaling coefficients down the tree (for a sum-of-pieces tree); collective.
void sum_down(bool fence = true) const {
    verify();
    const_cast<Function<T,NDIM>*>(this)->impl->sum_down(fence);
    if (fence && VERIFY_TREE) verify_tree(); // Must be after in case nonstandard
}
753 
754 
/// Refines the tree wherever the user-supplied predicate op says to; collective.
template <typename opT>
void refine_general(const opT& op, bool fence = true) const {
    verify();
    if (is_compressed()) reconstruct();
    impl->refine(op, fence);
}
763 
764 
766  bool operator()(implT* impl, const Key<NDIM>& key, const nodeT& t) const {
767  return impl->autorefine_square_test(key, t);
768  }
769 
770  template <typename Archive> void serialize (Archive& ar) {}
771  };
772 
/// Refines the tree using the default autorefine-on-square criterion; collective.
void refine(bool fence = true) {
    refine_general(autorefine_square_op(), fence);
}
777 
780  bool fence = true) const {
781  verify();
782  reconstruct();
783  impl->broaden(bc.is_periodic(), fence);
784  }
785 
786 
/// Gets the scaling-function coefficients at level n as a dense tensor
/// (special-purpose interface); converts to non-standard form first.
Tensor<T> coeffs_for_jun(Level n, long mode=0) {
    nonstandard(true,true);
    return impl->coeffs_for_jun(n,mode);
    //return impl->coeffs_for_jun(n);
}
794 
795 
797 
799  void clear(bool fence = true) {
801  if (impl) {
802  World& world = impl->world;
803  impl.reset();
804  if (fence) world.gop.fence();
805  }
806  }
807 
/// Prints a text representation of the tree to os; no-op if uninitialized.
void print_tree(std::ostream& os = std::cout) const {
    if (impl) impl->print_tree(os);
}
813 
/// Prints the tree as a Graphviz "digraph" to os; no-op body if uninitialized
/// (the digraph wrapper is still emitted).
void print_tree_graphviz(std::ostream& os = std::cout) const {
    os << "digraph G {" << std::endl;
    if (impl) impl->print_tree_graphviz(os);
    os << "}" << std::endl;
}
821 
823 
/// Prints summary information about the function; no-op if uninitialized.
void print_info() const {
    if (impl) impl->print_info();
}
829 
831  T (*f)(T);
833  void operator()(const Key<NDIM>& key, Tensor<T>& t) const {
834  UNARY_OPTIMIZED_ITERATOR(T, t, *_p0 = f(*_p0));
835  }
836  template <typename Archive> void serialize(Archive& ar) {}
837  };
838 
/// Applies a plain function pointer pointwise to the function values.
void unaryop(T (*f)(T)) {
    // Must fence here due to temporary object on stack
    // stopping us returning before complete
    this->unaryop(SimpleUnaryOpWrapper(f));
}
845 
846 
/// Applies op pointwise to the function values (reconstructs first); collective.
template <typename opT>
void unaryop(const opT& op, bool fence=true) {
    verify();
    reconstruct();
    impl->unary_op_value_inplace(op, fence);
}
855 
856 
/// Applies op to the coefficient tensors in place; collective if fence.
template <typename opT>
void unaryop_coeff(const opT& op,
                   bool fence = true) {
    verify();
    impl->unary_op_coeff_inplace(op, fence);
}
865 
866 
/// Applies op to each tree node in place; collective if fence.
template <typename opT>
void unaryop_node(const opT& op,
                  bool fence = true) {
    verify();
    impl->unary_op_node_inplace(op, fence);
}
875 
876 
/// Helper: conjugates a coefficient tensor in place (used as a node callback;
/// the Key parameter identifies the node but is unused here).
static void doconj(const Key<NDIM>, Tensor<T>& t) {
    t.conj();
}
881 
883 
885  Function<T,NDIM> conj(bool fence = true) {
888  return *this;
889  }
890 
891 
893 
/// Scales the function in place by the scalar q; collective if fence.
/// @return *this for chaining
template <typename Q>
Function<T,NDIM>& scale(const Q q, bool fence=true) {
    verify();
    if (VERIFY_TREE) verify_tree();
    impl->scale_inplace(q,fence);
    return *this;
}
903 
904 
908  verify();
909  if (VERIFY_TREE) verify_tree();
910  impl->add_scalar_inplace(t,fence);
911  return *this;
912  }
913 
914 
916 
/// In-place generalized axpy: this = alpha*this + beta*other; collective.
/// Dispatches on the compression state of *this; other must be in a
/// compatible state (NOTE(review): state assertions elided in this listing).
template <typename Q, typename R>
Function<T,NDIM>& gaxpy(const T& alpha,
                        const Function<Q,NDIM>& other, const R& beta, bool fence=true) {
    verify();
    other.verify();
    if (is_compressed()) impl->gaxpy_inplace(alpha,*other.get_impl(),beta,fence);
    if (not is_compressed()) impl->gaxpy_inplace_reconstructed(alpha,*other.get_impl(),beta,fence);
    return *this;
}
934 
935 
937 
942  template <typename Q>
945  if (NDIM<=3) {
946  compress();
947  other.compress();
948  } else {
949  reconstruct();
950  other.reconstruct();
951  }
953  if (VERIFY_TREE) verify_tree();
954  if (VERIFY_TREE) other.verify_tree();
955  return gaxpy(T(1.0), other, Q(1.0), true);
956  }
957 
958 
960 
962  template <typename Q>
965  if (NDIM<=3) {
966  compress();
967  other.compress();
968  } else {
969  reconstruct();
970  other.reconstruct();
971  }
973  if (VERIFY_TREE) verify_tree();
974  if (VERIFY_TREE) other.verify_tree();
975  return gaxpy(T(1.0), other, Q(-1.0), true);
976  }
977 
978 
980 
982  template <typename Q>
984  operator*=(const Q q) {
986  scale(q,true);
987  return *this;
988  }
989 
990 
992 
/// Squares the function in place (pointwise); reconstructs first; collective.
Function<T,NDIM>& square(bool fence = true) {
    if (is_compressed()) reconstruct();
    if (VERIFY_TREE) verify_tree();
    impl->square_inplace(fence);
    return *this;
}
1001 
/// Replaces the function with its absolute value (pointwise); collective.
Function<T,NDIM>& abs(bool fence = true) {
    if (is_compressed()) reconstruct();
    if (VERIFY_TREE) verify_tree();
    impl->abs_inplace(fence);
    return *this;
}
1010 
1014  if (is_compressed()) reconstruct();
1015  if (VERIFY_TREE) verify_tree();
1016  impl->abs_square_inplace(fence);
1017  return *this;
1018  }
1019 
1021 
/// Returns the local contribution to the trace (integral) — no communication.
T trace_local() const {
    if (!impl) return 0.0;
    if (VERIFY_TREE) verify_tree();
    return impl->trace_local();
}
1031 
1032 
/// Returns the global trace (integral of the function); collective.
T trace() const {
    if (!impl) return 0.0;
    T sum = impl->trace_local();
    impl->world.gop.sum(sum);
    impl->world.gop.fence();
    return sum;
}
1042 
1043 
/// Returns the local contribution to <this|g>; both must be compressed
/// (NOTE(review): the assertion on this->is_compressed() appears elided
/// in this listing; only g is asserted here).
template <typename R>
TENSOR_RESULT_TYPE(T,R) inner_local(const Function<R,NDIM>& g) const {
    MADNESS_ASSERT(g.is_compressed());
    if (VERIFY_TREE) verify_tree();
    if (VERIFY_TREE) g.verify_tree();
    return impl->inner_local(*(g.get_impl()));
}
1054 
1055 
1057 
1059  template<typename R>
1063 
1064  // clear what we have
1065  impl->get_coeffs().clear();
1066 
1067  leaf_op<T,NDIM> gnode_is_leaf(g.get_impl().get());
1068  impl->make_Vphi(gnode_is_leaf,fence);
1069  return *this;
1070 
1071  }
1072 
1074 
/// Rebuilds this function's coefficients using op to decide leaf nodes;
/// discards any existing coefficients first. Collective if fence.
template<typename opT>
Function<T,NDIM>& fill_tree(const opT& op, bool fence=true) {
    // clear what we have
    impl->get_coeffs().clear();
    op_leaf_op<T,NDIM,opT> leaf_op(&op,this->get_impl().get());
    impl->make_Vphi(leaf_op,fence);
    return *this;
}
1086 
1088 
1092 
1093  // clear what we have
1094  impl->get_coeffs().clear();
1095  error_leaf_op<T,NDIM> leaf_op(this->get_impl().get());
1096  impl->make_Vphi(leaf_op,fence);
1097  return *this;
1098  }
1099 
1100 
1102  template<size_t LDIM, size_t KDIM, typename opT>
1104  const opT* op) {
1105 
1106  // get the right leaf operator
1108  impl->hartree_product(left,right,leaf_op,true);
1109  this->truncate(0.0,false);
1110 
1111  }
1112 
1114  template<size_t LDIM, size_t KDIM>
1116 
1117 // hartree_leaf_op<T,KDIM+LDIM> leaf_op(impl.get(),cdata.s0);
1119  impl->hartree_product(left,right,leaf_op,true);
1120  this->truncate(0.0,false);
1121 
1122  }
1123 
1125 
/// Returns the inner product <this|g>; collective and blocking.
///
/// Handles identical operands, on-demand functions, and mixed compression
/// states; may temporarily make both operands redundant and undoes that
/// change before returning.
template <typename R>
TENSOR_RESULT_TYPE(T,R) inner(const Function<R,NDIM>& g) const {
    // fast return if possible
    if (not this->is_initialized()) return 0.0;
    if (not g.is_initialized()) return 0.0;

    // if this and g are the same, use norm2()
    if (this->get_impl()==g.get_impl()) {
        double norm=this->norm2();
        return norm*norm;
    }

    // do it case-by-case
    if (this->is_on_demand()) return g.inner_on_demand(*this);
    if (g.is_on_demand()) return this->inner_on_demand(g);

    if (VERIFY_TREE) verify_tree();
    if (VERIFY_TREE) g.verify_tree();

    // compression is more efficient for 3D
    if (NDIM==3) {
        if (!is_compressed()) compress(false);
        if (!g.is_compressed()) g.compress(false);
        impl->world.gop.fence();
    }

    // if not both compressed, make both redundant for the local dot product
    if (this->is_compressed() and g.is_compressed()) {
    } else {
        if (not this->get_impl()->is_redundant()) this->get_impl()->make_redundant(false);
        if (not g.get_impl()->is_redundant()) g.get_impl()->make_redundant(false);
        impl->world.gop.fence();
    }


    TENSOR_RESULT_TYPE(T,R) local = impl->inner_local(*g.get_impl());
    impl->world.gop.sum(local);
    impl->world.gop.fence();

    // restore the original (non-redundant) state of both operands
    if (this->get_impl()->is_redundant()) this->get_impl()->undo_redundant(false);
    if (g.get_impl()->is_redundant()) g.get_impl()->undo_redundant(false);
    impl->world.gop.fence();

    return local;
}
1174 
/// Local part of the inner product with an external (analytic) function f.
/// Temporarily makes the tree redundant unless keep_redundant is set.
T inner_ext_local(T (*f)(const coordT&), const bool leaf_refine=true, const bool keep_redundant=false) const {
    if (not impl->is_redundant()) impl->make_redundant(true);
    T local = impl->inner_ext_local(f, leaf_refine);
    if (not keep_redundant) impl->undo_redundant(true);
    return local;
}
1190 
/// Global inner product with an external (analytic) function f; collective.
/// Temporarily makes the tree redundant unless keep_redundant is set.
T inner_ext(T (*f)(const coordT&), const bool leaf_refine=true, const bool keep_redundant=false) const {
    if (not impl->is_redundant()) impl->make_redundant(true);
    T local = impl->inner_ext_local(f, leaf_refine);
    impl->world.gop.sum(local);
    impl->world.gop.fence();
    if (not keep_redundant) impl->undo_redundant(true);
    return local;
}
1208 
/// Local part of the inner product with an external functor; same redundancy
/// handling as the function-pointer overload.
T inner_ext_local(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine=true, const bool keep_redundant=false) const {
    if (not impl->is_redundant()) impl->make_redundant(true);
    T local = impl->inner_ext_local(f, leaf_refine);
    if (not keep_redundant) impl->undo_redundant(true);
    return local;
}
1224 
/// Global inner product with an external functor; collective. Same redundancy
/// handling as the function-pointer overload.
T inner_ext(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine=true, const bool keep_redundant=false) const {
    if (not impl->is_redundant()) impl->make_redundant(true);
    T local = impl->inner_ext_local(f, leaf_refine);
    impl->world.gop.sum(local);
    impl->world.gop.fence();
    if (not keep_redundant) impl->undo_redundant(true);
    return local;
}
1242 
/// this = alpha*left + beta*f where f is an external (analytic) function;
/// left is reconstructed first if needed. Collective if fence.
template <typename L>
Void gaxpy_ext(const Function<L,NDIM>& left, T (*f)(const coordT&), T alpha, T beta, double tol, bool fence=true) const {
    if (left.is_compressed()) left.reconstruct();
    impl->gaxpy_ext(left.get_impl().get(), f, alpha, beta, tol, fence);
    return None;
}
1255 
1257 
/// Inner product <this|g> where g is an on-demand function; collective.
///
/// NOTE(review): `func` used below to restore g's functor is g's original
/// functor saved before make_Vphi removes it — its declaration appears
/// elided in this listing; confirm against the full source.
template <typename R>
TENSOR_RESULT_TYPE(T,R) inner_on_demand(const Function<R,NDIM>& g) const {
    MADNESS_ASSERT(g.is_on_demand() and (not this->is_on_demand()));

    this->reconstruct();

    // save for later, will be removed by make_Vphi
    leaf_op<T,NDIM> fnode_is_leaf(this->get_impl().get());
    g.get_impl()->make_Vphi(fnode_is_leaf,true); // fence here

    if (VERIFY_TREE) verify_tree();
    TENSOR_RESULT_TYPE(T,R) local = impl->inner_local(*g.get_impl());
    impl->world.gop.sum(local);
    impl->world.gop.fence();

    // restore original state
    g.get_impl()->set_functor(func);
    g.get_impl()->get_coeffs().clear();
    g.get_impl()->is_on_demand()=true;

    return local;
}
1287 
1289 
1293  template <typename R, size_t LDIM>
1295  if (NDIM<=LDIM) MADNESS_EXCEPTION("confused dimensions in project_out?",1);
1296  MADNESS_ASSERT(dim==0 or dim==1);
1297  verify();
1298  typedef TENSOR_RESULT_TYPE(T,R) resultT;
1299  static const size_t KDIM=NDIM-LDIM;
1300 
1302  .k(g.k()).thresh(g.thresh());
1303  Function<resultT,KDIM> result=factory; // no empty() here!
1304 
1305  FunctionImpl<R,LDIM>* gimpl = const_cast< FunctionImpl<R,LDIM>* >(g.get_impl().get());
1306 
1307  this->reconstruct();
1308  gimpl->make_redundant(true);
1309  this->get_impl()->project_out(result.get_impl().get(),gimpl,dim,true);
1310 // result.get_impl()->project_out2(this->get_impl().get(),gimpl,dim);
1311  result.world().gop.fence();
1312  result.get_impl()->trickle_down(true);
1313  gimpl->undo_redundant(true);
1314  return result;
1315  }
1316 
1317 
1319 
/// Replaces this function's state with data read from the archive; collective.
/// NOTE(review): the check of `id` against TensorTypeData<T>::id appears
/// elided in this listing; only the magic number and dimension are asserted.
template <typename Archive>
void load(World& world, Archive& ar) {
    // Type checking since we are probably circumventing the archive's own type checking
    long magic = 0l, id = 0l, ndim = 0l, k = 0l;
    ar & magic & id & ndim & k;
    MADNESS_ASSERT(magic == 7776768); // Mellow Mushroom Pizza tel.# in Knoxville
    MADNESS_ASSERT(ndim == NDIM);

    impl.reset(new implT(FunctionFactory<T,NDIM>(world).k(k).empty()));

    impl->load(ar);
}
1337 
1338 
1340 
/// Stores the function to the archive, preceded by a type-check header
/// (magic number, tensor type id, dimension, wavelet order); collective.
template <typename Archive>
void store(Archive& ar) const {
    verify();
    // For type checking, etc.
    ar & long(7776768) & long(TensorTypeData<T>::id) & long(NDIM) & long(k());

    impl->store(ar);
}
1353 
1355 
/// Changes the internal tensor representation (e.g. full-rank vs low-rank);
/// no-op if uninitialized. Collective if fence.
void change_tensor_type(const TensorArgs& targs, bool fence=true) {
    if (not impl) return;
    impl->change_tensor_type1(targs,fence);
}
1361 
1362 
1364  template <typename Q, typename opT>
1366  const opT& op, bool fence) {
1368  func.verify();
1369  MADNESS_ASSERT(!(func.is_compressed()));
1370  if (VERIFY_TREE) func.verify_tree();
1371  impl.reset(new implT(*func.get_impl(), func.get_pmap(), false));
1372  impl->unaryXX(func.get_impl().get(), op, fence);
1373  return *this;
1374  }
1375 
1377  template <typename Q, std::size_t D>
1378  static std::vector< std::shared_ptr< FunctionImpl<Q,D> > > vimpl(const std::vector< Function<Q,D> >& v) {
1380  std::vector< std::shared_ptr< FunctionImpl<Q,D> > > r(v.size());
1381  for (unsigned int i=0; i<v.size(); ++i) r[i] = v[i].get_impl();
1382  return r;
1383  }
1384 
/// Refines all functions in vf so they share a common tree structure
/// (the union of all trees); collective.
void refine_to_common_level(std::vector< Function<T,NDIM> >& vf, bool fence=true) {
    Key<NDIM> key0(0, Vector<Translation, NDIM> (0));
    std::vector< Tensor<T> > c(vf.size());
    std::vector<implT*> v(vf.size());
    bool mustfence = false;
    // reconstruct any compressed members first, deferring the fence
    for (unsigned int i=0; i<v.size(); ++i) {
        if (vf[i].is_compressed()) {
            vf[i].reconstruct(false);
            mustfence = true;
        }
        v[i] = vf[i].get_impl().get();
    }
    vf[0].impl->refine_to_common_level(v, c, key0);
    if (mustfence) vf[0].world().gop.fence();
    if (fence) vf[0].world().gop.fence();
    //if (VERIFY_TREE)
    for (unsigned int i=0; i<vf.size(); i++) vf[i].verify_tree();
}
1404 
1406  template <typename opT>
1407  Function<T,NDIM>& multiop_values(const opT& op, const std::vector< Function<T,NDIM> >& vf) {
1408  std::vector<implT*> v(vf.size());
1409  for (unsigned int i=0; i<v.size(); ++i) {
1410  v[i] = vf[i].get_impl().get();
1411  }
1412  impl->multiop_values(op, v);
1413  world().gop.fence();
1414  return *this;
1415  }
1416 
/// Low-level helper: multiplies left by each element of right, storing the
/// products in result (whose impls are created here from left's structure).
/// Collective; the intermediate fence is required for correctness.
template <typename L, typename R>
void vmulXX(const Function<L,NDIM>& left,
            const std::vector< Function<R,NDIM> >& right,
            std::vector< Function<T,NDIM> >& result,
            double tol,
            bool fence) {
    std::vector<FunctionImpl<T,NDIM>*> vresult(right.size());
    std::vector<const FunctionImpl<R,NDIM>*> vright(right.size());
    for (unsigned int i=0; i<right.size(); ++i) {
        result[i].set_impl(left,false);
        vresult[i] = result[i].impl.get();
        vright[i] = right[i].impl.get();
    }

    left.world().gop.fence(); // Is this still essential? Yes.
    vresult[0]->mulXXvec(left.get_impl().get(), vright, vresult, tol, fence);
}
1437 
1439 
/// Multiplies f and g where exactly one of them is an on-demand function,
/// storing the product in this function; collective if fence.
template<typename L, typename R>
void mul_on_demand(const Function<L,NDIM>& f, const Function<R,NDIM>& g, bool fence=true) {
    const FunctionImpl<L,NDIM>* fimpl=f.get_impl().get();
    const FunctionImpl<R,NDIM>* gimpl=g.get_impl().get();
    if (fimpl->is_on_demand() and gimpl->is_on_demand()) {
        MADNESS_EXCEPTION("can't multiply two on-demand functions",1);
    }

    // the leaf criterion comes from the operand that has real coefficients
    if (fimpl->is_on_demand()) {
        leaf_op<R,NDIM> leaf_op1(gimpl);
        impl->multiply(leaf_op1,gimpl,fimpl,fence);
    } else {
        leaf_op<L,NDIM> leaf_op1(fimpl);
        impl->multiply(leaf_op1,fimpl,gimpl,fence);
    }
}
1457 
/// Transforms a vector of functions by the matrix c, writing into vresult:
/// vresult[j] = sum_i v[i]*c(i,j); collective if fence.
template <typename R, typename Q>
void vtransform(const std::vector< Function<R,NDIM> >& v,
                const Tensor<Q>& c,
                std::vector< Function<T,NDIM> >& vresult,
                double tol,
                bool fence=true) {
    vresult[0].impl->vtransform(vimpl(v), c, vimpl(vresult), tol, fence);
}
1468 
1470  template <typename L, typename R>
1472  T beta, const Function<R,NDIM>& right, bool fence) {
1474  left.verify();
1475  right.verify();
1476  MADNESS_ASSERT(left.is_compressed() && right.is_compressed());
1477  if (VERIFY_TREE) left.verify_tree();
1478  if (VERIFY_TREE) right.verify_tree();
1479  impl.reset(new implT(*left.get_impl(), left.get_pmap(), false));
1480  impl->gaxpy(alpha,*left.get_impl(),beta,*right.get_impl(),fence);
1481  return *this;
1482  }
1483 
/// Makes this function a permuted-dimension copy of f (dimension i of the
/// result comes from dimension map[i] of f); collective if fence.
Function<T,NDIM>& mapdim(const Function<T,NDIM>& f, const std::vector<long>& map, bool fence) {
    f.verify();
    if (VERIFY_TREE) f.verify_tree();
    for (std::size_t i=0; i<NDIM; ++i) MADNESS_ASSERT(map[i]>=0 && static_cast<std::size_t>(map[i])<NDIM);
    impl.reset(new implT(*f.impl, f.get_pmap(), false));
    impl->mapdim(*f.impl,map,fence);
    return *this;
}
1494 
/// Checks the symmetry of the function with respect to particle exchange;
/// collective. Prints the asymmetry on rank 0 and returns it.
double check_symmetry() const {

    impl->make_redundant(true);
    if (VERIFY_TREE) verify_tree();
    double local = impl->check_symmetry_local();
    impl->world.gop.sum(local);
    impl->world.gop.fence();
    double asy=sqrt(local);
    if (this->world().rank()==0) print("asymmetry wrt particle",asy);
    impl->undo_redundant(true);
    return asy;
}
1508 
/// Reduces the rank of the low-rank coefficient tensors in place;
/// collective if fence. Returns *this for chaining.
Function<T,NDIM>& reduce_rank(const bool fence=true) {
    verify();
    impl->reduce_rank(impl->get_tensor_args(),fence);
    return *this;
}
1515  };
1516 
1517  template <typename T, typename opT, int NDIM>
1518  Function<T,NDIM> multiop_values(const opT& op, const std::vector< Function<T,NDIM> >& vf) {
1519  Function<T,NDIM> r;
1520  r.set_impl(vf[0], false);
1521  r.multiop_values(op, vf);
1522  return r;
1523  }
1524 
/// Returns a new function equal to alpha*f, preserving f's tree structure.
/// NOTE(review): the declaration of `result`
/// (Function<TENSOR_RESULT_TYPE(Q,T),NDIM> result;) appears elided in this
/// listing; confirm against the full source.
template <typename Q, typename T, std::size_t NDIM>
Function<TENSOR_RESULT_TYPE(Q,T),NDIM>
mul(const Q alpha, const Function<T,NDIM>& f, bool fence=true) {
    PROFILE_FUNC;
    f.verify();
    if (VERIFY_TREE) f.verify_tree();
    result.set_impl(f, false);
    result.get_impl()->scale_oop(alpha,*f.get_impl(),fence);
    return result;
}
1537 
1538 
/// Returns a new function equal to f*alpha; forwards to mul(alpha,f,fence).
template <typename Q, typename T, std::size_t NDIM>
Function<TENSOR_RESULT_TYPE(Q,T),NDIM>
mul(const Function<T,NDIM>& f, const Q alpha, bool fence=true) {
    PROFILE_FUNC;
    return mul(alpha,f,fence);
}
1546 
1547 
1549 
/// Returns a new function equal to f*alpha (fenced); forwards to mul().
template <typename Q, typename T, std::size_t NDIM>
Function<TENSOR_RESULT_TYPE(Q,T),NDIM>
operator*(const Function<T,NDIM>& f, const Q alpha) {
    return mul(alpha, f, true);
}
1556 
1558 
/// Returns a new function equal to alpha*f (fenced); forwards to mul().
template <typename Q, typename T, std::size_t NDIM>
Function<TENSOR_RESULT_TYPE(Q,T),NDIM>
operator*(const Q alpha, const Function<T,NDIM>& f) {
    return mul(alpha, f, true);
}
1565 
// Sparse point-wise multiplication of two reconstructed functions, with
// tolerance-based screening of small blocks (tol). Optional fence.
// NOTE(review): the declaration of `result` (original line 1577) was
// lost in extraction.
1567  template <typename L, typename R,std::size_t NDIM>
1568  Function<TENSOR_RESULT_TYPE(L,R),NDIM>
1569  mul_sparse(const Function<L,NDIM>& left, const Function<R,NDIM>& right, double tol, bool fence=true) {
1570  PROFILE_FUNC;
1571  left.verify();
1572  right.verify();
// Multiplication requires the scaling-function (reconstructed) basis.
1573  MADNESS_ASSERT(!(left.is_compressed() || right.is_compressed()));
1574  if (VERIFY_TREE) left.verify_tree();
1575  if (VERIFY_TREE) right.verify_tree();
1576 
1578  result.set_impl(left, false);
1579  result.get_impl()->mulXX(left.get_impl().get(), right.get_impl().get(), tol, fence);
1580  return result;
1581  }
1582 
1584  template <typename L, typename R,std::size_t NDIM>
1585  Function<TENSOR_RESULT_TYPE(L,R),NDIM>
1586  mul(const Function<L,NDIM>& left, const Function<R,NDIM>& right, bool fence=true) {
1587  return mul_sparse(left,right,0.0,fence);
1588  }
1589 
// Out-of-place binary operation op(left,right) applied block-wise in the
// scaling-function basis (both operands reconstructed first).
// NOTE(review): the declarations of `result` (original lines 1598, 1609,
// 1622) were lost in extraction in the three functions below.
1591  template <typename L, typename R, typename opT, std::size_t NDIM>
1592  Function<TENSOR_RESULT_TYPE(L,R),NDIM>
1593  binary_op(const Function<L,NDIM>& left, const Function<R,NDIM>& right, const opT& op, bool fence=true) {
1594  PROFILE_FUNC;
1595  if (left.is_compressed()) left.reconstruct();
1596  if (right.is_compressed()) right.reconstruct();
1597 
1599  result.set_impl(left, false);
1600  result.get_impl()->binaryXX(left.get_impl().get(), right.get_impl().get(), op, fence);
1601  return result;
1602  }
1603 
// Out-of-place unary operation applied to the function *values*.
1605  template <typename Q, typename opT, std::size_t NDIM>
1606  Function<typename opT::resultT, NDIM>
1607  unary_op(const Function<Q,NDIM>& func, const opT& op, bool fence=true) {
1608  if (func.is_compressed()) func.reconstruct();
1610  if (VERIFY_TREE) func.verify_tree();
1611  result.set_impl(func, false);
1612  result.get_impl()->unaryXXvalues(func.get_impl().get(), op, fence);
1613  return result;
1614  }
1615 
1616 
// Out-of-place unary operation applied to the *coefficients*.
1618  template <typename Q, typename opT, std::size_t NDIM>
1619  Function<typename opT::resultT, NDIM>
1620  unary_op_coeffs(const Function<Q,NDIM>& func, const opT& op, bool fence=true) {
1621  if (func.is_compressed()) func.reconstruct();
1623  return result.unary_op_coeffs(func,op,fence);
1624  }
1625 
1627 
1632  template <typename L, typename R, std::size_t D>
1633  std::vector< Function<TENSOR_RESULT_TYPE(L,R),D> >
1634  vmulXX(const Function<L,D>& left, const std::vector< Function<R,D> >& vright, double tol, bool fence=true) {
1635  if (vright.size() == 0) return std::vector< Function<TENSOR_RESULT_TYPE(L,R),D> >();
1636  std::vector< Function<TENSOR_RESULT_TYPE(L,R),D> > vresult(vright.size());
1637  vresult[0].vmulXX(left, vright, vresult, tol, fence);
1638  return vresult;
1639  }
1640 
1642 
1645  template <typename L, typename R, std::size_t NDIM>
1646  Function<TENSOR_RESULT_TYPE(L,R), NDIM>
1647  operator*(const Function<L,NDIM>& left, const Function<R,NDIM>& right) {
1648  if (left.is_compressed()) left.reconstruct();
1649  if (right.is_compressed()) right.reconstruct();
1650  MADNESS_ASSERT(not (left.is_on_demand() or right.is_on_demand()));
1651  return mul(left,right,true);
1652  }
1653 
// Hartree product result(1,2) = left(1)*right(2) of two low-dimensional
// functions. NOTE(review): the factory/thresh declaration lines (original
// 1663, 1665) were lost in extraction.
1655  template<typename T, std::size_t KDIM, std::size_t LDIM>
1656  Function<T,KDIM+LDIM>
1657  hartree_product(const Function<T,KDIM>& left2, const Function<T,LDIM>& right2) {
1658 
1659  // we need both sum and difference coeffs for error estimation
// const_cast: the operands are temporarily switched to non-standard form
// and restored below, so they are logically unchanged on exit.
1660  Function<T,KDIM>& left = const_cast< Function<T,KDIM>& >(left2);
1661  Function<T,LDIM>& right = const_cast< Function<T,LDIM>& >(right2);
1662 
1664 
1666  .k(left.k()).thresh(thresh);
1667  Function<T,KDIM+LDIM> result=factory.empty();
1668 
// If both arguments share one impl, restore standard form only once.
1669  bool same=(left2.get_impl()==right2.get_impl());
1670 
1671  // some prep work
1672  left.nonstandard(true,true);
1673  right.nonstandard(true,true);
1674 
1675  result.do_hartree_product(left.get_impl().get(),right.get_impl().get());
1676 
1677  left.standard(false);
1678  if (not same) right.standard(false);
1679  left2.world().gop.fence();
1680 
1681  return result;
1682 
1683  }
1684 
1685 
// Same as above, but applies the operator op during the product.
// NOTE(review): the first line of the signature (original 1689) and the
// factory/thresh declarations (1696, 1698) were lost in extraction.
1687  template<typename T, std::size_t KDIM, std::size_t LDIM, typename opT>
1688  Function<T,KDIM+LDIM>
1690  const opT& op) {
1691 
1692  // we need both sum and difference coeffs for error estimation
1693  Function<T,KDIM>& left = const_cast< Function<T,KDIM>& >(left2);
1694  Function<T,LDIM>& right = const_cast< Function<T,LDIM>& >(right2);
1695 
1697 
1699  .k(left.k()).thresh(thresh);
1700  Function<T,KDIM+LDIM> result=factory.empty();
1701 
1702  if (result.world().rank()==0) {
1703  print("incomplete FunctionFactory in Function::hartree_product");
1704  print("thresh: ", thresh);
1705  }
1706  bool same=(left2.get_impl()==right2.get_impl());
1707 
1708  // some prep work
1709  left.nonstandard(true,true);
1710  right.nonstandard(true,true);
1711 
1712  result.do_hartree_product(left.get_impl().get(),right.get_impl().get(),&op);
1713 
1714  left.standard(false);
1715  if (not same) right.standard(false);
1716  left2.world().gop.fence();
1717 
1718  return result;
1719  }
1720 
// Out-of-place alpha*left + beta*right; delegates to Function::gaxpy_oop.
// NOTE(review): the first line of the signature (original 1724) and the
// `result` declaration (1727) were lost in extraction.
1722  template <typename L, typename R,std::size_t NDIM>
1723  Function<TENSOR_RESULT_TYPE(L,R),NDIM>
1725  TENSOR_RESULT_TYPE(L,R) beta, const Function<R,NDIM>& right, bool fence=true) {
1726  PROFILE_FUNC;
1728  return result.gaxpy_oop(alpha, left, beta, right, fence);
1729  }
1730 
1732  template <typename L, typename R,std::size_t NDIM>
1733  Function<TENSOR_RESULT_TYPE(L,R),NDIM>
1734  add(const Function<L,NDIM>& left, const Function<R,NDIM>& right, bool fence=true) {
1735  return gaxpy_oop(TENSOR_RESULT_TYPE(L,R)(1.0), left,
1736  TENSOR_RESULT_TYPE(L,R)(1.0), right, fence);
1737  }
1738 
1739 
// Returns alpha*left + beta*right with both addends kept in the
// reconstructed (scaling-function) basis — avoids compression, which is
// expensive for high-dimensional functions.
// NOTE(review): the first line of the signature (original 1742) was lost
// in extraction.
1741  template<typename T, std::size_t NDIM>
1743  const double beta, const Function<T,NDIM>& right, const bool fence=true) {
1744  Function<T,NDIM> result;
1745  result.set_impl(right,false);
1746 
// Precondition: both operands must already be reconstructed.
1747  MADNESS_ASSERT(not left.is_compressed());
1748  MADNESS_ASSERT(not right.is_compressed());
1749  result.get_impl()->gaxpy_oop_reconstructed(alpha,*left.get_impl(),beta,*right.get_impl(),fence);
1750  return result;
1751 
1752  }
1753 
1755 
1757  template <typename L, typename R, std::size_t NDIM>
1758  Function<TENSOR_RESULT_TYPE(L,R), NDIM>
1759  operator+(const Function<L,NDIM>& left, const Function<R,NDIM>& right) {
1760  if (VERIFY_TREE) left.verify_tree();
1761  if (VERIFY_TREE) right.verify_tree();
1762 
1763  // no compression for high-dimensional functions
1764  if (NDIM==6) {
1765  left.reconstruct();
1766  right.reconstruct();
1767  return gaxpy_oop_reconstructed(1.0,left,1.0,right,true);
1768  } else {
1769  if (!left.is_compressed()) left.compress();
1770  if (!right.is_compressed()) right.compress();
1771  return add(left,right,true);
1772  }
1773  }
1774 
1776  template <typename L, typename R,std::size_t NDIM>
1777  Function<TENSOR_RESULT_TYPE(L,R),NDIM>
1778  sub(const Function<L,NDIM>& left, const Function<R,NDIM>& right, bool fence=true) {
1779  return gaxpy_oop(TENSOR_RESULT_TYPE(L,R)(1.0), left,
1780  TENSOR_RESULT_TYPE(L,R)(-1.0), right, fence);
1781  }
1782 
1783 
1785 
1787  template <typename L, typename R, std::size_t NDIM>
1788  Function<TENSOR_RESULT_TYPE(L,R), NDIM>
1789  operator-(const Function<L,NDIM>& left, const Function<R,NDIM>& right) {
1790  PROFILE_FUNC;
1791  // no compression for high-dimensional functions
1792  if (NDIM==6) {
1793  left.reconstruct();
1794  right.reconstruct();
1795  return gaxpy_oop_reconstructed(1.0,left,-1.0,right,true);
1796  } else {
1797  if (!left.is_compressed()) left.compress();
1798  if (!right.is_compressed()) right.compress();
1799  return sub(left,right,true);
1800  }
1801  }
1802 
1803 
// Out-of-place square / abs / abs_square of a function. Each first makes
// a deep copy, then applies the corresponding in-place operation.
// NOTE(review): the signature lines (original 1806, 1814, 1822) were lost
// in extraction; note also the inconsistent `int NDIM` vs `std::size_t
// NDIM` template parameters below — verify against the full header.
1805  template <typename T, std::size_t NDIM>
1807  PROFILE_FUNC;
1808  Function<T,NDIM> result = copy(f,true); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1809  return result.square(true); //fence); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1810  }
1811 
1813  template <typename T, int NDIM>
1815  PROFILE_FUNC;
1816  Function<T,NDIM> result = copy(f,true); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1817  return result.abs(true); //fence); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1818  }
1819 
1821  template <typename T, int NDIM>
1823  PROFILE_FUNC;
1824  Function<T,NDIM> result = copy(f,true); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1825  return result.abs_square(true); //fence); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1826  }
1827 
1828 
1830 
// Deep copy of f onto a (possibly different) processor map; optional
// fence. NOTE(review): the first signature lines (original 1835-1836)
// were lost in extraction.
1834  template <typename T, std::size_t NDIM>
1837  bool fence = true) {
1838  PROFILE_FUNC;
1839  f.verify();
1840  Function<T,NDIM> result;
1841  typedef FunctionImpl<T,NDIM> implT;
// New impl with the given pmap (no coeffs), then copy coefficients over.
1842  result.set_impl(std::shared_ptr<implT>(new implT(*f.get_impl(), pmap, false)));
1843  result.get_impl()->copy_coeffs(*f.get_impl(), fence);
1844  if (VERIFY_TREE) result.verify_tree();
1845  return result;
1846  }
1847 
// Deep copy of f keeping its current processor map (signature line 1850
// lost in extraction).
1849  template <typename T, std::size_t NDIM>
1851  PROFILE_FUNC;
1852  return copy(f, f.get_pmap(), fence);
1853  }
1854 
1856 
// Type conversion T -> Q via a deep coefficient copy (signature line 1865
// lost in extraction).
1864  template <typename T, typename Q, std::size_t NDIM>
1866  PROFILE_FUNC;
1867  f.verify();
1868  Function<Q,NDIM> result;
1869  result.set_impl(f, false);
1870  result.get_impl()->copy_coeffs(*f.get_impl(), fence);
1871  return result;
1872  }
1873 
1874 
1876 
// Out-of-place complex conjugate: deep copy, then in-place conj
// (signature line 1879 lost in extraction).
1878  template <typename T, std::size_t NDIM>
1880  PROFILE_FUNC;
1881  Function<T,NDIM> result = copy(f,true);
1882  return result.conj(fence);
1883  }
1884 
1886 
// Apply a (separated) operator to the pair f1(1)*f2(2), returning a
// 2*LDIM-dimensional function. Operands are temporarily switched to
// non-standard form and restored before returning.
// NOTE(review): the factory declaration (original lines 1912-1913) was
// lost in extraction.
1895  template <typename opT, typename T, std::size_t LDIM>
1896  Function<TENSOR_RESULT_TYPE(typename opT::opT,T), LDIM+LDIM>
1897  apply(const opT& op, const Function<T,LDIM>& f1, const Function<T,LDIM>& f2, bool fence=true) {
1898 
1899  typedef TENSOR_RESULT_TYPE(T,typename opT::opT) resultT;
1900 
1901  Function<T,LDIM>& ff1 = const_cast< Function<T,LDIM>& >(f1);
1902  Function<T,LDIM>& ff2 = const_cast< Function<T,LDIM>& >(f2);
1903 
// If f1 and f2 share an impl, the non-standard/standard switch must be
// done only once.
1904  bool same=(ff1.get_impl()==ff2.get_impl());
1905 
1906  // keep the leaves! They are assumed to be there later
1907  // even for modified op we need NS form for the hartree_leaf_op
1908  if (not same) ff1.nonstandard(true,false);
1909  ff2.nonstandard(true,true);
1910 
1911 
1914  Function<resultT,LDIM+LDIM> result=factory.empty().fence();
1915 
1916  result.get_impl()->reset_timer();
1917  op.reset_timer();
1918 
1919  // will fence here
1920  result.get_impl()->recursive_apply(op, f1.get_impl().get(),f2.get_impl().get(),true);
1921 
1922  result.get_impl()->print_timer();
1923  op.print_timer();
1924 
1925  result.get_impl()->finalize_apply(true); // need fence before reconstruct
1926 
// Modified ops produce the result top-down; otherwise reconstruct.
1927  if (op.modified()) {
1928  result.get_impl()->trickle_down(true);
1929  } else {
1930  result.reconstruct();
1931  }
1932  if (not same) ff1.standard(false);
1933  ff2.standard(false);
1934 
1935  return result;
1936  }
1937 
1938 
// Apply operator ONLY in non-standard form — the caller is responsible
// for the surrounding nonstandard/standard/reconstruct steps.
// NOTE(review): the declarations of `result` and `r1` (original lines
// 1944-1945) were lost in extraction; also note the "finlize_apply" typo
// in the printed message below.
1940  template <typename opT, typename R, std::size_t NDIM>
1941  Function<TENSOR_RESULT_TYPE(typename opT::opT,R), NDIM>
1942  apply_only(const opT& op, const Function<R,NDIM>& f, bool fence=true) {
1943  PROFILE_FUNC;
1946 
1947  result.set_impl(f, true);
1948  r1.set_impl(f, true);
1949 
1950  result.get_impl()->reset_timer();
1951  op.reset_timer();
1952 
1953 // result.get_impl()->apply_source_driven(op, *f.get_impl(), true);
1954  result.get_impl()->recursive_apply(op, f.get_impl().get(),
1955  r1.get_impl().get(),true); // will fence here
1956 
1957 
1958  double time=result.get_impl()->finalize_apply(fence); // need fence before reconstruction
// Extra timing diagnostics for the expensive 6D case only.
1959  if (opT::opdim==6) {
1960  result.get_impl()->print_timer();
1961  op.print_timer();
1962  if (result.world().rank()==0) print("time in finlize_apply", time);
1963  }
1964 
1965  return result;
1966  }
1967 
1969 
// Apply operator op to function f: handles reconstruction, the
// non-standard-form transform, the special slaterf12 correction, and the
// modified-operator path; delegates the actual application to apply_only.
1973  template <typename opT, typename R, std::size_t NDIM>
1974  Function<TENSOR_RESULT_TYPE(typename opT::opT,R), NDIM>
1975  apply(const opT& op, const Function<R,NDIM>& f, bool fence=true) {
1976 
1977  typedef TENSOR_RESULT_TYPE(typename opT::opT,R) resultT;
// const_cast: f is temporarily changed (reconstructed/redundant or
// non-standard) and restored — unless the operator is destructive.
1978  Function<R,NDIM>& ff = const_cast< Function<R,NDIM>& >(f);
1979  Function<resultT, NDIM> result;
1980 
1981  MADNESS_ASSERT(not f.is_on_demand());
1982 
1983  if (VERIFY_TREE) ff.verify_tree();
1984  ff.reconstruct();
1985  if (opT::opdim==6) ff.print_size("ff in apply after reconstruct");
1986 
// Modified operators work on the redundant representation.
1987  if (op.modified()) {
1988 
1989  MADNESS_ASSERT(not op.is_slaterf12);
1990  ff.get_impl()->make_redundant(true);
1991  result = apply_only(op, ff, fence);
1992  ff.get_impl()->undo_redundant(false);
1993  result.get_impl()->trickle_down(true);
1994 
1995  } else {
1996 
1997  // the slaterf12 function is
1998  // 1/(2 mu) \int d1 (1 - exp(- mu r12)) f(1)
1999  // = 1/(2 mu) (f.trace() - \int d1 exp(-mu r12) f(1) )
2000  // f.trace() is just a number
2001  R ftrace=0.0;
2002  if (op.is_slaterf12) ftrace=f.trace();
2003 
2004  // saves the standard() step, which is very expensive in 6D
2005 // Function<R,NDIM> fff=copy(ff);
// Shallow copy: fff shares ff's impl, so the nonstandard() below also
// affects ff — restored (or cleared) after apply_only.
2006  Function<R,NDIM> fff=(ff);
2007  fff.nonstandard(op.doleaves, true);
2008  if (opT::opdim==6) fff.print_size("ff in apply after nonstandard");
2009  if ((opT::opdim==6) and (f.world().rank()==0)) {
2010  fff.get_impl()->timer_filter.print("filter");
2011  fff.get_impl()->timer_compress_svd.print("compress_svd");
2012  }
2013  result = apply_only(op, fff, fence);
2014  result.reconstruct();
2015 // fff.clear();
// Destructive ops consume their input; otherwise restore standard form.
2016  if (op.destructive()) {
2017  ff.world().gop.fence();
2018  ff.clear();
2019  } else {
2020  ff.standard();
2021  }
2022  if (op.is_slaterf12) result=(result-ftrace).scale(-0.5/op.mu());
2023 
2024  }
2025  if (opT::opdim==6) result.print_size("result after reconstruction");
2026  return result;
2027  }
2028 
2029 
// Apply a 1D operator along the given axis in real space ("push"
// algorithm); f is reconstructed first.
// NOTE(review): the declaration of `result` (original line 2038) was lost
// in extraction.
2030  template <typename opT, typename R, std::size_t NDIM>
2031  Function<TENSOR_RESULT_TYPE(typename opT::opT,R), NDIM>
2032  apply_1d_realspace_push(const opT& op, const Function<R,NDIM>& f, int axis, bool fence=true) {
2033  PROFILE_FUNC;
2034  Function<R,NDIM>& ff = const_cast< Function<R,NDIM>& >(f);
2035  if (VERIFY_TREE) ff.verify_tree();
2036  ff.reconstruct();
2037 
2039 
2040  result.set_impl(ff, false);
2041  result.get_impl()->apply_1d_realspace_push(op, ff.get_impl().get(), axis, fence);
2042  return result;
2043  }
2044 
2045 
2047 
2057  template <typename T, std::size_t NDIM>
2058  Function<T,NDIM>
2059  mapdim(const Function<T,NDIM>& f, const std::vector<long>& map, bool fence=true) {
2060  PROFILE_FUNC;
2061  Function<T,NDIM> result;
2062  return result.mapdim(f,map,fence);
2063  }
2064 
2066 
2071  template <typename T, std::size_t NDIM>
2072  Function<T,NDIM>
2073  symmetrize(const Function<T,NDIM>& f, const std::string symmetry, bool fence=true) {
2074  Function<T,NDIM> result;
2075 
2076  MADNESS_ASSERT(NDIM==6); // works only for pair functions
2077  std::vector<long> map(NDIM);
2078 
2079  // symmetric particle permutation
2080  if (symmetry=="sy_particle") {
2081  map[0]=3; map[1]=4; map[2]=5;
2082  map[3]=0; map[4]=1; map[5]=2;
2083  } else if (symmetry=="cx") {
2084  map[0]=0; map[1]=2; map[2]=1;
2085  map[3]=3; map[4]=5; map[5]=4;
2086 
2087  } else if (symmetry=="cy") {
2088  map[0]=2; map[1]=1; map[2]=0;
2089  map[3]=5; map[4]=4; map[5]=3;
2090 
2091  } else if (symmetry=="cz") {
2092  map[0]=1; map[1]=0; map[2]=2;
2093  map[3]=4; map[4]=3; map[5]=5;
2094 
2095  } else {
2096  if (f.world().rank()==0) {
2097  print("unknown parameter in symmetrize:",symmetry);
2098  }
2099  MADNESS_EXCEPTION("unknown parameter in symmetrize",1);
2100  }
2101 
2102  result.mapdim(f,map,true); // need to fence here
2103  result.get_impl()->average(*f.get_impl());
2104 
2105  return result;
2106  }
2107 
2108 
2110 
2115  template<typename T, std::size_t NDIM, std::size_t LDIM>
2116  Function<T,NDIM> multiply(const Function<T,NDIM> f, const Function<T,LDIM> g, const int particle, const bool fence=true) {
2117 
2118  MADNESS_ASSERT(LDIM+LDIM==NDIM);
2119  MADNESS_ASSERT(particle==1 or particle==2);
2120 
2121  Function<T,NDIM> result;
2122  result.set_impl(f, true);
2123 
2124  Function<T,NDIM>& ff = const_cast< Function<T,NDIM>& >(f);
2125  Function<T,LDIM>& gg = const_cast< Function<T,LDIM>& >(g);
2126 
2127  if (0) {
2128  gg.nonstandard(true,false);
2129  ff.nonstandard(true,false);
2130  result.world().gop.fence();
2131 
2132  result.get_impl()->multiply(ff.get_impl().get(),gg.get_impl().get(),particle);
2133  result.world().gop.fence();
2134 
2135  gg.standard(false);
2136  ff.standard(false);
2137  result.world().gop.fence();
2138 
2139  } else {
2140  FunctionImpl<T,NDIM>* fimpl=ff.get_impl().get();
2141  FunctionImpl<T,LDIM>* gimpl=gg.get_impl().get();
2142  gimpl->make_redundant(true);
2143  fimpl->make_redundant(false);
2144  result.world().gop.fence();
2145 
2146  result.get_impl()->multiply(fimpl,gimpl,particle);
2147  result.world().gop.fence();
2148 
2149  fimpl->undo_redundant(false);
2150  gimpl->undo_redundant(fence);
2151  }
2152 
2153 // if (particle==1) result.print_size("finished multiplication f(1,2)*g(1)");
2154 // if (particle==2) result.print_size("finished multiplication f(1,2)*g(2)");
2155 
2156  return result;
2157  }
2158 
2159 
// Project "other" onto a new function with (possibly different) wavelet
// order k and threshold thresh. NOTE(review): the signature lines
// (original 2162-2164, presumably declaring other/k/thresh parameters)
// were lost in extraction.
2160  template <typename T, std::size_t NDIM>
2161  Function<T,NDIM>
2165  bool fence=true)
2166  {
2167  PROFILE_FUNC;
2168  Function<T,NDIM> result = FunctionFactory<T,NDIM>(other.world()).k(k).thresh(thresh).empty();
2169  other.reconstruct();
2170  result.get_impl()->project(*other.get_impl(),fence);
2171  return result;
2172  }
2173 
2174 
2176 
// Inner product <f|g>; global communication (signature line 2179 lost in
// extraction).
2178  template <typename T, typename R, std::size_t NDIM>
2180  PROFILE_FUNC;
2181  return f.inner(g);
2182  }
2183 
// Scalar arithmetic: f+r, r+f, f-r, r-f for tensor-supported scalar
// types R. Each makes a deep copy via (f*R(1.0)) or (f*R(-1.0)) and then
// shifts by the scalar. NOTE(review): the signature lines (original 2186,
// 2192, 2198, 2204) were lost in extraction.
2184  template <typename T, typename R, std::size_t NDIM>
2185  typename IsSupported<TensorTypeData<R>, Function<TENSOR_RESULT_TYPE(T,R),NDIM> >::type
2187  return (f*R(1.0)).add_scalar(r);
2188  }
2189 
2190  template <typename T, typename R, std::size_t NDIM>
2191  typename IsSupported<TensorTypeData<R>, Function<TENSOR_RESULT_TYPE(T,R),NDIM> >::type
2193  return (f*R(1.0)).add_scalar(r);
2194  }
2195 
2196  template <typename T, typename R, std::size_t NDIM>
2197  typename IsSupported<TensorTypeData<R>, Function<TENSOR_RESULT_TYPE(T,R),NDIM> >::type
2199  return (f*R(1.0)).add_scalar(-r);
2200  }
2201 
2202  template <typename T, typename R, std::size_t NDIM>
2203  typename IsSupported<TensorTypeData<R>, Function<TENSOR_RESULT_TYPE(T,R),NDIM> >::type
2205  return (f*R(-1.0)).add_scalar(r);
2206  }
2207 
// Helper functors applied block-wise to complex coefficients: real part,
// imaginary part, and squared absolute value.
2208  namespace detail {
2209  template <std::size_t NDIM>
2210  struct realop {
2211  typedef double resultT;
2212  Tensor<double> operator()(const Key<NDIM>& key, const Tensor<double_complex>& t) const {
2213  return real(t);
2214  }
2215 
// No state to serialize.
2216  template <typename Archive> void serialize (Archive& ar) {}
2217  };
2218 
2219  template <std::size_t NDIM>
2220  struct imagop {
2221  typedef double resultT;
2222  Tensor<double> operator()(const Key<NDIM>& key, const Tensor<double_complex>& t) const {
2223  return imag(t);
2224  }
2225 
2226  template <typename Archive> void serialize (Archive& ar) {}
2227  };
2228 
2229  template <std::size_t NDIM>
2230  struct abssqop {
2231  typedef double resultT;
2232  Tensor<double> operator()(const Key<NDIM>& key, const Tensor<double_complex>& t) const {
// |t| element-wise, then squared via element-wise multiply.
2233  Tensor<double> r = abs(t);
2234  return r.emul(r);
2235  }
2236 
2237  template <typename Archive> void serialize (Archive& ar) {}
2238  };
2239  }
2240 
// real(z), imag(z), abssq(z) on complex functions via unary_op.
// NOTE(review): the bodies of real() and imag() (original lines 2243-2244
// and 2249-2250) and the abssq signature (2255) were lost in extraction.
2242  template <std::size_t NDIM>
2245  }
2246 
2248  template <std::size_t NDIM>
2251  }
2252 
2254  template <std::size_t NDIM>
2256  return unary_op(z, detail::abssqop<NDIM>(), fence);
2257  }
2258 
2259 
2260 }
2261 
2262 #include <madness/mra/funcplot.h>
2263 
// Parallel-archive load/store specializations for Function: delegate to
// Function::load / Function::store. NOTE(review): the struct declaration
// lines (original 2267 and 2274, presumably ArchiveLoadImpl /
// ArchiveStoreImpl partial specializations) were lost in extraction.
2264 namespace madness {
2265  namespace archive {
2266  template <class T, std::size_t NDIM>
2268  static inline void load(const ParallelInputArchive& ar, Function<T,NDIM>& f) {
2269  f.load(*ar.get_world(), ar);
2270  }
2271  };
2272 
2273  template <class T, std::size_t NDIM>
2275  static inline void store(const ParallelOutputArchive& ar, const Function<T,NDIM>& f) {
2276  f.store(ar);
2277  }
2278  };
2279  }
2283 /* @} */
2284 
2285 #include <madness/mra/derivative.h>
2286 #include <madness/mra/operator.h>
2287 #include <madness/mra/functypedefs.h>
2288 #include <madness/mra/vmra.h>
2289 // #include <madness/mra/mraimpl.h> !!!!!!!!!!!!! NOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO !!!!!!!!!!!!!!!!!!
2290 
2291 #endif // MADNESS_MRA_MRA_H__INCLUDED
MADNESS_ASSERT(is_compressed())
void unaryop(const opT &op, bool fence=true)
Inplace unary operation on function values.
Definition: mra.h:849
T operator()(const coordT &xuser) const
Evaluates the function at a point in user coordinates. Collective operation.
Definition: mra.h:370
std::shared_ptr< FunctionFunctorInterface< T, NDIM > > func
Definition: mra.h:1271
SimpleUnaryOpWrapper(T(*f)(T))
Definition: mra.h:832
WorldGopInterface & gop
Global operations.
Definition: worldfwd.h:462
Function< TENSOR_RESULT_TYPE(L, R), NDIM > operator+(const Function< L, NDIM > &left, const Function< R, NDIM > &right)
Adds two functions with the new result being of type TensorResultType
Definition: mra.h:1759
bool is_initialized() const
Returns true if the function is initialized.
Definition: mra.h:146
void vtransform(const std::vector< Function< R, NDIM > > &v, const Tensor< Q > &c, std::vector< Function< T, NDIM > > &vresult, double tol, bool fence=true)
sparse transformation of a vector of functions ... private
Definition: mra.h:1460
Definition: shared_ptr_bits.h:38
IsSupported< TensorTypeData< Q >, Function< T, NDIM > >::type & operator*=(const Q q)
Inplace scaling by a constant.
Definition: mra.h:984
Function< T, NDIM > gaxpy_oop_reconstructed(const double alpha, const Function< T, NDIM > &left, const double beta, const Function< T, NDIM > &right, const bool fence=true)
Returns new function alpha*left + beta*right optional fence, having both addends reconstructed.
Definition: mra.h:1742
Function< T, NDIM > & fill_tree(const opT &op, bool fence=true)
With this being an on-demand function, fill the MRA tree according to different criteria.
Definition: mra.h:1077
bool autorefine() const
Returns value of autorefine flag. No communication.
Definition: mra.h:523
T norm(Vector< T, N > v)
Compute norm of a Vector.
Definition: array.h:447
double imag(double x)
Definition: complexfun.h:56
Function< T, NDIM > & truncate(double tol=0.0, bool fence=true)
Truncate the function with optional fence. Compresses with fence if not compressed.
Definition: mra.h:577
Header to declare stuff which has not yet found a home.
FunctionDefaults holds default parameters as static class members.
Definition: funcdefaults.h:175
void serialize(Archive &ar)
Definition: mra.h:2226
Void gaxpy_ext(const Function< L, NDIM > &left, T(*f)(const coordT &), T alpha, T beta, double tol, bool fence=true) const
Definition: mra.h:1249
Tensor< T > eval_cube(const Tensor< double > &cell, const std::vector< long > &npt, bool eval_refine=false) const
Evaluates a cube/slice of points (probably for plotting) ... collective but no fence necessary...
Definition: mra.h:329
TENSOR_RESULT_TYPE(T, R) inner_local(const Function<R
Returns local part of inner product ... throws if both not compressed.
T inner_ext(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine=true, const bool keep_redundant=false) const
Definition: mra.h:1233
const int NDIM
Definition: tdse1.cc:44
Function< T, NDIM > & gaxpy_oop(T alpha, const Function< L, NDIM > &left, T beta, const Function< R, NDIM > &right, bool fence)
This is replaced with alpha*left + beta*right ... private.
Definition: mra.h:1471
Function(const Function< T, NDIM > &f)
Copy constructor is shallow. No communication, works in either basis.
Definition: mra.h:164
An archive for storing local or parallel data wrapping BinaryFstreamOutputArchive.
Definition: parar.h:241
Future< T > eval(const coordT &xuser) const
Evaluates the function at a point in user coordinates. Possible non-blocking comm.
Definition: mra.h:185
void startup(World &world, int argc, char **argv)
Definition: startup.cc:44
const Function< T, NDIM > & compress(bool fence=true) const
Compresses the function, transforming into wavelet basis. Possible non-blocking comm.
Definition: mra.h:683
void load(World &world, Archive &ar)
Replaces this function with one loaded from an archive using the default processor map...
Definition: mra.h:1324
const std::shared_ptr< FunctionImpl< T, NDIM > > & get_impl() const
Returns a shared-pointer to the implementation.
Definition: mra.h:589
Provides typedefs to hide use of templates and to increase interoperability.
const mpreal dim(const mpreal &a, const mpreal &b, mp_rnd_t rnd_mode)
Definition: mpreal.h:2201
T inner_ext_local(T(*f)(const coordT &), const bool leaf_refine=true, const bool keep_redundant=false) const
Definition: mra.h:1183
Implements ParallelInputArchive and ParallelOutputArchive.
const double L
Definition: 3dharmonic.cc:123
void nonstandard(bool keepleaves, bool fence=true)
Compresses the function retaining scaling function coeffs. Possible non-blocking comm.
Definition: mra.h:701
void serialize(Archive &ar)
Definition: mra.h:2237
void mul_on_demand(const Function< L, NDIM > &f, const Function< R, NDIM > &g, bool fence=true)
Same as operator* but with optional fence and no automatic reconstruction.
Definition: mra.h:1442
::std::string string
Definition: gtest-port.h:872
Function< T, NDIM > & fill_tree(bool fence=true)
With this being an on-demand function, fill the MRA tree according to different criteria.
Definition: mra.h:1090
void set_autorefine(bool value, bool fence=true)
Sets the value of the autorefine flag. Optional global fence.
Definition: mra.h:533
returns true if the result of a hartree_product is a leaf node (compute norm & error) ...
Definition: funcimpl.h:493
Vector< double, NDIM > coordT
Type of vector holding coordinates.
Definition: mra.h:138
T operator()(double x, double y=0, double z=0, double xx=0, double yy=0, double zz=0) const
Evaluates the function at a point in user coordinates. Collective operation.
Definition: mra.h:384
void serialize(Archive &ar)
Definition: mra.h:2216
double check_symmetry() const
check symmetry of a function by computing the 2nd derivative
Definition: mra.h:1496
Definition: type_data.h:150
double err(const funcT &func) const
Returns an estimate of the difference ||this-func|| ... global sum performed.
Definition: mra.h:439
static void doconj(const Key< NDIM >, Tensor< T > &t)
Definition: mra.h:877
void refine_to_common_level(std::vector< Function< T, NDIM > > &vf, bool fence=true)
Refine vector of functions down to common finest level (reconstructs as necessary, optional fence)
Definition: mra.h:1386
double resultT
Definition: mra.h:2211
Function< TENSOR_RESULT_TYPE(typename opT::opT, R), NDIM > apply_1d_realspace_push(const opT &op, const Function< R, NDIM > &f, int axis, bool fence=true)
Definition: mra.h:2032
Objects that implement their own parallel archive interface should derive from this.
Definition: parar.h:52
FunctionImpl< T, NDIM > implT
Definition: mra.h:135
Function< T, NDIM > & fill_tree(const Function< R, NDIM > &g, bool fence=true)
With this being an on-demand function, fill the MRA tree according to different criteria.
Definition: mra.h:1060
Definition: mra.h:2220
returns true if the function has a leaf node at key (works only locally)
Definition: funcimpl.h:434
NDIM & f
Definition: mra.h:2179
Function< TENSOR_RESULT_TYPE(typename opT::opT, R), NDIM > apply_only(const opT &op, const Function< R, NDIM > &f, bool fence=true)
Apply operator ONLY in non-standard form - required other steps missing !!
Definition: mra.h:1942
Function< T, NDIM > & add_scalar(T t, bool fence=true)
Inplace add scalar. No communication except for optional fence.
Definition: mra.h:906
void verify_tree() const
Verifies the tree data structure ... global sync implied.
Definition: mra.h:452
Declaration and initialization of tree traversal functions and generic derivative.
void serialize(Archive &ar)
Definition: mra.h:836
Provides FunctionDefaults and utilities for coordinate transformation.
static const double & get_thresh()
Returns the default threshold.
Definition: funcdefaults.h:225
static std::vector< std::shared_ptr< FunctionImpl< Q, D > > > vimpl(const std::vector< Function< Q, D > > &v)
Returns vector of FunctionImpl pointers corresponding to vector of functions.
Definition: mra.h:1378
Function< T, KDIM+LDIM > hartree_product(const Function< T, KDIM > &left2, const Function< T, LDIM > &right2)
Performs a Hartree product on the two given low-dimensional functions.
Definition: mra.h:1657
void set_functor(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > functor)
Replace the current functor with the provided new one.
Definition: mra.h:606
void set_thresh(double value, bool fence=true)
Sets the value of the truncation threshold. Optional global fence.
Definition: mra.h:552
Function< TENSOR_RESULT_TYPE(L, R), NDIM > add(const Function< L, NDIM > &left, const Function< R, NDIM > &right, bool fence=true)
Same as operator+ but with optional fence and no automatic compression.
Definition: mra.h:1734
bool operator()(implT *impl, const Key< NDIM > &key, const nodeT &t) const
Definition: mra.h:766
Default store of a thingy via serialize(ar,t)
Definition: archive.h:708
bool is_on_demand() const
Definition: mra.h:611
void operator()(const Key< NDIM > &key, Tensor< T > &t) const
Definition: mra.h:833
Function< T, NDIM > & scale(const Q q, bool fence=true)
Inplace, scale the function by a constant. No communication except for optional fence.
Definition: mra.h:896
std::size_t tree_size() const
Returns the number of nodes in the function tree ... collective global sum.
Definition: mra.h:471
const double beta
Definition: gygi_soltion.cc:63
FunctionImpl holds all Function state to facilitate shallow copy semantics.
Definition: funcdefaults.h:48
This header should include pretty much everything needed for the parallel runtime.
T inner(const vecfunc< T, NDIM > &a, const vecfunc< T, NDIM > &b)
the non-linear solver requires an inner product
Definition: nemo.h:112
#define PROFILE_MEMBER_FUNC(classname)
Definition: worldprofile.h:199
Function< T, NDIM > & reduce_rank(const bool fence=true)
reduce the rank of the coefficient tensors
Definition: mra.h:1510
Function< T, NDIM > & operator-=(const Function< Q, NDIM > &other)
Inplace subtraction of functions in the wavelet basis.
Definition: mra.h:963
Provides FunctionCommonData, FunctionImpl and FunctionFactory.
Function< double, NDIM > abssq(const Function< double_complex, NDIM > &z, bool fence=true)
Returns a new function that is the square of the absolute value of the input.
Definition: mra.h:2255
Definition: mra.h:2230
void do_hartree_product(const FunctionImpl< T, LDIM > *left, const FunctionImpl< T, KDIM > *right, const opT *op)
perform the hartree product of f*g, invoked by result
Definition: mra.h:1103
return impl inner_local * g())
Defines and implements most of Tensor.
FunctionFactory & fence(bool fence=true)
Definition: function_factory.h:217
T inner_ext(T(*f)(const coordT &), const bool leaf_refine=true, const bool keep_redundant=false) const
Definition: mra.h:1199
Function< T, NDIM > conj(bool fence=true)
Inplace complex conjugate. No communication except for optional fence.
Definition: mra.h:885
Implements most functionality of separated operators.
FunctionFactory & empty()
Definition: function_factory.h:192
World & world() const
Returns the world.
Definition: mra.h:622
Function< T, NDIM > & operator+=(const Function< Q, NDIM > &other)
Inplace addition of functions in the wavelet basis.
Definition: mra.h:943
Function< T, NDIM > & abs(bool fence=true)
Returns *this for chaining.
Definition: mra.h:1003
Definition: mra.h:102
T * get() const
Definition: shared_ptr_bits.h:485
Tensor< double > operator()(const Key< NDIM > &key, const Tensor< double_complex > &t) const
Definition: mra.h:2222
Function< T, NDIM > & multiop_values(const opT &op, const std::vector< Function< T, NDIM > > &vf)
This is replaced with op(vector of functions) ... private.
Definition: mra.h:1407
leaf_op< T, NDIM > fnode_is_leaf(this->get_impl().get())
double norm2() const
Returns the 2-norm of the function ... global sum ... works in either basis.
Definition: mra.h:650
const T1 &f1 return GTEST_2_TUPLE_() T(f0, f1)
Function< Q, NDIM > convert(const Function< T, NDIM > &f, bool fence=true)
Type conversion implies a deep copy. No communication except for optional fence.
Definition: mra.h:1865
Defines operations on vectors of FunctionsThis file defines a number of operations on vectors of func...
Function< T, NDIM > & abs_square(bool fence=true)
Returns *this for chaining.
Definition: mra.h:1012
int k() const
Returns the number of multiwavelets (k). No communication.
Definition: mra.h:561
Function< typename opT::resultT, NDIM > & unary_op_coeffs(const Function< Q, NDIM > &func, const opT &op, bool fence)
This is replaced with left*right ... private.
Definition: mra.h:1365
void reconstruct(bool fence=true) const
Reconstructs the function, transforming into scaling function basis. Possible non-blocking comm...
Definition: mra.h:737
Definition: funcimpl.h:557
FunctionFactory< T, NDIM > factoryT
Definition: mra.h:137
void clear(bool fence=true)
Clears the function as if constructed uninitialized. Optional fence.
Definition: mra.h:799
Function< T, NDIM > copy(const Function< T, NDIM > &f, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool fence=true)
Create a new copy of the function with different distribution and optional fence. ...
Definition: mra.h:1835
T trace() const
Returns global value of int(f(x),x) ... global comm required.
Definition: mra.h:1034
Function< TENSOR_RESULT_TYPE(L, R), NDIM > sub(const Function< L, NDIM > &left, const Function< R, NDIM > &right, bool fence=true)
Same as operator- but with optional fence and no automatic compression.
Definition: mra.h:1778
void unaryop_coeff(const opT &op, bool fence=true)
Unary operation applied inplace to the coefficients.
Definition: mra.h:859
void refine_general(const opT &op, bool fence=true) const
Inplace autorefines the function. Optional fence. Possible non-blocking comm.
Definition: mra.h:757
T(* f)(T)
Definition: mra.h:831
void set_impl(const Function< R, NDIM > &f, bool zero=true)
Replace current FunctionImpl with a new one using the same parameters & map as f. ...
Definition: mra.h:617
Provides IndexIterator.
~Function()
Destruction of any underlying implementation is deferred to next global fence.
Definition: mra.h:177
T inner_ext_local(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine=true, const bool keep_redundant=false) const
Definition: mra.h:1217
This class is used to specify boundary conditions for all operatorsExterior boundary conditions (i...
Definition: funcdefaults.h:72
Default load of a thingy via serialize(ar,t)
Definition: archive.h:718
#define UNARY_OPTIMIZED_ITERATOR(X, x, exp)
Definition: tensor_macros.h:658
TensorArgs holds the arguments for creating a LowRankTensor.
Definition: gentensor.h:127
std::size_t max_nodes() const
Returns the max number of nodes on a processor.
Definition: mra.h:500
Function< T, NDIM > & operator=(const Function< T, NDIM > &f)
Assignment is shallow. No communication, works in either basis.
Definition: mra.h:170
static void store(const ParallelOutputArchive &ar, const Function< T, NDIM > &f)
Definition: mra.h:2275
bool & is_on_demand()
Definition: mraimpl.h:269
tensorT sqrt(const tensorT &s, double tol=1e-8)
Computes matrix square root (not used any more?)
Definition: DFcode/moldft.cc:446
A parallel world with full functionality wrapping an MPI communicator.
Definition: worldfwd.h:416
void fence()
Synchronizes all processes in communicator AND globally ensures no pending AM or tasks.
Definition: worldgop.cc:52
Function< T, NDIM > & square(bool fence=true)
Inplace squaring of function ... global comm only if not reconstructed.
Definition: mra.h:994
void verify() const
Asserts that the function is initialized.
Definition: mra.h:141
void print_info() const
Print a summary of the load balancing info.
Definition: mra.h:825
void refine(bool fence=true)
Inplace autorefines the function using same test as for squaring.
Definition: mra.h:774
return local
Definition: mra.h:1172
A multiresolution adaptive numerical function.
Definition: derivative.h:61
void unaryop_node(const opT &op, bool fence=true)
Unary operation applied inplace to the nodes.
Definition: mra.h:869
int Level
Definition: key.h:58
NDIM &g const
Definition: mra.h:1046
void print_size(const std::string name) const
print some info about this
Definition: mra.h:478
const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > & get_pmap() const
Returns a shared pointer to the process map.
Definition: mra.h:630
void broaden(const BoundaryConditions< NDIM > &bc=FunctionDefaults< NDIM >::get_bc(), bool fence=true) const
Inplace broadens support in scaling function basis.
Definition: mra.h:779
void vmulXX(const Function< L, NDIM > &left, const std::vector< Function< R, NDIM > > &right, std::vector< Function< T, NDIM > > &result, double tol, bool fence)
Multiplication of function * vector of functions using recursive algorithm of mulxx.
Definition: mra.h:1419
FunctionNode holds the coefficients, etc., at each node of the 2^NDIM-tree.
Definition: derivative.h:58
double errsq_local(const funcT &func) const
Returns an estimate of the difference ||this-func||^2 from local data.
Definition: mra.h:424
Abstract base class interface required for functors used as input to Functions.
Definition: function_interface.h:58
Function< typename opT::resultT, NDIM > unary_op(const Function< Q, NDIM > &func, const opT &op, bool fence=true)
Out of place application of unary operation to function values with optional fence.
Definition: mra.h:1607
impl world gop fence()
void store(Archive &ar) const
Stores the function to an archive.
Definition: mra.h:1345
std::size_t max_local_depth() const
Returns the maximum local depth of the function tree ... no communications.
Definition: mra.h:492
double real(double x)
Definition: complexfun.h:52
World * get_world() const
Returns pointer to the world.
Definition: parar.h:98
static void load(const ParallelInputArchive &ar, Function< T, NDIM > &f)
Definition: mra.h:2268
Function< TENSOR_RESULT_TYPE(L, R), NDIM > mul_sparse(const Function< L, NDIM > &left, const Function< R, NDIM > &right, double tol, bool fence=true)
Sparse multiplication — left and right must be reconstructed and if tol!=0 have tree of norms alread...
Definition: mra.h:1569
Tensor< double > operator()(const Key< NDIM > &key, const Tensor< double_complex > &t) const
Definition: mra.h:2212
void print_tree_graphviz(std::ostream &os=std::cout) const
Process 0 prints a graphviz-formatted output of all nodes in the tree (collective) ...
Definition: mra.h:815
void reset()
Definition: shared_ptr_bits.h:459
void set_impl(const std::shared_ptr< FunctionImpl< T, NDIM > > &impl)
Replace current FunctionImpl with provided new one.
Definition: mra.h:596
Function< TENSOR_RESULT_TYPE(Q, T), NDIM > operator*(const Function< T, NDIM > &f, const Q alpha)
Returns new function equal to f(x)*alpha.
Definition: mra.h:1553
double resultT
Definition: mra.h:2221
double norm2sq_local() const
Returns the square of the norm of the local function ... no communication.
Definition: mra.h:640
Definition: funcimpl.h:603
impl world gop sum(local)
remote_refT remote_ref(World &world) const
Returns a structure used to pass references to another process.
Definition: worldfut.h:552
if(VERIFY_TREE) verify_tree()
Definition: mra.h:2210
const double delta
Definition: dielectric_external_field.cc:120
Tensor< T > coeffs_for_jun(Level n, long mode=0)
Get the scaling function coeffs at level n starting from NS form.
Definition: mra.h:788
void change_tensor_type(const TensorArgs &targs, bool fence=true)
change the tensor type of the coefficients in the FunctionNode
Definition: mra.h:1357
std::size_t size() const
Returns the number of coefficients in the function ... collective global sum.
Definition: mra.h:515
Implements (2nd generation) static load/data balancing for functions.
Defines/implements plotting interface for functions.
Function< T, NDIM > & gaxpy(const T &alpha, const Function< Q, NDIM > &other, const R &beta, bool fence=true)
Inplace, general bi-linear operation in wavelet basis. No communication except for optional fence...
Definition: mra.h:924
Implements WorldContainer.
Function< TENSOR_RESULT_TYPE(L, R), NDIM > operator-(const Function< L, NDIM > &left, const Function< R, NDIM > &right)
Subtracts two functions with the new result being of type TensorResultType
Definition: mra.h:1789
A future is a possibly yet unevaluated value.
Definition: ref.h:210
A type you can return when you want to return void ... use "return None".
Definition: typestuff.h:154
Future< Level > evaldepthpt(const coordT &xuser) const
Definition: mra.h:251
std::size_t min_nodes() const
Returns the min number of nodes on a processor.
Definition: mra.h:507
void unaryop(T(*f)(T))
Inplace unary operation on function values.
Definition: mra.h:840
Function()
Default constructor makes uninitialized function. No communication.
Definition: mra.h:153
void sum_down(bool fence=true) const
Sums scaling coeffs down tree restoring state with coeffs only at leaves. Optional fence...
Definition: mra.h:746
Tensor< double > op(const Tensor< double > &x)
Definition: kain.cc:508
Interface to be provided by any process map.
Definition: worlddc.h:64
const T1 & f1
Definition: gtest-tuple.h:680
void print(const A &a)
Print a single item to std::cout terminating with new line.
Definition: print.h:122
void print_tree(std::ostream &os=std::cout) const
Process 0 prints a summary of all nodes in the tree (collective)
Definition: mra.h:809
Function< T, NDIM > project(const Function< T, NDIM > &other, int k=FunctionDefaults< NDIM >::get_k(), double thresh=FunctionDefaults< NDIM >::get_thresh(), bool fence=true)
Definition: mra.h:2162
void serialize(Archive &ar)
Definition: mra.h:770
#define MADNESS_EXCEPTION(msg, value)
Definition: worldexc.h:88
void norm_tree(bool fence=true) const
Initializes information about the function norm at all length scales.
Definition: mra.h:663
void standard(bool fence=true)
Converts the function from nonstandard form to standard form. Possible non-blocking comm...
Definition: mra.h:718
double thresh() const
Returns value of truncation threshold. No communication.
Definition: mra.h:542
Multidimension Key for MRA tree and associated iterators.
Future< long > evalR(const coordT &xuser) const
Evaluates the function rank at a point in user coordinates. Possible non-blocking comm...
Definition: mra.h:288
void do_hartree_product(const FunctionImpl< T, LDIM > *left, const FunctionImpl< T, KDIM > *right)
perform the hartree product of f*g, invoked by result
Definition: mra.h:1115
Tensor< double > operator()(const Key< NDIM > &key, const Tensor< double_complex > &t) const
Definition: mra.h:2232
const T1 const T2 & f2
Definition: gtest-tuple.h:680
double resultT
Definition: mra.h:2231
Function< T, NDIM > symmetrize(const Function< T, NDIM > &f, const std::string symmetry, bool fence=true)
symmetrize a function
Definition: mra.h:2073
FunctionFactory implements the named-parameter idiom for Function.
Definition: funcimpl.h:70
bool autorefine_square_test(const keyT &key, const nodeT &t) const
Returns true if this block of coeffs needs autorefining.
Definition: mraimpl.h:953
std::pair< bool, T > eval_local_only(const Vector< double, NDIM > &xuser, Level maxlevel) const
Evaluate function only if point is local returning (true,value); otherwise return (false...
Definition: mra.h:219
void undo_redundant(const bool fence)
convert this from redundant to standard reconstructed form
Definition: mraimpl.h:1504
Function< T, NDIM > multiply(const Function< T, NDIM > f, const Function< T, LDIM > g, const int particle, const bool fence=true)
multiply a high-dimensional function with a low-dimensional function
Definition: mra.h:2116
FunctionNode< T, NDIM > nodeT
Definition: mra.h:136
returns true if the node is well represented compared to its parent
Definition: funcimpl.h:456
Holds machinery to set up Functions/FuncImpls using various Factories and Interfaces.
Definition: chem/atomutil.cc:45
Function< T, NDIM > apply(const Derivative< T, NDIM > &D, const Function< T, NDIM > &f, bool fence=true)
Applies derivative operator to function (for syntactic equivalence to integral operator apply) ...
Definition: derivative.h:613
const double c
Definition: gfit.cc:200
T trace_local() const
Returns local contribution to int(f(x),x) ... no communication.
Definition: mra.h:1025
std::size_t max_depth() const
Returns the maximum depth of the function tree ... collective global sum.
Definition: mra.h:484
Function< TENSOR_RESULT_TYPE(T, R), NDIM-LDIM > project_out(const Function< R, LDIM > &g, const int dim) const
project this on the low-dim function g: h(x) =
Definition: mra.h:1294
Function< TENSOR_RESULT_TYPE(L, R), NDIM > binary_op(const Function< L, NDIM > &left, const Function< R, NDIM > &right, const opT &op, bool fence=true)
Generate new function = op(left,right) where op acts on the function values.
Definition: mra.h:1593
An archive for storing local or parallel data wrapping BinaryFstreamInputArchive. ...
Definition: parar.h:266
Key is the index for a node of the 2^NDIM-tree.
Definition: key.h:69
Level depthpt(const coordT &xuser) const
Definition: mra.h:405
Function< TENSOR_RESULT_TYPE(Q, T), NDIM > mul(const Q alpha, const Function< T, NDIM > &f, bool fence=true)
Returns new function equal to alpha*f(x) with optional fence.
Definition: mra.h:1528
bool is_compressed() const
Returns true if compressed, false otherwise. No communication.
Definition: mra.h:461
#define PROFILE_FUNC
Definition: worldprofile.h:198
Traits class to specify support of numeric types.
Definition: type_data.h:56
Function(const factoryT &factory)
Constructor from FunctionFactory provides named parameter idiom. Possible non-blocking communication...
Definition: mra.h:157
void make_redundant(const bool fence)
convert this to redundant, i.e. have sum coefficients on all levels
Definition: mraimpl.h:1485
Function< T, NDIM > & mapdim(const Function< T, NDIM > &f, const std::vector< long > &map, bool fence)
This is replaced with mapdim(f) ... private.
Definition: mra.h:1485