MADNESS  version 0.9
tensoriter.h
/*
  This file is part of MADNESS.

  Copyright (C) 2007,2010 Oak Ridge National Laboratory

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

  For more information please contact:

  Robert J. Harrison
  Oak Ridge National Laboratory
  One Bethel Valley Road
  P.O. Box 2008, MS-6367

  email: harrisonrj@ornl.gov
  tel:   865-241-3937
  fax:   865-572-0680

  $Id$
*/

#ifndef MADNESS_TENSOR_TENSORITER_H__INCLUDED
#define MADNESS_TENSOR_TENSORITER_H__INCLUDED

#include <madness/tensor/tensor.h>
#include <madness/tensor/tensorexcept.h>

#include <iostream>
#include <algorithm>
#include <complex>

#include <cmath>

namespace madness {

    template <class T> class Tensor;
    template <class T> class SliceTensor;

    static const long default_jdim = 5551212; // Never a valid dimension num.

    template <class T, class Q = T, class R = T> class TensorIterator {
        T* _p0_save;
        Q* _p1_save;
        R* _p2_save;
    public:
        T* _p0;
        Q* _p1;
        R* _p2;
        long ndim;
        long dimj;
        long _s0;
        long _s1;
        long _s2;
        long dim[TENSOR_MAXDIM];
        long ind[TENSOR_MAXDIM];
        long stride0[TENSOR_MAXDIM];
        long stride1[TENSOR_MAXDIM];
        long stride2[TENSOR_MAXDIM];

        TensorIterator(const Tensor<T>* t0, const Tensor<Q>* t1=0, const Tensor<R>* t2=0,
                       long iterlevel=0,
                       bool optimize=true, bool fusedim=true,
                       long jdim=default_jdim);

        TensorIterator<T,Q,R>& operator++();

        inline bool operator == (const TensorIterator<T,Q,R>& a) const {
            return _p0==a._p0;
        }

        inline bool operator != (const TensorIterator<T,Q,R>& a) const {
            return _p0!=a._p0;
        }

        inline TensorIterator<T,Q,R>* operator->() {
            return this;
        };

        inline T& operator*() const {
            return *_p0;
        };

        void reset();

        void reuse(const Tensor<T>* t0, const Tensor<Q>* t1=0, const Tensor<R>* t2=0);
    };
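
For orientation, here is a minimal usage sketch (not part of the header): the iterator is driven directly for plain element-wise traversal with iterlevel=0, so every element is visited in turn, and an iterator constructed from a null tensor pointer serves as the end sentinel (see the constructor below). The helper name copy_elementwise is hypothetical, and it is assumed here that a Tensor<double> pair of identical shape is available.

    #include <madness/tensor/tensoriter.h>

    // Sketch only: element-wise copy b = a (a and b must conform, i.e. have the
    // same shape).  iterlevel=0 visits every element; _p0 walks b, _p1 walks a.
    void copy_elementwise(madness::Tensor<double>& b, const madness::Tensor<double>& a) {
        madness::TensorIterator<double,double> it(&b, &a, 0, 0 /*iterlevel*/);
        const madness::TensorIterator<double,double> end(0);
        for (; it != end; ++it)
            *it._p0 = *it._p1;
    }

Writing through _p0 is legitimate here because the destination tensor is genuinely non-const; the const_cast inside the constructor (marked CONSTNESS VIOLATION below) is what makes this possible.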

    /// Constructor for general iterator to compose operations over up to three tensors
    template <class T, class Q, class R>
    TensorIterator<T,Q,R>::TensorIterator(const Tensor<T>* t0,
                                          const Tensor<Q>* t1,
                                          const Tensor<R>* t2,
                                          long iterlevel,
                                          bool optimize,
                                          bool fusedim,
                                          long jdim) {

        if (!t0) {
            // Used to indicate end of iteration.
            _p0 = 0;
            return;
        }

        //std::printf("t0=%p t1=%p t2=%p optimize=%d fusedim=%d iterlevel=%ld jdim=%ld\n",
        //t0,t1,t2,optimize,fusedim,iterlevel,jdim);

        TENSOR_ASSERT(iterlevel==0 || iterlevel==1,"invalid iteration level",iterlevel,t0);

        // First copy basic info over
        ndim = t0->ndim();
        _p0_save = _p0 = const_cast<T*>(t0->ptr()); // CONSTNESS VIOLATION
        for (int i=0; i<ndim; ++i) {
            dim[i] = t0->dim(i);
            stride0[i] = t0->stride(i);
        }
        if (t1) {
            TENSOR_ASSERT(t0->conforms(*t1),"first and second tensors do not conform",
                          0, t0);
            _p1_save = _p1 = const_cast<Q*>(t1->ptr()); // CONSTNESS VIOLATION
            for (int i=0; i<ndim; ++i) stride1[i] = t1->stride(i);
        }
        else {
            _p1_save = _p1 = 0;
        }
        if (t2) {
            TENSOR_ASSERT(t0->conforms(*t2),"first and third tensors do not conform",
                          0, t0);
            _p2_save = _p2 = const_cast<R*>(t2->ptr()); // CONSTNESS VIOLATION
            for (int i=0; i<ndim; ++i) stride2[i] = t2->stride(i);
        }
        else {
            _p2_save = _p2 = 0;
        }

        if (iterlevel == 0) {
            // Iteration will include all dimensions
            fusedim = false;
            jdim = ndim;
            dimj = 0;
            _s0 = 0;
            _s1 = 0;
            _s2 = 0;
        }
        else if (iterlevel == 1) {
            // Apply -ve indexing convention for dimensions
            if (jdim < 0) jdim += ndim;

            // If permissible optimize the order of dimensions excluding
            // any non-default value of jdim.
            if (optimize) {
                for (int i=0; i<ndim; ++i) {
                    if (i != jdim) {
                        for (int j=i; j<ndim; ++j) {
                            if (j != jdim) {
                                if (std::abs(stride0[i]) < std::abs(stride0[j])) {
                                    std::swap(stride0[i],stride0[j]);
                                    if (t1) std::swap(stride1[i],stride1[j]);
                                    if (t2) std::swap(stride2[i],stride2[j]);
                                    std::swap(dim[i],dim[j]);
                                }
                            }
                        }
                    }
                    //std::cout << "stride0[" << i << "]=" << stride0[i] << std::endl;
                }
            }

            // Iterations will exclude dimension jdim, default is last one
            if (jdim == default_jdim) {
                jdim = ndim-1;
            }
            else {
                fusedim = false;
            }
            TENSOR_ASSERT(jdim>=0 && jdim < ndim, "invalid index for external iteration",
                          jdim, t0);
            ndim--;

            // Stride and dimension info for the excluded dimension
            _s0 = stride0[jdim];
            if (t1) {
                _s1 = stride1[jdim];
            }
            else {
                _s1 = 0;
            }
            if (t2) {
                _s2 = stride2[jdim];
            }
            else {
                _s2 = 0;
            }
            dimj = dim[jdim];

            // Collapse stride and dimension info for remaining dimensions
            for (int i=jdim+1; i<=ndim; ++i) { // GCC COMPILER WARNING ... but need <= ... why?
                dim[i-1] = dim[i];
                stride0[i-1] = stride0[i];
            }
            if (t1) {
                for (int i=jdim+1; i<=ndim; ++i) stride1[i-1] = stride1[i]; // GCC COMPILER WARNING ... but need <= ... why?
            }
            if (t2) {
                for (int i=jdim+1; i<=ndim; ++i) stride2[i-1] = stride2[i]; // GCC COMPILER WARNING ... but need <= ... why?
            }

            if (fusedim) { // Only if jdim=default_jdim && iterlevel=1
                if (t2) {
                    for (int i=ndim-1; i>=0; --i) {
                        if (_s0*dimj == stride0[i] &&
                            _s1*dimj == stride1[i] &&
                            _s2*dimj == stride2[i]) {
                            dimj *= dim[i];
                            ndim--;
                        }
                        else {
                            break;
                        }
                    }
                }
                else if (t1) {
                    for (int i=ndim-1; i>=0; --i) {
                        if (_s0*dimj == stride0[i] &&
                            _s1*dimj == stride1[i]) {
                            dimj *= dim[i];
                            ndim--;
                        }
                        else {
                            break;
                        }
                    }
                }
                else {
                    for (int i=ndim-1; i>=0; --i) {
                        if (_s0*dimj == stride0[i]) {
                            dimj *= dim[i];
                            ndim--;
                        }
                        else {
                            break;
                        }
                    }
                }
            }
        }

        // Initialize indices for the counter.  Use TENSOR_MAXDIM so reference to
        // optimized-away dimensions is vaguely meaningful.
        for (int i=0; i<TENSOR_MAXDIM; ++i) ind[i] = 0;

        // std::printf("ndim=%ld dimj=%ld _s0=%ld _s1=%ld _s2=%ld _p0=%p _p1=%p _p2=%p\n",
        //             ndim,dimj,_s0,_s1,_s2,_p0,_p1,_p2);
        // for (int i=0; i<ndim; ++i) {
        //     std::printf("   %d dim=%ld stride0=%ld stride1=%ld stride2=%ld\n",
        //                 i,dim[i],stride0[i],stride1[i],stride2[i]);
        // }
    }
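
With iterlevel=1 the dimension jdim is excluded from the iteration (and, where the strides allow, fused with its neighbours), so the caller is expected to run an inner loop of dimj elements stepping by the saved strides _s0/_s1/_s2 while operator++ advances the remaining outer dimensions. A hedged sketch of that consumption pattern follows; the helper name scale_into is hypothetical.

    #include <madness/tensor/tensoriter.h>

    // Sketch only: b = 2*a using the iterlevel=1 pattern.  operator++ advances the
    // outer (possibly fused) dimensions; the inner loop covers the excluded
    // dimension: dimj elements reached with strides _s0 (for b) and _s1 (for a).
    void scale_into(madness::Tensor<double>& b, const madness::Tensor<double>& a) {
        madness::TensorIterator<double,double> it(&b, &a, 0, 1 /*iterlevel*/);
        const madness::TensorIterator<double,double> end(0);
        for (; it != end; ++it) {
            double*       bp = it._p0;
            const double* ap = it._p1;
            for (long j=0; j<it.dimj; ++j, bp+=it._s0, ap+=it._s1)
                *bp = 2.0 * *ap;
        }
    }

This is the layout the dimension-sorting and fusion logic above is working toward: the innermost loop runs over the longest available constant-stride run, which is what makes the "optimized" iteration worthwhile.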

    template <class T, class Q, class R>
    TensorIterator<T,Q,R>& TensorIterator<T,Q,R>::operator++() {
        long d = ndim-1;
        if (d<0 || _p0==0) {
            _p0 = 0;
            return *this;
        }
        while (ind[d] >= (dim[d] - 1)) {
            _p0 -= ind[d] * stride0[d];
            if (_p1) _p1 -= ind[d] * stride1[d];
            if (_p2) _p2 -= ind[d] * stride2[d];
            ind[d] = 0;
            d--;
            if (d < 0) {
                _p0 = 0;
                return *this;
            }
        }
        _p0 += stride0[d];
        if (_p1) _p1 += stride1[d];
        if (_p2) _p2 += stride2[d];
        ++(ind[d]);
        return *this;
    }
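
In effect operator++ is an odometer over the surviving dimensions, with the last dimension varying fastest: iterating a 2x3 tensor at iterlevel=0, for example, the counter ind passes through (0,0), (0,1), (0,2), (1,0), (1,1), (1,2); on the final carry _p0 is set to zero, so the iterator compares equal to an end sentinel constructed from a null tensor pointer.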

    /// Reset the iterator back to the start ...
    template <class T, class Q, class R>
    void TensorIterator<T,Q,R>::reset() {
        _p0 = _p0_save;
        _p1 = _p1_save;
        _p2 = _p2_save;
        for (int i=0; i<TENSOR_MAXDIM; ++i) ind[i] = 0;
    }


    /// Reuse this iterator to iterate over other Tensors
    template <class T, class Q, class R>
    void TensorIterator<T,Q,R>::reuse(const Tensor<T>* t0,
                                      const Tensor<Q> *t1,
                                      const Tensor<R> *t2) {
        _p0 = _p0_save = const_cast<T*>(t0->ptr()); // CONSTNESS VIOLATION
        if (t1) _p1 = _p1_save = const_cast<Q*>(t1->ptr()); // CONSTNESS VIOLATION
        if (t2) _p2 = _p2_save = const_cast<R*>(t2->ptr()); // CONSTNESS VIOLATION
        for (int i=0; i<TENSOR_MAXDIM; ++i) ind[i] = 0;
    }
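
Because reuse() only rebinds the data pointers and clears the counter, keeping the dimension and stride tables computed by the constructor, it is only safe for tensors whose shapes and strides match those used at construction. A sketch of the intended use (the helper name negate_all is hypothetical):

    #include <cstddef>
    #include <vector>
    #include <madness/tensor/tensoriter.h>

    // Sketch only: negate every element of a batch of tensors that all share the
    // shape and strides of batch[0], building the iterator once and rebinding it
    // with reuse() instead of re-running the constructor's analysis each time.
    void negate_all(std::vector<madness::Tensor<double> >& batch) {
        if (batch.empty()) return;
        madness::TensorIterator<double> it(&batch[0], 0, 0, 0 /*iterlevel: element-wise*/);
        const madness::TensorIterator<double> end(0);
        for (std::size_t n=0; n<batch.size(); ++n) {
            if (n > 0) it.reuse(&batch[n]);   // same dims/strides assumed
            for (; it != end; ++it) *it = -*it;
        }
    }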


}
#endif // MADNESS_TENSOR_TENSORITER_H__INCLUDED