MADNESS version 0.9
tensor_macros.h File Reference

Macros for easy and efficient iteration over tensors.


Macros

#define TENSOR_MAXDIM   6
 
#define IND1   _i
 Macros IND1, ..., IND6, and IND are a convenience for indexing in macro iterators.
 
#define IND2   _i,_j
 
#define IND3   _i,_j,_k
 
#define IND4   _i,_j,_k,_l
 
#define IND5   _i,_j,_k,_l,_m
 
#define IND6   _i,_j,_k,_l,_m,_n
 
#define IND   IND6
 
#define ITERATOR1(t, exp)
 
#define ITERATOR2(t, exp)
 
#define ITERATOR3(t, exp)
 
#define ITERATOR4(t, exp)
 
#define ITERATOR5(t, exp)
 
#define ITERATOR6(t, exp)
 
#define ITERATOR(t, exp)
 
#define UNARYITERATOR1(X, x, exp)
 
#define UNARYITERATOR2(X, x, exp)
 
#define UNARYITERATOR3(X, x, exp)
 
#define UNARYITERATOR4(X, x, exp)
 
#define UNARYITERATOR5(X, x, exp)
 
#define UNARYITERATOR6(X, x, exp)
 
#define UNARYITERATOR(X, x, exp)
 
#define BINARYITERATOR1(X, x, Y, y, exp)
 
#define BINARYITERATOR2(X, x, Y, y, exp)
 
#define BINARYITERATOR3(X, x, Y, y, exp)
 
#define BINARYITERATOR4(X, x, Y, y, exp)
 
#define BINARYITERATOR5(X, x, Y, y, exp)
 
#define BINARYITERATOR6(X, x, Y, y, exp)
 
#define BINARYITERATOR(X, x, Y, y, exp)
 
#define TERNARYITERATOR1(X, x, Y, y, Z, z, exp)
 
#define TERNARYITERATOR2(X, x, Y, y, Z, z, exp)
 
#define TERNARYITERATOR3(X, x, Y, y, Z, z, exp)
 
#define TERNARYITERATOR4(X, x, Y, y, Z, z, exp)
 
#define TERNARYITERATOR5(X, x, Y, y, Z, z, exp)
 
#define TERNARYITERATOR6(X, x, Y, y, Z, z, exp)
 
#define TERNARYITERATOR(X, x, Y, y, Z, z, exp)
 
#define UNARY_OPTIMIZED_ITERATOR(X, x, exp)
 
#define UNARY_UNOPTIMIZED_ITERATOR(X, x, exp)
 
#define UNARY_UNOPTIMIZED_ITERATOR_NESTED(X, x, exp)
 
#define BINARY_OPTIMIZED_ITERATOR(X, x, Y, y, exp)
 
#define TERNARY_OPTIMIZED_ITERATOR(X, x, Y, y, Z, z, exp)
 

Detailed Description

Macros for easy and efficient iteration over tensors.

Several different macros have been defined to make it easy to iterate over expressions involving tensors. They vary in their generality, ease of use, and efficiency.

The most general and easiest to use, but also the least efficient and least safe, is

ITERATOR(t, expression)

where t is a Tensor of any type, size or dimension that is used to define the range of the loop indices, and expression can be nearly anything, including multi-line expressions performing arbitrary operations on multiple tensors. The loop indices, going from left to right in the dimensions, are

_i, _j, _k, ...

E.g., to add two matrices together (there are more efficient ways to do this, such as a += b):

Tensor<long> a(4,2), b(4,2);
ITERATOR(a, a(_i,_j) += b(_i,_j));

E.g., to print out the indices of all elements of a matrix greater than 0.5:

Tensor<float> m(5,5);
m.fillrandom();
ITERATOR(m, if (m(_i,_j) > 0.5) {
    cout << _i << " " << _j << endl;
});

To make it possible to index tensors of arbitrary dimension, the macro IND has been defined as the indices for the highest supported dimension. E.g., to divide the contents of two tensors of unknown dimension elementwise

ITERATOR(x, x(IND)/y(IND));

Note that using IND employs bounds checking, whereas direct indexing with _i, etc., does not.

The generality of these macros is offset by their inefficiency and lack of safety. The inefficiency is twofold. First, the ITERATOR macro generates a separate block of code for each possible dimension, which can cause code bloat and increase compilation time. To solve this problem, the macros ITERATOR1, ITERATOR2, etc., have been defined, with the corresponding IND1, IND2, etc. These macros may be applied to tensor expressions of the appropriate dimension.
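
E.g., a minimal dimension-specific sketch (assumes two conforming 2-d tensors):

Tensor<double> a(3,3), b(3,3);
ITERATOR2(a, a(IND2) += b(IND2));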

The second inefficiency is at runtime, due to the explicit indexing of all the tensor expressions and the inability to optimize the order in which memory is traversed. The lack of safety is the inability to check that the tensors in the expression conform and that the indices are not out of bounds.

The safety and cost of explicit indexing are addressed by the macros UNARYITERATOR, BINARYITERATOR, and TERNARYITERATOR, along with their specializations to specific numbers of dimensions (again formed by appending the dimension number to the name of the macro). These macros are safer, since you must explicitly name the tensors you are iterating over, which lets the macro check that the input tensors conform. The cost of looping is reduced by replacing explicit indexing with pointer arithmetic. These macros still define the loop indices _i, _j, etc., but also define _p0, _p1, etc., as pointers to the current elements of tensor argument 0, tensor argument 1, etc.

E.g., to set the elements of a 3-d tensor, t, of type double to a function of the indices

UNARYITERATOR(double, t, *_p0 = 1.0/(_i + _j + 1.0));

E.g., to merge two double tensors as the real and imaginary parts of a complex tensor of any dimension

TERNARYITERATOR(double_complex, c, double, r, double, i,
                *_p0 = double_complex(*_p1, *_p2));
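
Similarly, a minimal binary sketch (assumes conforming double tensors a and b; _p0 points into a and _p1 into b):

BINARYITERATOR(double, a, double, b, *_p0 = 2.0*(*_p1));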

However, we still have the problems that if the dimensions of a tensor have been reordered, the loops will traverse memory inefficiently, and that the dimension-independent macros still generate redundant code blocks. Also, the innermost loop might not be very long and will therefore be inefficient.

The most general, efficient, and code-compact macros internally use the TensorIterator, which you could also use directly. Since there is no nest of explicit loops, the tensor indices are no longer available as _i, _j, etc. Furthermore, the TensorIterator can reorder the loops to optimize the memory traversal, and fuse dimensions to make the innermost loop longer for better vectorization and reduced loop overhead.
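
For illustration, a sketch of using the TensorIterator directly, mirroring the expansion of UNARY_OPTIMIZED_ITERATOR shown in the macro documentation below (assumes a Tensor<double> x):

// Leave one dimension for an explicit inner loop (iterlevel=1),
// as in the non-contiguous branch of UNARY_OPTIMIZED_ITERATOR.
for (TensorIterator<double> iter = x.unary_iterator(1); iter._p0; ++iter) {
    long dimj = iter.dimj;   // length of the (possibly fused) innermost loop
    double* p0 = iter._p0;   // pointer to the current element of x
    long s0 = iter._s0;      // stride of the innermost loop
    for (long j = 0; j < dimj; ++j, p0 += s0) {
        *p0 *= 2.0;          // e.g., scale every element
    }
}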

The most efficient macros for iteration are UNARY_OPTIMIZED_ITERATOR, BINARY_OPTIMIZED_ITERATOR, and TERNARY_OPTIMIZED_ITERATOR. As before, these define the pointers _p0, _p1, _p2, which point to the current (and corresponding) element of each argument tensor. However, unlike the previous macros, there is no guarantee that the elements are traversed in the order expected by a simple nest of loops, and the indices are completely unavailable. In addition to using the iterators for optimal traversal, these macros attempt to use a single loop for optimal vector performance.

E.g., the most efficient and safe way to perform the previous example of merging two double tensors as the real and imaginary parts of a complex tensor of any dimension is

TERNARY_OPTIMIZED_ITERATOR(double_complex, c, double, r, double, i,
                           *_p0 = double_complex(*_p1, *_p2));

This is precisely how most internal operations are implemented.
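
E.g., a minimal unary sketch that zeroes a tensor t of type double:

UNARY_OPTIMIZED_ITERATOR(double, t, *_p0 = 0.0);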

In some situations it is necessary to preserve the expected order of loops and to not fuse dimensions. The macros UNARY_UNOPTIMIZED_ITERATOR and UNARY_UNOPTIMIZED_ITERATOR_NESTED use the TensorIterator but disable loop reordering and fusing. Once these optimizations have been turned off, the loop indices are available, if needed, from the ind[] member of the iterator (which is named iter).

E.g., the fillindex() method is implemented as follows

long count = 0;
UNARY_UNOPTIMIZED_ITERATOR(T, (*this), *_p0 = (T) count++);
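
E.g., a sketch that records the loop indices (assumes a 2-d Tensor<long> t, and that iter.ind[0] tracks the leading dimension while _j runs over the trailing one, per the note above):

Tensor<long> t(4,3);
UNARY_UNOPTIMIZED_ITERATOR(long, t, *_p0 = 10*iter.ind[0] + _j);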

NB: None of the above iterator macros can be nested within one another; use the actual TensorIterator to do this. (The exception is UNARY_UNOPTIMIZED_ITERATOR_NESTED, which uses distinct internal symbols, iter2, _q0, and _j2, so that it can appear inside another unary iteration.)

Recommendation: for both efficiency and safety, use the optimized macros (UNARY_OPTIMIZED_ITERATOR, etc.), unless it is necessary to preserve loop order, in which case use the unoptimized versions. If you need the loop indices, use the macros UNARYITERATOR, etc., unless you have a very general expression that they cannot handle. In this last instance, or for ease of rapid implementation, use the general ITERATOR macro first described.

Macro Definition Documentation

#define BINARY_OPTIMIZED_ITERATOR(X, x, Y, y, exp)
Value:
do { \
if (x.iscontiguous() && y.iscontiguous() && x.size()==y.size()) { \
X* restrict _p0 = x.ptr(); \
Y* restrict _p1 = y.ptr(); \
for (long _j=0; _j<x.size(); ++_j,++_p0,++_p1) {exp;} \
} \
else { \
for (TensorIterator<REMCONST(X),REMCONST(Y)> iter=x.binary_iterator(y,1); iter._p0; ++iter) { \
long _dimj = iter.dimj; \
X* restrict _p0 = iter._p0; \
Y* restrict _p1 = iter._p1; \
long _s0 = iter._s0; \
long _s1 = iter._s1; \
for (long _j=0; _j<_dimj; ++_j, _p0+=_s0, _p1+=_s1) { \
exp; \
} \
} } } while(0)

Referenced by madness::abs(), madness::arg(), madness::conj(), madness::copy(), madness::imag(), madness::GenTensor< T >::operator GenTensor< Q >(), real_op< Q, NDIM >::operator()(), madness::abs_square_op< Q, NDIM >::operator()(), madness::real_op< Q, NDIM >::operator()(), madness::imag_op< Q, NDIM >::operator()(), madness::abs_op< Q, NDIM >::operator()(), madness::conj_op< Q, NDIM >::operator()(), madness::function_real2complex_op< Q, NDIM >::operator()(), madness::real(), madness::tensor_abs(), madness::tensor_real2complex(), madness::tensor_ximag(), and madness::tensor_xreal().
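
A minimal usage sketch (hypothetical conforming tensors out and in; _p0 walks out, _p1 walks in):

Tensor<double> in(4,4), out(4,4);
BINARY_OPTIMIZED_ITERATOR(double, out, double, in, *_p0 = 2.0*(*_p1));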

#define BINARYITERATOR(X, x, Y, y, exp)
Value:
do { \
long _j=0, _k=0, _l=0, _m=0, _n=0; \
if (x.ndim() == 1) BINARYITERATOR1(X,x,Y,y,exp); \
else if (x.ndim() == 2) BINARYITERATOR2(X,x,Y,y,exp); \
else if (x.ndim() == 3) BINARYITERATOR3(X,x,Y,y,exp); \
else if (x.ndim() == 4) BINARYITERATOR4(X,x,Y,y,exp); \
else if (x.ndim() == 5) BINARYITERATOR5(X,x,Y,y,exp); \
else if (x.ndim() == 6) BINARYITERATOR6(X,x,Y,y,exp); \
else {TENSOR_ASSERT(x.ndim() <= 6,"ndim confused?",x.ndim(),&x);} \
} while(0)
#define BINARYITERATOR1(X, x, Y, y, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
long __xd0=x.dim(0); \
long __xs0=x.stride(0); \
long __ys0=y.stride(0); \
X* restrict _p0=x.ptr(); \
Y* restrict _p1=y.ptr(); \
for (long _i=0; _i<__xd0; ++_i, _p0+=__xs0, _p1+=__ys0) { \
exp; \
} } while(0)
#define BINARYITERATOR2(X, x, Y, y, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
long __xd0=x.dim(0), __xd1=x.dim(1); \
long __xs0=x.stride(0), __xs1=x.stride(1); \
long __ys0=y.stride(0), __ys1=y.stride(1); \
X* restrict __xp0=x.ptr(); \
Y* restrict __yp0=y.ptr(); \
for (long _i=0; _i<__xd0; ++_i, __xp0+=__xs0, __yp0+=__ys0) { \
X* restrict _p0=__xp0; \
Y* restrict _p1=__yp0; \
for (long _j=0; _j<__xd1; ++_j, _p0+=__xs1, _p1+=__ys1) { \
exp; \
} } } while(0)
#define BINARYITERATOR3(X, x, Y, y, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2); \
long __ys0=y.stride(0), __ys1=y.stride(1), __ys2=y.stride(2); \
X* restrict __xp0=x.ptr(); \
Y* restrict __yp0=y.ptr(); \
for (long _i=0; _i<__xd0; ++_i, __xp0+=__xs0, __yp0+=__ys0) { \
X* restrict __xp1=__xp0; \
Y* restrict __yp1=__yp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1, __yp1+=__ys1) { \
X* restrict _p0=__xp1; \
Y* restrict _p1=__yp1; \
for (long _k=0; _k<__xd2; ++_k, _p0+=__xs2, _p1+=__ys2) { \
exp; \
} } } } while(0)
#define BINARYITERATOR4(X, x, Y, y, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2), \
__xd3=x.dim(3); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2), \
__xs3=x.stride(3); \
long __ys0=y.stride(0), __ys1=y.stride(1), __ys2=y.stride(2), \
__ys3=y.stride(3); \
X* restrict __xp0=x.ptr(); \
Y* restrict __yp0=y.ptr(); \
for (long _i=0; _i<__xd0; ++_i, __xp0+=__xs0, __yp0+=__ys0) { \
X* restrict __xp1=__xp0; \
Y* restrict __yp1=__yp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1, __yp1+=__ys1) { \
X* restrict __xp2=__xp1; \
Y* restrict __yp2=__yp1; \
for (long _k=0; _k<__xd2; ++_k, __xp2+=__xs2, __yp2+=__ys2) { \
X* restrict _p0=__xp2; \
Y* restrict _p1=__yp2; \
for (long _l=0; _l<__xd3; ++_l, _p0+=__xs3, _p1+=__ys3) { \
exp; \
} } } } } while(0)
#define BINARYITERATOR5(X, x, Y, y, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2), \
__xd3=x.dim(3), __xd4=x.dim(4); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2), \
__xs3=x.stride(3), __xs4=x.stride(4); \
long __ys0=y.stride(0), __ys1=y.stride(1), __ys2=y.stride(2), \
__ys3=y.stride(3), __ys4=y.stride(4); \
X* restrict __xp0=x.ptr(); \
Y* restrict __yp0=y.ptr(); \
for (long _i=0; _i<__xd0; ++_i, __xp0+=__xs0, __yp0+=__ys0) { \
X* restrict __xp1=__xp0; \
Y* restrict __yp1=__yp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1, __yp1+=__ys1) { \
X* restrict __xp2=__xp1; \
Y* restrict __yp2=__yp1; \
for (long _k=0; _k<__xd2; ++_k, __xp2+=__xs2, __yp2+=__ys2) { \
X* restrict __xp3=__xp2; \
Y* restrict __yp3=__yp2; \
for (long _l=0; _l<__xd3; ++_l, __xp3+=__xs3, __yp3+=__ys3) { \
X* restrict _p0=__xp3; \
Y* restrict _p1=__yp3; \
for (long _m=0; _m<__xd4; ++_m, _p0+=__xs4, _p1+=__ys4) { \
exp; \
} } } } } } while(0)
#define BINARYITERATOR6(X, x, Y, y, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2), \
__xd3=x.dim(3), __xd4=x.dim(4), __xd5=x.dim(5); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2), \
__xs3=x.stride(3), __xs4=x.stride(4), __xs5=x.stride(5); \
long __ys0=y.stride(0), __ys1=y.stride(1), __ys2=y.stride(2), \
__ys3=y.stride(3), __ys4=y.stride(4), __ys5=y.stride(5); \
X* restrict __xp0=x.ptr(); \
Y* restrict __yp0=y.ptr(); \
for (long _i=0; _i<__xd0; ++_i, __xp0+=__xs0, __yp0+=__ys0) { \
X* restrict __xp1=__xp0; \
Y* restrict __yp1=__yp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1, __yp1+=__ys1) { \
X* restrict __xp2=__xp1; \
Y* restrict __yp2=__yp1; \
for (long _k=0; _k<__xd2; ++_k, __xp2+=__xs2, __yp2+=__ys2) { \
X* restrict __xp3=__xp2; \
Y* restrict __yp3=__yp2; \
for (long _l=0; _l<__xd3; ++_l, __xp3+=__xs3, __yp3+=__ys3) { \
X* restrict __xp4=__xp3; \
Y* restrict __yp4=__yp3; \
for (long _m=0; _m<__xd4; ++_m, __xp4+=__xs4, __yp4+=__ys4) { \
X* restrict _p0=__xp4; \
Y* restrict _p1=__yp4; \
for (long _n=0; _n<__xd5; ++_n, _p0+=__xs5, _p1+=__ys5) { \
exp; \
} } } } } } } while(0)
#define IND   IND6
#define IND1   _i

Macros IND1, ..., IND6, and IND are a convenience for indexing in macro iterators.

Referenced by Test1().

#define IND2   _i,_j

Referenced by Test1().

#define IND3   _i,_j,_k

Referenced by Test1().

#define IND4   _i,_j,_k,_l

Referenced by Test1(), and Test6().

#define IND5   _i,_j,_k,_l,_m

Referenced by Test1(), and Test5().

#define IND6   _i,_j,_k,_l,_m,_n

Referenced by Test1().

#define ITERATOR(t, exp)
Value:
do { \
long _j=0, _k=0, _l=0, _m=0, _n=0; \
if (t.ndim() == 1) {ITERATOR1(t,exp);} \
else if (t.ndim() == 2) {ITERATOR2(t,exp);} \
else if (t.ndim() == 3) {ITERATOR3(t,exp);} \
else if (t.ndim() == 4) {ITERATOR4(t,exp);} \
else if (t.ndim() == 5) {ITERATOR5(t,exp);} \
else if (t.ndim() == 6) {ITERATOR6(t,exp);} \
else {TENSOR_ASSERT(t.ndim() <= 6,"ndim confused?",t.ndim(),&t);} \
} while(0)

Referenced by madness::FunctionImpl< Q, NDIM >::err_box(), and main().

#define ITERATOR1(t, exp)
Value:
do { \
long __xd0=t.dim(0),_index=0; \
for (long _i=0; _i<__xd0; ++_i) {exp;_index++;} } while (0)

Referenced by Test1().

#define ITERATOR2(t, exp)
Value:
do { \
long __xd0=t.dim(0), __xd1=t.dim(1), _index=0; \
for (long _i=0; _i<__xd0; ++_i) { \
for (long _j=0; _j<__xd1; ++_j) {exp;_index++;} } } while (0)

Referenced by Test1(), and Test7().

#define ITERATOR3(t, exp)
Value:
do { \
long __xd0=t.dim(0), __xd1=t.dim(1), __xd2=t.dim(2), _index=0; \
for (long _i=0; _i<__xd0; ++_i) { \
for (long _j=0; _j<__xd1; ++_j) { \
for (long _k=0; _k<__xd2; ++_k) {exp;_index++;} } } } while (0)

Referenced by Test1(), and Test3().

#define ITERATOR4(t, exp)
Value:
do { \
long __xd0=t.dim(0), __xd1=t.dim(1), __xd2=t.dim(2), \
__xd3=t.dim(3), _index=0; \
for (long _i=0; _i<__xd0; ++_i) { \
for (long _j=0; _j<__xd1; ++_j) { \
for (long _k=0; _k<__xd2; ++_k) { \
for (long _l=0; _l<__xd3; ++_l) {exp;_index++;} } } } } while (0)

Referenced by Test1(), Test6(), and Test7().

#define ITERATOR5(t, exp)
Value:
do { \
long __xd0=t.dim(0), __xd1=t.dim(1), __xd2=t.dim(2), \
__xd3=t.dim(3), __xd4=t.dim(4), _index=0; \
for (long _i=0; _i<__xd0; ++_i) { \
for (long _j=0; _j<__xd1; ++_j) { \
for (long _k=0; _k<__xd2; ++_k) { \
for (long _l=0; _l<__xd3; ++_l) { \
for (long _m=0; _m<__xd4; ++_m) {exp;_index++;} } } } } } while (0)

Referenced by Test1(), and Test5().

#define ITERATOR6(t, exp)
Value:
do { \
long __xd0=t.dim(0), __xd1=t.dim(1), __xd2=t.dim(2), \
__xd3=t.dim(3), __xd4=t.dim(4), __xd5=t.dim(5), _index=0; \
for (long _i=0; _i<__xd0; ++_i) { \
for (long _j=0; _j<__xd1; ++_j) { \
for (long _k=0; _k<__xd2; ++_k) { \
for (long _l=0; _l<__xd3; ++_l) { \
for (long _m=0; _m<__xd4; ++_m) { \
for (long _n=0; _n<__xd5; ++_n) {exp;_index++;} } } } } } } while(0)

Referenced by Test1(), and Test7().

#define TENSOR_MAXDIM   6
#define TERNARY_OPTIMIZED_ITERATOR(X, x, Y, y, Z, z, exp)
Value:
do { \
if (x.iscontiguous() && y.iscontiguous() && z.iscontiguous() && x.size()==y.size() && x.size()==z.size()) { \
X* restrict _p0 = x.ptr(); \
Y* restrict _p1 = y.ptr(); \
Z* restrict _p2 = z.ptr(); \
for (long _j=0; _j<x.size(); ++_j,++_p0,++_p1,++_p2) {exp;} \
} \
else { \
for (TensorIterator<REMCONST(X),REMCONST(Y),REMCONST(Z)> iter=x.ternary_iterator(y,z,1); iter._p0; ++iter) { \
long _dimj = iter.dimj; \
X* restrict _p0 = iter._p0; \
Y* restrict _p1 = iter._p1; \
Z* restrict _p2 = iter._p2; \
long _s0 = iter._s0; \
long _s1 = iter._s1; \
long _s2 = iter._s2; \
for (long _j=0; _j<_dimj; ++_j, _p0+=_s0, _p1+=_s1, _p2+=_s2) { \
exp; \
} \
} } } while(0)

Referenced by madness::FunctionImpl< Q, NDIM >::do_mul(), and madness::FunctionImpl< Q, NDIM >::mul().

#define TERNARYITERATOR(X, x, Y, y, Z, z, exp)
Value:
do { \
long _j=0, _k=0, _l=0, _m=0, _n=0; \
if (x.ndim() == 1) TERNARYITERATOR1(X,x,Y,y,Z,z,exp); \
else if (x.ndim() == 2) TERNARYITERATOR2(X,x,Y,y,Z,z,exp); \
else if (x.ndim() == 3) TERNARYITERATOR3(X,x,Y,y,Z,z,exp); \
else if (x.ndim() == 4) TERNARYITERATOR4(X,x,Y,y,Z,z,exp); \
else if (x.ndim() == 5) TERNARYITERATOR5(X,x,Y,y,Z,z,exp); \
else if (x.ndim() == 6) TERNARYITERATOR6(X,x,Y,y,Z,z,exp); \
else {TENSOR_ASSERT(x.ndim() <= 6,"ndim confused?",x.ndim(),&x);} \
} while(0)
#define TERNARYITERATOR1(X, x, Y, y, Z, z, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
TENSOR_ASSERT(x.conforms(z),"first and third tensors do not conform",0,&x); \
long __xd0=x.dim(0); \
long __xs0=x.stride(0); \
long __ys0=y.stride(0); \
long __zs0=z.stride(0); \
X* restrict _p0=x.ptr(); \
Y* restrict _p1=y.ptr(); \
Z* restrict _p2=z.ptr(); \
for (long _i=0; _i<__xd0; ++_i, _p0+=__xs0, _p1+=__ys0, _p2+=__zs0) { \
exp; \
} } while(0)
#define TERNARYITERATOR2(X, x, Y, y, Z, z, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
TENSOR_ASSERT(x.conforms(z),"first and third tensors do not conform",0,&x); \
long __xd0=x.dim(0), __xd1=x.dim(1); \
long __xs0=x.stride(0), __xs1=x.stride(1); \
long __ys0=y.stride(0), __ys1=y.stride(1); \
long __zs0=z.stride(0), __zs1=z.stride(1); \
X* restrict __xp0=x.ptr(); \
Y* restrict __yp0=y.ptr(); \
Z* restrict __zp0=z.ptr(); \
for (long _i=0; _i<__xd0; ++_i, __xp0+=__xs0, __yp0+=__ys0, __zp0+=__zs0) { \
X* restrict _p0=__xp0; \
Y* restrict _p1=__yp0; \
Z* restrict _p2=__zp0; \
for (long _j=0; _j<__xd1; ++_j, _p0+=__xs1, _p1+=__ys1, _p2+=__zs1) { \
exp; \
} } } while(0)
#define TERNARYITERATOR3(X, x, Y, y, Z, z, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
TENSOR_ASSERT(x.conforms(z),"first and third tensors do not conform",0,&x); \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2); \
long __ys0=y.stride(0), __ys1=y.stride(1), __ys2=y.stride(2); \
long __zs0=z.stride(0), __zs1=z.stride(1), __zs2=z.stride(2); \
X* restrict __xp0=x.ptr(); \
Y* restrict __yp0=y.ptr(); \
Z* restrict __zp0=z.ptr(); \
for (long _i=0; _i<__xd0; ++_i, __xp0+=__xs0, __yp0+=__ys0, __zp0+=__zs0) { \
X* restrict __xp1=__xp0; \
Y* restrict __yp1=__yp0; \
Z* restrict __zp1=__zp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1, __yp1+=__ys1, __zp1+=__zs1) { \
X* restrict _p0=__xp1; \
Y* restrict _p1=__yp1; \
Z* restrict _p2=__zp1; \
for (long _k=0; _k<__xd2; ++_k, _p0+=__xs2, _p1+=__ys2, _p2+=__zs2) { \
exp; \
} } } } while(0)
#define TERNARYITERATOR4(X, x, Y, y, Z, z, exp)
Value:
do { \
TENSOR_ASSERT(x.conforms(y),"first and second tensors do not conform",0,&x); \
TENSOR_ASSERT(x.conforms(z),"first and third tensors do not conform",0,&x); \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2), \
__xd3=x.dim(3); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2), \
__xs3=x.stride(3); \
long __ys0=y.stride(0), __ys1=y.stride(1), __ys2=y.stride(2), \
__ys3=y.stride(3); \
long __zs0=z.stride(0), __zs1=z.stride(1), __zs2=z.stride(2), \
__zs3=z.stride(3); \
X* restrict __xp0=x.ptr(); \
Y* restrict __yp0=y.ptr(); \
Z* restrict __zp0=z.ptr(); \
for (long _i=0; _i<__xd0; ++_i, __xp0+=__xs0, __yp0+=__ys0, __zp0+=__zs0) { \
X* restrict __xp1=__xp0; \
Y* restrict __yp1=__yp0; \
Z* restrict __zp1=__zp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1, __yp1+=__ys1, __zp1+=__zs1) { \
X* restrict __xp2=__xp1; \
Y* restrict __yp2=__yp1; \
Z* restrict __zp2=__zp1; \
for (long _k=0; _k<__xd2; ++_k, __xp2+=__xs2, __yp2+=__ys2, __zp2+=__zs2) { \
X* restrict _p0=__xp2; \
Y* restrict _p1=__yp2; \
Z* restrict _p2=__zp2; \
for (long _l=0; _l<__xd3; ++_l, _p0+=__xs3, _p1+=__ys3, _p2+=__zs3) { \
exp; \
} } } } } while(0)
#define TERNARYITERATOR5(X, x, Y, y, Z, z, exp)
#define TERNARYITERATOR6(X, x, Y, y, Z, z, exp)
#define UNARY_OPTIMIZED_ITERATOR(X, x, exp)
Value:
do { \
if (x.iscontiguous()) { \
X* restrict _p0 = x.ptr(); \
for (long _j=0; _j<x.size(); ++_j,++_p0) {exp;} \
} \
else { \
for (TensorIterator<REMCONST(X)> iter=x.unary_iterator(1); iter._p0; ++iter) { \
long _dimj = iter.dimj; \
X* restrict _p0 = iter._p0; \
long _s0 = iter._s0; \
for (long _j=0; _j<_dimj; ++_j, _p0+=_s0) { \
exp; \
} \
} \
} \
} while(0)

Referenced by ln(), and madness::Function< T, NDIM >::SimpleUnaryOpWrapper::operator()().

#define UNARY_UNOPTIMIZED_ITERATOR(X, x, exp)
Value:
do { \
for (TensorIterator<REMCONST(X)> iter=x.unary_iterator(1,false,false); iter._p0; ++iter) { \
long _dimj = iter.dimj; \
X* restrict _p0 = iter._p0; \
long _s0 = iter._s0; \
for (long _j=0; _j<_dimj; ++_j, _p0+=_s0) { \
exp; \
} \
} } while(0)
#define UNARY_UNOPTIMIZED_ITERATOR_NESTED(X, x, exp)
Value:
do { \
for (TensorIterator<REMCONST(X)> iter2=x.unary_iterator(1,false,false); iter2._p0; ++iter2) { \
long _dimj2 = iter2.dimj; \
X* restrict _q0 = iter2._p0; \
long _s20 = iter2._s0; \
for (long _j2=0; _j2<_dimj2; ++_j2, _q0+=_s20) { \
exp; \
} \
} } while(0)
#define UNARYITERATOR(X, x, exp)
Value:
do { \
long _j=0, _k=0, _l=0, _m=0, _n=0; \
if (x.ndim() == 1) UNARYITERATOR1(X,x,exp); \
else if (x.ndim() == 2) UNARYITERATOR2(X,x,exp); \
else if (x.ndim() == 3) UNARYITERATOR3(X,x,exp); \
else if (x.ndim() == 4) UNARYITERATOR4(X,x,exp); \
else if (x.ndim() == 5) UNARYITERATOR5(X,x,exp); \
else if (x.ndim() == 6) UNARYITERATOR6(X,x,exp); \
else {TENSOR_ASSERT(x.ndim() <= 6,"ndim confused?",x.ndim(),&x);} } while(0)
#define UNARYITERATOR1(X, x, exp)
Value:
do { \
long __xd0=x.dim(0); \
long __xs0=x.stride(0); \
X* restrict _p0=x.ptr(); \
for (long _i=0; _i<__xd0; ++_i,_p0+=__xs0) { \
exp; \
} } while(0)
#define UNARYITERATOR2(X, x, exp)
Value:
do { \
long __xd0=x.dim(0), __xd1=x.dim(1); \
long __xs0=x.stride(0), __xs1=x.stride(1); \
X* restrict __xp0=x.ptr(); \
for (long _i=0; _i<__xd0; ++_i,__xp0+=__xs0) { \
X* restrict _p0=__xp0; \
for (long _j=0; _j<__xd1; ++_j, _p0+=__xs1) { \
exp; \
} } } while(0)
#define UNARYITERATOR3(X, x, exp)
Value:
do { \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2); \
X* restrict __xp0=x.ptr(); \
for (long _i=0; _i<__xd0; ++_i,__xp0+=__xs0) { \
X* restrict __xp1=__xp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1) { \
X* restrict _p0=__xp1; \
for (long _k=0; _k<__xd2; ++_k, _p0+=__xs2) { \
exp; \
} } } } while(0)
#define UNARYITERATOR4(X, x, exp)
Value:
do { \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2), \
__xd3=x.dim(3); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2), \
__xs3=x.stride(3); \
X* restrict __xp0=x.ptr(); \
for (long _i=0; _i<__xd0; ++_i,__xp0+=__xs0) { \
X* restrict __xp1=__xp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1) { \
X* restrict __xp2=__xp1; \
for (long _k=0; _k<__xd2; ++_k, __xp2+=__xs2) { \
X* restrict _p0=__xp2; \
for (long _l=0; _l<__xd3; ++_l, _p0+=__xs3) { \
exp; \
} } } } } while(0)
#define UNARYITERATOR5(X, x, exp)
Value:
do { \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2), \
__xd3=x.dim(3), __xd4=x.dim(4); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2), \
__xs3=x.stride(3), __xs4=x.stride(4); \
X* restrict __xp0=x.ptr(); \
for (long _i=0; _i<__xd0; ++_i,__xp0+=__xs0) { \
X* restrict __xp1=__xp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1) { \
X* restrict __xp2=__xp1; \
for (long _k=0; _k<__xd2; ++_k, __xp2+=__xs2) { \
X* restrict __xp3=__xp2; \
for (long _l=0; _l<__xd3; ++_l, __xp3+=__xs3) { \
X* restrict _p0 =__xp3; \
for (long _m=0; _m<__xd4; ++_m, _p0+=__xs4) { \
exp; \
} } } } } } while(0)
#define UNARYITERATOR6(X, x, exp)
Value:
do { \
long __xd0=x.dim(0), __xd1=x.dim(1), __xd2=x.dim(2), \
__xd3=x.dim(3), __xd4=x.dim(4), __xd5=x.dim(5); \
long __xs0=x.stride(0), __xs1=x.stride(1), __xs2=x.stride(2), \
__xs3=x.stride(3), __xs4=x.stride(4), __xs5=x.stride(5); \
X* restrict __xp0=x.ptr(); \
for (long _i=0; _i<__xd0; ++_i,__xp0+=__xs0) { \
X* restrict __xp1=__xp0; \
for (long _j=0; _j<__xd1; ++_j, __xp1+=__xs1) { \
X* restrict __xp2=__xp1; \
for (long _k=0; _k<__xd2; ++_k, __xp2+=__xs2) { \
X* restrict __xp3=__xp2; \
for (long _l=0; _l<__xd3; ++_l, __xp3+=__xs3) { \
X* restrict __xp4=__xp3; \
for (long _m=0; _m<__xd4; ++_m, __xp4+=__xs4) { \
X* restrict _p0=__xp4; \
for (long _n=0; _n<__xd5; ++_n, _p0+=__xs5) { \
exp; \
} } } } } } } while(0)