/*
  This file is part of MADNESS.

  Copyright (C) 2007,2010 Oak Ridge National Laboratory

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

  For more information please contact:

  Robert J. Harrison
  Oak Ridge National Laboratory
  One Bethel Valley Road
  P.O. Box 2008, MS-6367

  email: harrisonrj@ornl.gov
  tel: 865-241-3937
  fax: 865-572-0680

  $Id$
*/
/*
 * libxc.h
 *
 *  Created on: Nov 23, 2008
 *      Author: wsttiger
 */

#ifndef LIBXC_H_
#define LIBXC_H_

//#define WORLD_INSTANTIATE_STATIC_TEMPLATES
#include <madness/mra/mra.h>
#include <madness/world/world.h>
//#include "xc.h"
#include "lda.h"

using namespace madness;
//***************************************************************************
// Clamp the density away from zero so that the LDA expressions below
// (fractional powers and logarithms of rho) remain finite.
static double munge(double r) {
  if (r < 1e-15) r = 2e-15;
  return r;
}
//***************************************************************************

//***************************************************************************
// LDA exchange-correlation potential (Slater exchange + VWN5 correlation),
// evaluated in place on the tensor of density values.  The factor of 2 forms
// the total density from the (alpha) spin density, as expected by the
// closed-shell routines in lda.h.
template <typename T>
inline static void ldaop(const Key<3>& key, Tensor<T>& t) {
  UNARY_OPTIMIZED_ITERATOR(T, t,
      double r = munge(2.0 * *_p0);
      double q; double dq1; double dq2;
      x_rks_s__(&r, &q, &dq1);     // exchange: functional value q (unused), potential dq1
      c_rks_vwn5__(&r, &q, &dq2);  // correlation: functional value q (unused), potential dq2
      *_p0 = dq1 + dq2);           // keep the potential
}
//***************************************************************************

//***************************************************************************
// LDA exchange-correlation energy density (Slater exchange + VWN5
// correlation), evaluated in place on the tensor of density values.
template <typename T>
inline static void ldaeop(const Key<3>& key, Tensor<T>& t) {
  UNARY_OPTIMIZED_ITERATOR(T, t,
      double r = munge(2.0 * *_p0);
      double q1; double q2; double dq;
      x_rks_s__(&r, &q1, &dq);     // exchange: energy q1, potential dq (discarded)
      c_rks_vwn5__(&r, &q2, &dq);  // correlation: energy q2, potential dq (discarded)
      *_p0 = q1 + q2);             // keep the energy density
}
//***************************************************************************

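//***************************************************************************
// Usage sketch (not part of the original interface): ldaop/ldaeop are meant
// to be applied point-wise to the values of a MADNESS Function via unaryop().
// The helper name below is hypothetical and only illustrates the calling
// pattern.
//
//   Function<double,3> make_lda_potential_sketch(const Function<double,3>& rho) {
//       Function<double,3> vxc = copy(rho);  // work on a copy of the density
//       vxc.reconstruct();                   // unaryop acts on function values
//       vxc.unaryop(&ldaop<double>);         // in place: rho -> Vxc(rho)
//       return vxc;
//   }
//
// The energy density follows the same pattern with ldaeop; integrating the
// result (e.g. with Function::trace()) then gives the LDA XC energy.
//***************************************************************************
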
//template <typename T>
//inline static void libxc_ldaop(const Key<3>& key, Tensor<T>& t) {
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_UNPOLARIZED);
//  xc_lda_x_init(&xc_x_func, XC_UNPOLARIZED, 3, 0);
//  UNARY_OPTIMIZED_ITERATOR(T, t,
//      double r = munge(2.0 * *_p0); double q; double dq1; double dq2;
//      xc_lda_vxc(&xc_x_func, &r, &q, &dq1); xc_lda_vxc(&xc_c_func, &r, &q, &dq2);
//      *_p0 = dq1 + dq2);
//}

//template <typename T>
//inline static void libxc_ldaop_sp(const Key<3>& key, Tensor<T>& t, Tensor<T>& a, Tensor<T>& b)
//{
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_POLARIZED);
//  xc_lda_x_init(&xc_x_func, XC_POLARIZED, 3, 0);
//  TERNARY_OPTIMIZED_ITERATOR(T, t, T, a, T, b,
//      double r[2]; r[0] = munge(*_p1); r[1] = munge(*_p2);
//      double q[2]; double dq1[2]; double dq2[2];
//      xc_lda_vxc(&xc_x_func, &r[0], &q[0], &dq1[0]); xc_lda_vxc(&xc_c_func, &r[0], &q[0], &dq2[0]);
//      *_p0 = dq1[0] + dq2[0]);
//}

//template <typename T>
//inline static void libxc_ldaeop_sp(const Key<3>& key, Tensor<T>& t, Tensor<T>& a, Tensor<T>& b)
//{
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_POLARIZED);
//  xc_lda_x_init(&xc_x_func, XC_POLARIZED, 3, 0);
//  TERNARY_OPTIMIZED_ITERATOR(T, t, T, a, T, b,
//      double r[2]; r[0] = munge(*_p1); r[1] = munge(*_p2);
//      double q1[2]; double q2[2]; double dq[2];
//      xc_lda_vxc(&xc_x_func, &r[0], &q1[0], &dq[0]); xc_lda_vxc(&xc_c_func, &r[0], &q2[0], &dq[0]);
//      *_p0 = q1[0] + q2[0]);
//}

//inline static void libxc_ldaeop_sp(const Key<3>& key, Tensor<double>& t) {
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_UNPOLARIZED);
//  xc_lda_x_init(&xc_x_func, XC_UNPOLARIZED, 3, 0);
//  UNARY_OPTIMIZED_ITERATOR(double, t,
//      double r = munge(2.0 * *_p0); double q1; double q2; double dq;
//      xc_lda_vxc(&xc_x_func, &r, &q1, &dq); xc_lda_vxc(&xc_c_func, &r, &q2, &dq);
//      *_p0 = q1 + q2);
//}

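//***************************************************************************
// Note (not part of the original file): the commented-out blocks above target
// the old libxc 1.x C interface (XC(lda_type), xc_lda_init, xc_lda_x_init).
// A sketch of the equivalent point-wise evaluation against the newer
// xc_func_type-based libxc C API is shown below for reference only.
//
//   #include <xc.h>
//
//   // Vxc = Vx(Slater) + Vc(VWN) at a single density value rho
//   inline double lda_vxc_point(double rho) {
//       xc_func_type x, c;
//       xc_func_init(&x, XC_LDA_X,     XC_UNPOLARIZED);
//       xc_func_init(&c, XC_LDA_C_VWN, XC_UNPOLARIZED);
//       double vx, vc;
//       xc_lda_vxc(&x, 1, &rho, &vx);
//       xc_lda_vxc(&c, 1, &rho, &vc);
//       xc_func_end(&x);
//       xc_func_end(&c);
//       return vx + vc;
//   }
//***************************************************************************
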
//const double THRESH_RHO = 1e-8;
//const double THRESH_GRHO = 1e-20;
//
//inline void wst_munge_grho(int npoint, double *rho, double *grho) {
//  for (int i=0; i<npoint; i++) {
//    if (rho[i] < THRESH_RHO) rho[i] = THRESH_RHO;
//    if ((rho[i] <= THRESH_RHO) || (grho[i] < THRESH_GRHO)) grho[i] = THRESH_GRHO;
//  }
//}
//
//inline void wst_munge_rho(int npoint, double *rho) {
//  for (int i=0; i<npoint; i++) {
//    if (rho[i] < THRESH_RHO) rho[i] = THRESH_RHO;
//  }
//}
//
//inline void xc_generic_lda(Tensor<double> rho_alpha, ///< Alpha-spin density at each grid point
//                           Tensor<double> f,         ///< Value of functional at each grid point
//                           Tensor<double> df_drho,   ///< Derivative of functional w.r.t. rho_alpha
//                           bool spinpol)
//{
//  MADNESS_ASSERT(rho_alpha.iscontiguous());
//  MADNESS_ASSERT(f.iscontiguous());
//  MADNESS_ASSERT(df_drho.iscontiguous());
//
//  rho_alpha = rho_alpha.flat();
//  f = f.flat();
//  df_drho = df_drho.flat();
//
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//
//  int npt = rho_alpha.dim(0);
//
//  Tensor<double> tf(npt);
//  Tensor<double> tdf_drho(npt);
//  double* rhoptr = rho_alpha.ptr();
//  double* tfptr = tf.ptr();
//  double* tdf_drhoptr = tdf_drho.ptr();
//
//  tf.fill(0.0);
//  tdf_drho.fill(0.0);
//  f.fill(0.0);
//  df_drho.fill(0.0);
//
//  wst_munge_rho(npt, rhoptr);
//
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_UNPOLARIZED);
//  for (int i = 0; i < npt; i++)
//  {
//    xc_lda_vxc(&xc_c_func, &rhoptr[i], &tfptr[i], &tdf_drhoptr[i]);
//  }
//
//  f.gaxpy(1.0, tf, 1.0);
//  df_drho.gaxpy(1.0, tdf_drho, 1.0);
//
//  tf.fill(0.0);
//  tdf_drho.fill(0.0);
//
//  xc_lda_x_init(&xc_x_func, XC_UNPOLARIZED, 3, 0);
//  for (int i = 0; i < npt; i++)
//  {
//    xc_lda_vxc(&xc_x_func, &rhoptr[i], &tfptr[i], &tdf_drhoptr[i]);
//  }
//
//  f.gaxpy(1.0, tf, 1.0);
//  df_drho.gaxpy(1.0, tdf_drho, 1.0);
//}
// //***************************************************************************
//
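//***************************************************************************
// Sketch (not in the original file): with the newer xc_func_type-based libxc
// API, the per-point loops in xc_generic_lda above can be replaced by one
// batched call per functional, e.g.
//
//   xc_func_type c_func;
//   xc_func_init(&c_func, XC_LDA_C_VWN, XC_UNPOLARIZED);
//   xc_lda_exc_vxc(&c_func, npt, rhoptr, tfptr, tdf_drhoptr);  // energy + potential
//   xc_func_end(&c_func);
//
// which fills tf with the energy density per particle and tdf_drho with
// dE/drho for all npt points at once.
//***************************************************************************
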
// //***************************************************************************
// template <int NDIM>
// inline void xc_lda_V(const Key<NDIM>& key, Tensor<double>& t)
// {
//   Tensor<double> enefunc = copy(t);
//   Tensor<double> V = copy(t);
//   ::xc_generic_lda(t, enefunc, V, false);
//   t(___) = V(___);
// }
// //***************************************************************************
//
// //***************************************************************************
// template <int NDIM>
// inline void xc_lda_ene(const Key<NDIM>& key, Tensor<double>& t)
// {
//   Tensor<double> V = copy(t);
//   Tensor<double> enefunc = copy(t);
//   ::xc_generic_lda(t, enefunc, V, false);
//   t(___) = enefunc(___);
// }
// //***************************************************************************


#endif /* LIBXC_H_ */