diff --git a/manual/REANNPackage_manumal_v_1.0.pdf b/manual/REANNPackage_manumal_v_1.0.pdf
new file mode 100644
index 0000000..e511d96
Binary files /dev/null and b/manual/REANNPackage_manumal_v_1.0.pdf differ
diff --git a/manual/manual.pdf b/manual/manual.pdf
deleted file mode 100644
index b1e8f90..0000000
Binary files a/manual/manual.pdf and /dev/null differ
diff --git a/reann/dm/PES.py b/reann/dm/PES.py
index 30f6781..7c2edc1 100644
--- a/reann/dm/PES.py
+++ b/reann/dm/PES.py
@@ -15,13 +15,13 @@ def __init__(self,nlinked=1):
         # global parameters for input_nn
         nblock = 1 # nblock>=2 resduial NN block will be employed nblock=1: simple feedforward nn
         nl=[128,128] # NN structure
-        dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
+        dropout_p=[0.0,0.0] # dropout probability for each hidden layer
         activate = 'Relu_like'
         table_norm= True
         oc_loop = 1
         oc_nl = [128,128] # neural network architecture
         oc_nblock = 1
-        oc_dropout_p=[0.0,0.0,0.0,0.0]
+        oc_dropout_p=[0.0,0.0]
         oc_activate = 'Relu_like'
         #========================queue_size sequence for laod data into gpu
         oc_table_norm = True
diff --git a/reann/dm/script_PES.py b/reann/dm/script_PES.py
index 9dbe7cb..e55b280 100644
--- a/reann/dm/script_PES.py
+++ b/reann/dm/script_PES.py
@@ -5,7 +5,7 @@ def jit_pes():
     init_pes=PES.PES()
     state_dict = torch.load("REANN.pth",map_location='cpu')
     new_state_dict = OrderedDict()
-    for k, v in state_dict['eannparam'].items():
+    for k, v in state_dict['reannparam'].items():
         if k[0:7]=="module.":
             name = k[7:] # remove `module.`
             new_state_dict[name] = v
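
The script_PES.py change above loads the checkpoint under the renamed 'reannparam' key and strips the `module.` prefix that (Distributed)DataParallel adds to parameter names. As a standalone illustration of that remapping (the model object and checkpoint path are placeholders, not part of the patch):

```python
# Standalone sketch of the remapping done in script_PES.py.
# "model" and the checkpoint path are placeholders; only the checkpoint layout
# ({'reannparam': parameters, possibly prefixed with 'module.'}) comes from the patch.
from collections import OrderedDict

import torch

def load_reann_state(model, path="REANN.pth"):
    checkpoint = torch.load(path, map_location="cpu")
    new_state_dict = OrderedDict()
    for k, v in checkpoint["reannparam"].items():
        # (Distributed)DataParallel stores parameters as "module.<name>";
        # strip the prefix so a plain module can load them.
        name = k[7:] if k.startswith("module.") else k
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    return model
```
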
diff --git a/reann/lammps-REANN-interface/src/pair_eann.cpp b/reann/lammps-REANN-interface/src/pair_reann.cpp
similarity index 94%
rename from reann/lammps-REANN-interface/src/pair_eann.cpp
rename to reann/lammps-REANN-interface/src/pair_reann.cpp
index b3f4083..1276ce8 100644
--- a/reann/lammps-REANN-interface/src/pair_eann.cpp
+++ b/reann/lammps-REANN-interface/src/pair_reann.cpp
@@ -5,7 +5,7 @@ //
 // file, You can obtain one at http://mozilla.org/MPL/2.0/.
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -25,11 +25,11 @@ using namespace LAMMPS_NS;
 using namespace std;
-PairEANN::PairEANN(LAMMPS *lmp) : Pair(lmp)
+PairREANN::PairREANN(LAMMPS *lmp) : Pair(lmp)
 {
 }
-PairEANN::~PairEANN()
+PairREANN::~PairREANN()
 {
     if (allocated)
     {
@@ -41,7 +41,7 @@ PairEANN::~PairEANN()
         atom->map_style = Atom::MAP_NONE;
 }
-void PairEANN::allocate()
+void PairREANN::allocate()
 {
     allocated = 1;
     int n = atom->ntypes;
@@ -58,7 +58,7 @@ void PairEANN::allocate()
-void PairEANN::init_style()
+void PairREANN::init_style()
 {
     int irequest = neighbor->request(this,instance_me);
     neighbor->requests[irequest]->pair = 1;
@@ -71,10 +71,10 @@ void PairEANN::init_style()
     torch::jit::setGraphExecutorOptimize(true);
     // load the model
     // Deserialize the ScriptModule from a file using torch::jit::load().
-    if (datatype=="double") module = torch::jit::load("EANN_LAMMPS_DOUBLE.pt");
+    if (datatype=="double") module = torch::jit::load("REANN_LAMMPS_DOUBLE.pt");
     else
     {
-        module = torch::jit::load("EANN_LAMMPS_FLOAT.pt");
+        module = torch::jit::load("REANN_LAMMPS_FLOAT.pt");
         tensor_type = torch::kFloat32;
     }
     // freeze the module
@@ -116,7 +116,7 @@ void PairEANN::init_style()
 }
-void PairEANN::coeff(int narg, char **arg)
+void PairREANN::coeff(int narg, char **arg)
 {
     if (!allocated)
     {
@@ -145,12 +145,12 @@ void PairEANN::coeff(int narg, char **arg)
 }
-void PairEANN::settings(int narg, char **arg)
+void PairREANN::settings(int narg, char **arg)
 {
 }
-double PairEANN::init_one(int i, int j)
+double PairREANN::init_one(int i, int j)
 {
     return cutoff;
 }
@@ -171,7 +171,7 @@ double PairEANN::init_one(int i, int j)
 //#pragma GCC push_options
 //#pragma GCC optimize (0)
-void PairEANN::compute(int eflag, int vflag)
+void PairREANN::compute(int eflag, int vflag)
 {
     if(eflag || vflag) ev_setup(eflag,vflag);
     else evflag = vflag_fdotr = eflag_global = eflag_atom = 0;
@@ -270,7 +270,7 @@ void PairEANN::compute(int eflag, int vflag)
 }
 //#pragma GCC pop_options
 //
-int PairEANN::select_gpu()
+int PairREANN::select_gpu()
 {
     int totalnodes, mynode;
     int trap_key = 0;
diff --git a/reann/lammps-interface/src/pair_eann.h b/reann/lammps-REANN-interface/src/pair_reann.h
similarity index 80%
rename from reann/lammps-interface/src/pair_eann.h
rename to reann/lammps-REANN-interface/src/pair_reann.h
index 827d88c..51fc3e4 100644
--- a/reann/lammps-interface/src/pair_eann.h
+++ b/reann/lammps-REANN-interface/src/pair_reann.h
@@ -1,11 +1,11 @@
 #ifdef PAIR_CLASS
-PairStyle(eann,PairEANN) // eann is the name in the input script
+PairStyle(reann,PairREANN) // reann is the name in the input script
 #else
-#ifndef LMP_PAIR_EANN_H
-#define LMP_PAIR_EANN_H
+#ifndef LMP_PAIR_REANN_H
+#define LMP_PAIR_REANN_H
 #include "pair.h"
 #include 
@@ -14,12 +14,12 @@ PairStyle(eann,PairEANN) // eann is the name in the input script
 namespace LAMMPS_NS
 {
-    class PairEANN : public Pair
+    class PairREANN : public Pair
     {
         public:
             torch::jit::script::Module module;
-            PairEANN(class LAMMPS *);
-            virtual ~PairEANN();
+            PairREANN(class LAMMPS *);
+            virtual ~PairREANN();
             virtual void compute(int, int);
             virtual void init_style();
             virtual double init_one(int, int);
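
init_style() above loads REANN_LAMMPS_DOUBLE.pt or REANN_LAMMPS_FLOAT.pt from the working directory, and the renamed header registers the style under the name `reann` used in the LAMMPS input script. Before running LAMMPS it can be worth confirming that the exported archives open at all; a minimal sketch, assuming the .pt files produced by script_PES.py sit in the current directory:

```python
# Quick check that the TorchScript archives exported by script_PES.py load;
# the filenames come from the patch, the dtype report is only illustrative.
import torch

for fname in ("REANN_LAMMPS_DOUBLE.pt", "REANN_LAMMPS_FLOAT.pt"):
    module = torch.jit.load(fname, map_location="cpu")
    dtypes = {p.dtype for p in module.parameters()}
    print(f"{fname}: loaded OK, parameter dtypes: {dtypes or 'none exposed'}")
```
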
diff --git a/reann/lammps-interface/src/pair_eann.cpp b/reann/lammps-interface/src/pair_reann.cpp
similarity index 94%
rename from reann/lammps-interface/src/pair_eann.cpp
rename to reann/lammps-interface/src/pair_reann.cpp
index 9a3a881..aca404a 100644
--- a/reann/lammps-interface/src/pair_eann.cpp
+++ b/reann/lammps-interface/src/pair_reann.cpp
@@ -5,7 +5,7 @@ //
 // file, You can obtain one at http://mozilla.org/MPL/2.0/.
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -25,11 +25,11 @@ using namespace LAMMPS_NS;
 using namespace std;
-PairEANN::PairEANN(LAMMPS *lmp) : Pair(lmp)
+PairREANN::PairREANN(LAMMPS *lmp) : Pair(lmp)
 {
 }
-PairEANN::~PairEANN()
+PairREANN::~PairREANN()
 {
     if (allocated)
     {
@@ -38,7 +38,7 @@ PairEANN::~PairEANN()
     }
 }
-void PairEANN::allocate()
+void PairREANN::allocate()
 {
     allocated = 1;
     int n = atom->ntypes;
@@ -55,7 +55,7 @@ void PairEANN::allocate()
-void PairEANN::init_style()
+void PairREANN::init_style()
 {
     int irequest = neighbor->request(this,instance_me);
     neighbor->requests[irequest]->pair = 1;
@@ -68,10 +68,10 @@ void PairEANN::init_style()
     torch::jit::setGraphExecutorOptimize(true);
     // load the model
     // Deserialize the ScriptModule from a file using torch::jit::load().
-    if (datatype=="double") module = torch::jit::load("EANN_LAMMPS_DOUBLE.pt");
+    if (datatype=="double") module = torch::jit::load("REANN_LAMMPS_DOUBLE.pt");
     else
     {
-        module = torch::jit::load("EANN_LAMMPS_FLOAT.pt");
+        module = torch::jit::load("REANN_LAMMPS_FLOAT.pt");
         tensor_type = torch::kFloat32;
     }
     // freeze the module
@@ -106,7 +106,7 @@ void PairEANN::init_style()
 }
-void PairEANN::coeff(int narg, char **arg)
+void PairREANN::coeff(int narg, char **arg)
 {
     if (!allocated)
     {
@@ -135,12 +135,12 @@ void PairEANN::coeff(int narg, char **arg)
 }
-void PairEANN::settings(int narg, char **arg)
+void PairREANN::settings(int narg, char **arg)
 {
 }
-double PairEANN::init_one(int i, int j)
+double PairREANN::init_one(int i, int j)
 {
     return cutoff;
 }
@@ -161,7 +161,7 @@ double PairEANN::init_one(int i, int j)
 //#pragma GCC push_options
 //#pragma GCC optimize (0)
-void PairEANN::compute(int eflag, int vflag)
+void PairREANN::compute(int eflag, int vflag)
 {
     if(eflag || vflag) ev_setup(eflag,vflag);
     else evflag = vflag_fdotr = eflag_global = eflag_atom = 0;
@@ -260,7 +260,7 @@ void PairEANN::compute(int eflag, int vflag)
 }
 //#pragma GCC pop_options
 //
-int PairEANN::select_gpu()
+int PairREANN::select_gpu()
 {
     int totalnodes, mynode;
     int trap_key = 0;
diff --git a/reann/lammps-REANN-interface/src/pair_eann.h b/reann/lammps-interface/src/pair_reann.h
similarity index 80%
rename from reann/lammps-REANN-interface/src/pair_eann.h
rename to reann/lammps-interface/src/pair_reann.h
index 827d88c..51fc3e4 100644
--- a/reann/lammps-REANN-interface/src/pair_eann.h
+++ b/reann/lammps-interface/src/pair_reann.h
@@ -1,11 +1,11 @@
 #ifdef PAIR_CLASS
-PairStyle(eann,PairEANN) // eann is the name in the input script
+PairStyle(reann,PairREANN) // reann is the name in the input script
 #else
-#ifndef LMP_PAIR_EANN_H
-#define LMP_PAIR_EANN_H
+#ifndef LMP_PAIR_REANN_H
+#define LMP_PAIR_REANN_H
 #include "pair.h"
 #include 
@@ -14,12 +14,12 @@ PairStyle(eann,PairEANN) // eann is the name in the input script
 namespace LAMMPS_NS
 {
-    class PairEANN : public Pair
+    class PairREANN : public Pair
     {
         public:
             torch::jit::script::Module module;
-            PairEANN(class LAMMPS *);
-            virtual ~PairEANN();
+            PairREANN(class LAMMPS *);
+            virtual ~PairREANN();
             virtual void compute(int, int);
             virtual void init_style();
             virtual double init_one(int, int);
diff --git a/reann/lammps/script_PES.py b/reann/lammps/script_PES.py
index 0be4bec..6d36324 100644
--- a/reann/lammps/script_PES.py
+++ b/reann/lammps/script_PES.py
@@ -3,9 +3,9 @@ import torch
 def jit_pes():
     init_pes=PES.PES()
-    state_dict = torch.load("EANN.pth",map_location='cpu')
+    state_dict = torch.load("REANN.pth",map_location='cpu')
     new_state_dict = OrderedDict()
-    for k, v in state_dict['eannparam'].items():
+    for k, v in state_dict['reannparam'].items():
         if k[0:7]=="module.":
             name = k[7:] # remove `module.`
             new_state_dict[name] = v
@@ -17,6 +17,6 @@ def jit_pes():
     for params in scripted_pes.parameters():
         params.requires_grad=False
     scripted_pes.to(torch.double)
-    scripted_pes.save("EANN_LAMMPS_DOUBLE.pt")
+    scripted_pes.save("REANN_LAMMPS_DOUBLE.pt")
     scripted_pes.to(torch.float32)
-    scripted_pes.save("EANN_LAMMPS_FLOAT.pt")
+    scripted_pes.save("REANN_LAMMPS_FLOAT.pt")
diff --git a/reann/lammps_REANN/script_PES.py b/reann/lammps_REANN/script_PES.py
index 9865961..33b0a77 100644
--- a/reann/lammps_REANN/script_PES.py
+++ b/reann/lammps_REANN/script_PES.py
@@ -3,9 +3,9 @@ import torch
 def jit_pes():
     init_pes=PES.PES()
-    state_dict = torch.load("EANN.pth",map_location='cpu')
+    state_dict = torch.load("REANN.pth",map_location='cpu')
     new_state_dict = OrderedDict()
-    for k, v in state_dict['eannparam'].items():
+    for k, v in state_dict['reannparam'].items():
         if k[0:7]=="module.":
             name = k[7:] # remove `module.`
             new_state_dict[name] = v
@@ -17,6 +17,6 @@ def jit_pes():
     for params in scripted_pes.parameters():
         params.requires_grad=False
     scripted_pes.to(torch.double)
-    scripted_pes.save("EANN_LAMMPS_DOUBLE.pt")
+    scripted_pes.save("REANN_LAMMPS_DOUBLE.pt")
     scripted_pes.to(torch.float32)
-    scripted_pes.save("EANN_LAMMPS_FLOAT.pt")
+    scripted_pes.save("REANN_LAMMPS_FLOAT.pt")
diff --git a/reann/pes/PES.py b/reann/pes/PES.py
index df9bbdb..d0cf672 100644
--- a/reann/pes/PES.py
+++ b/reann/pes/PES.py
@@ -15,13 +15,13 @@ def __init__(self,nlinked=1):
         # global parameters for input_nn
         nblock = 1 # nblock>=2 resduial NN block will be employed nblock=1: simple feedforward nn
         nl=[128,128] # NN structure
-        dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
+        dropout_p=[0.0,0.0] # dropout probability for each hidden layer
         activate = 'Relu_like'
         table_norm= True
         oc_loop = 1
         oc_nl = [128,128] # neural network architecture
         oc_nblock = 1
-        oc_dropout_p=[0.0,0.0,0.0,0.0]
+        oc_dropout_p=[0.0,0.0]
         oc_activate = 'Relu_like'
         #========================queue_size sequence for laod data into gpu
         oc_table_norm=True
diff --git a/reann/pes/script_PES.py b/reann/pes/script_PES.py
index 4f98b9d..cd58421 100644
--- a/reann/pes/script_PES.py
+++ b/reann/pes/script_PES.py
@@ -3,9 +3,9 @@ import torch
 def jit_pes():
     init_pes=PES.PES()
-    state_dict = torch.load("EANN.pth",map_location='cpu')
+    state_dict = torch.load("REANN.pth",map_location='cpu')
     new_state_dict = OrderedDict()
-    for k, v in state_dict['eannparam'].items():
+    for k, v in state_dict['reannparam'].items():
         if k[0:7]=="module.":
             name = k[7:] # remove `module.`
             new_state_dict[name] = v
@@ -17,6 +17,6 @@ def jit_pes():
     for params in scripted_pes.parameters():
         params.requires_grad=False
     scripted_pes.to(torch.double)
-    scripted_pes.save("EANN_PES_DOUBLE.pt")
+    scripted_pes.save("REANN_PES_DOUBLE.pt")
     scripted_pes.to(torch.float32)
-    scripted_pes.save("EANN_PES_FLOAT.pt")
+    scripted_pes.save("REANN_PES_FLOAT.pt")
diff --git a/reann/pol/PES.py b/reann/pol/PES.py
index c483daa..ca29d32 100644
--- a/reann/pol/PES.py
+++ b/reann/pol/PES.py
@@ -15,13 +15,13 @@ def __init__(self,nlinked=1):
         # global parameters for input_nn
         nblock = 1 # nblock>=2 resduial NN block will be employed nblock=1: simple feedforward nn
         nl=[128,128] # NN structure
-        dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
+        dropout_p=[0.0,0.0] # dropout probability for each hidden layer
         activate = 'Relu_like'
         table_norm= True
         oc_loop = 1
         oc_nl = [128,128] # neural network architecture
         oc_nblock = 1
-        oc_dropout_p=[0.0,0.0,0.0,0.0]
+        oc_dropout_p=[0.0,0.0]
         oc_activate = 'Relu_like'
         #========================queue_size sequence for laod data into gpu
         oc_table_norm=True
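
The dropout_p edits in the PES.py files above shorten the per-layer dropout lists to match the two hidden layers in nl=[128,128] and oc_nl=[128,128] ("dropout probability for each hidden layer"). A small consistency check of that invariant; the check itself is illustrative and not part of the package:

```python
# Illustrative check (not package code): one dropout probability per hidden layer,
# mirroring the nl/dropout_p and oc_nl/oc_dropout_p pairs set in PES.py.
nl = [128, 128]            # hidden-layer widths of the output NN
dropout_p = [0.0, 0.0]     # one dropout probability per hidden layer
oc_nl = [128, 128]         # hidden-layer widths of the orbital-coefficient NN
oc_dropout_p = [0.0, 0.0]

for tag, widths, probs in (("nl", nl, dropout_p), ("oc_nl", oc_nl, oc_dropout_p)):
    assert len(probs) == len(widths), (
        f"{tag}: expected {len(widths)} dropout entries, got {len(probs)}"
    )
print("dropout lists match the network depth")
```
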
diff --git a/reann/pol/script_PES.py b/reann/pol/script_PES.py
index b661f5c..5de0c58 100644
--- a/reann/pol/script_PES.py
+++ b/reann/pol/script_PES.py
@@ -3,9 +3,9 @@ import torch
 def jit_pes():
     init_pes=PES.PES()
-    state_dict = torch.load("EANN.pth",map_location='cpu')
+    state_dict = torch.load("REANN.pth",map_location='cpu')
     new_state_dict = OrderedDict()
-    for k, v in state_dict['eannparam'].items():
+    for k, v in state_dict['reannparam'].items():
         if k[0:7]=="module.":
             name = k[7:] # remove `module.`
             new_state_dict[name] = v
@@ -17,6 +17,6 @@ def jit_pes():
     for params in scripted_pes.parameters():
         params.requires_grad=False
     scripted_pes.to(torch.double)
-    scripted_pes.save("EANN_POL_DOUBLE.pt")
+    scripted_pes.save("REANN_POL_DOUBLE.pt")
     scripted_pes.to(torch.float32)
-    scripted_pes.save("EANN_POL_FLOAT.pt")
+    scripted_pes.save("REANN_POL_FLOAT.pt")
diff --git a/reann/src/optimize.py b/reann/src/optimize.py
index c862150..3462144 100755
--- a/reann/src/optimize.py
+++ b/reann/src/optimize.py
@@ -67,8 +67,8 @@ def Optimize(fout,prop_ceff,nprop,train_nele,test_nele,init_f,final_f,start_lr,e
         # save the best model
         if lossprop[0]
=2 resduial NN block will be employed nblock=1: simple feedforward nn
+nblock = 1 # nblock>=2 resduial NN block will be employed nblock=1: simple feedforward nn
 ratio=0.9 # ratio for vaildation
 #==========================================================
 Epoch=10000 # total numbers of epochs for fitting
-patience_epoch=500 # patience epoch Number of epochs with no improvement after which learning rate will be reduced.
-decay_factor=0.6 # Factor by which the learning rate will be reduced. new_lr = lr * factor.
-print_epoch=10 # number of epoch to calculate and print the error
+patience_epoch=100 # patience epoch Number of epochs with no improvement after which learning rate will be reduced.
+decay_factor=0.5 # Factor by which the learning rate will be reduced. new_lr = lr * factor.
+print_epoch=1 # number of epoch to calculate and print the error
 # adam parameter
 start_lr=0.001 # initial learning rate
-end_lr=1e-4 # final learning rate
+end_lr=1e-5 # final learning rate
 #==========================================================
 # regularization coefficence
 re_ceff=0.0 # L2 normalization cofficient
 batchsize_train=32 # batch size
-batchsize_test=512 # batch size
+batchsize_test=256 # batch size
 e_ceff=0.1 # weight of energy
 init_f = 10 # initial force weight in loss function
 final_f = 5e-1 # final force weight in loss function
 nl=[128,128] # NN structure
-dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
+dropout_p=[0.0,0.0] # dropout probability for each hidden layer
 activate = 'Relu_like' # default "Tanh_like", optional "Relu_like"
 queue_size=10
 table_norm= True
 find_unused = False
 #===========param for orbital coefficient ===============================================
-oc_loop = 0
+oc_loop = 1
 oc_nl = [128,128] # neural network architecture
 oc_nblock = 1
-oc_dropout_p=[0.0,0.0,0.0,0.0]
+oc_dropout_p=[0.0,0.0]
 #=====================act fun===========================
 oc_activate = 'Relu_like' # default "Tanh_like", optional "Relu_like"
 #========================queue_size sequence for laod data into gpu
-oc_table_norm=False
+oc_table_norm=True
 DDP_backend="nccl"
 # floder to save the data
 floder="./"
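
The patience_epoch and decay_factor comments above describe plateau-style learning-rate decay (new_lr = lr * factor after a fixed number of epochs without improvement, bounded below by end_lr). A generic sketch of how such settings are typically wired up in PyTorch; this is not the package's Optimize routine, and the model and optimizer here are placeholders:

```python
# Generic plateau-style schedule matching the semantics of the comments above
# (start_lr, decay_factor, patience_epoch, end_lr). "model" is a placeholder;
# nothing here is taken from reann/src/optimize.py itself.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)   # start_lr
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,
    factor=0.5,     # decay_factor: new_lr = lr * factor
    patience=100,   # patience_epoch: epochs with no improvement before decay
    min_lr=1e-5,    # end_lr: do not decay below this value
)

def on_epoch_end(validation_loss: float) -> None:
    # Call once per epoch with the monitored metric.
    scheduler.step(validation_loss)
```
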
diff --git a/reann/tdm/PES.py b/reann/tdm/PES.py
index 8005e5c..1833012 100644
--- a/reann/tdm/PES.py
+++ b/reann/tdm/PES.py
@@ -15,13 +15,13 @@ def __init__(self,nlinked=1):
         # global parameters for input_nn
         nblock = 1 # nblock>=2 resduial NN block will be employed nblock=1: simple feedforward nn
         nl=[128,128] # NN structure
-        dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
+        dropout_p=[0.0,0.0] # dropout probability for each hidden layer
         activate = 'Relu_like'
         table_norm = True
         oc_loop = 1
-        oc_nl = [32,32] # neural network architecture
+        oc_nl = [128,128] # neural network architecture
         oc_nblock = 1
-        oc_dropout_p=[0.0,0.0,0.0,0.0]
+        oc_dropout_p=[0.0,0.0]
         oc_activate = 'Relu_like'
         #========================queue_size sequence for laod data into gpu
         oc_table_norm=True
diff --git a/reann/tdm/script_PES.py b/reann/tdm/script_PES.py
index 02a987f..2cd1482 100644
--- a/reann/tdm/script_PES.py
+++ b/reann/tdm/script_PES.py
@@ -3,9 +3,9 @@ import torch
 def jit_pes():
     init_pes=PES.PES()
-    state_dict = torch.load("EANN.pth",map_location='cpu')
+    state_dict = torch.load("REANN.pth",map_location='cpu')
     new_state_dict = OrderedDict()
-    for k, v in state_dict['eannparam'].items():
+    for k, v in state_dict['reannparam'].items():
         if k[0:7]=="module.":
             name = k[7:] # remove `module.`
             new_state_dict[name] = v
@@ -17,6 +17,6 @@ def jit_pes():
     for params in scripted_pes.parameters():
         params.requires_grad=False
     scripted_pes.to(torch.double)
-    scripted_pes.save("EANN_TDM_DOUBLE.pt")
+    scripted_pes.save("REANN_TDM_DOUBLE.pt")
     scripted_pes.to(torch.float32)
-    scripted_pes.save("EANN_TDM_FLOAT.pt")
+    scripted_pes.save("REANN_TDM_FLOAT.pt")
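
Because every script_PES.py in this patch now expects REANN.pth with a 'reannparam' key, checkpoints written before the rename (EANN.pth with 'eannparam') will no longer load as-is. A hypothetical compatibility helper, not part of the patch, that accepts either layout:

```python
# Hypothetical helper, not part of this patch: read a checkpoint written either
# before the rename (EANN.pth, 'eannparam') or after it (REANN.pth, 'reannparam')
# and return a state dict with any 'module.' prefixes removed.
from collections import OrderedDict

import torch

def read_reann_checkpoint(path):
    checkpoint = torch.load(path, map_location="cpu")
    for key in ("reannparam", "eannparam"):   # new key first, old key as fallback
        if key in checkpoint:
            params = checkpoint[key]
            break
    else:
        raise KeyError(f"{path} contains neither 'reannparam' nor 'eannparam'")
    return OrderedDict(
        (k[7:] if k.startswith("module.") else k, v) for k, v in params.items()
    )
```
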