Skip to content

Commit

Permalink
2021_11_30
Browse files Browse the repository at this point in the history
  • Loading branch information
zhangylch committed Nov 30, 2021
1 parent 5b8c3ee commit ceebc8e
Show file tree
Hide file tree
Showing 11 changed files with 48 additions and 45 deletions.
3 changes: 1 addition & 2 deletions readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,5 +14,4 @@ ___________________________________
**References:**
1. The original EANN model: Yaolong Zhang, Ce Hu and Bin Jiang *J. Phys. Chem. Lett.* 10, 4962-4967 (2019).
2. The EANN model for dipole/transition dipole/polarizability: Yaolong Zhang, Sheng Ye, Jinxiao Zhang, Jun Jiang and Bin Jiang *J. Phys. Chem. B* 124, 7284–7290 (2020).
3. The REANN model: Yaolong Zhang, Junfan xia and Bin Jiang *arXiv:2106.08245*

3. The REANN model: Yaolong Zhang, Junfan Xia and Bin Jiang *Phys. Rev. Lett.* 127, 156002 (2021).
15 changes: 8 additions & 7 deletions reann/dm/PES.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,18 @@ def __init__(self,nlinked=1):
global oc_loop,oc_nblock, oc_nl, oc_dropout_p, oc_table_norm, oc_activate
global nwave, neigh_atoms, cutoff, nipsin, atomtype
# global parameters for input_nn
nblock = 2 # nblock>=2: residual NN blocks will be employed; nblock=1: simple feedforward NN
nblock = 1 # nblock>=2: residual NN blocks will be employed; nblock=1: simple feedforward NN
nl=[128,128] # NN structure
dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
activate = 'Relu_like'
table_norm= False
oc_loop = 0
oc_nl = [32,32] # neural network architecture
table_norm= True
oc_loop = 1
oc_nl = [128,128] # neural network architecture
oc_nblock = 1
oc_dropout_p=[0.0,0.0,0.0,0.0]
oc_activate = 'Relu_like'
#========================queue_size sequence for loading data into GPU
oc_table_norm = False
oc_table_norm = True
norbit = None
#======================read input_nn==================================
with open('para/input_nn','r') as f1:
Expand All @@ -42,8 +42,9 @@ def __init__(self,nlinked=1):
outputneuron=1
#======================read input_nn=============================================
nipsin=2
cutoff=4.0
nwave=6
cutoff=4.5
nwave=7
neigh_atoms=150
with open('para/input_density','r') as f1:
while True:
tmp=f1.readline()
Expand Down
6 changes: 3 additions & 3 deletions reann/dm/script_PES.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import torch
def jit_pes():
init_pes=PES.PES()
state_dict = torch.load("EANN.pth",map_location='cpu')
state_dict = torch.load("REANN.pth",map_location='cpu')
new_state_dict = OrderedDict()
for k, v in state_dict['eannparam'].items():
if k[0:7]=="module.":
Expand All @@ -17,6 +17,6 @@ def jit_pes():
for params in scripted_pes.parameters():
params.requires_grad=False
scripted_pes.to(torch.double)
scripted_pes.save("EANN_DM_DOUBLE.pt")
scripted_pes.save("REANN_DM_DOUBLE.pt")
scripted_pes.to(torch.float32)
scripted_pes.save("EANN_DM_FLOAT.pt")
scripted_pes.save("REANN_DM_FLOAT.pt")
16 changes: 8 additions & 8 deletions reann/lammps/PES.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,16 +14,16 @@ def __init__(self,nlinked=1):
# global parameters for input_nn
nblock = 1 # nblock>=2: residual NN blocks will be employed; nblock=1: simple feedforward NN
nl=[128,128] # NN structure
dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
dropout_p=[0.0,0.0] # dropout probability for each hidden layer
activate = "Relu_like"
table_norm= False
oc_loop = 0
oc_nl = [32,32] # neural network architecture
table_norm= True
oc_loop = 1
oc_nl = [128,128] # neural network architecture
oc_nblock = 1
oc_dropout_p=[0.0,0.0,0.0,0.0]
oc_dropout_p=[0.0,0.0]
oc_activate = "Relu_like"
#========================queue_size sequence for loading data into GPU
oc_table_norm=False
oc_table_norm=True
norbit=None
#======================read input_nn==================================
with open('para/input_nn','r') as f1:
Expand All @@ -41,8 +41,8 @@ def __init__(self,nlinked=1):
outputneuron=1
#======================read input_nn=============================================
nipsin=2
cutoff=4.0
nwave=6
cutoff=4.5
nwave=7
with open('para/input_density','r') as f1:
while True:
tmp=f1.readline()
Expand Down
14 changes: 7 additions & 7 deletions reann/lammps_REANN/PES.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,16 +14,16 @@ def __init__(self,nlinked=1):
# global parameters for input_nn
nblock = 1 # nblock>=2: residual NN blocks will be employed; nblock=1: simple feedforward NN
nl=[128,128] # NN structure
dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
dropout_p=[0.0,0.0] # dropout probability for each hidden layer
activate="Relu_like"
table_norm= False
table_norm= True
oc_loop = 0
oc_nl = [32,32] # neural network architecture
oc_nl = [128,128] # neural network architecture
oc_nblock = 1
oc_dropout_p=[0.0,0.0,0.0,0.0]
oc_dropout_p=[0.0,0.0]
oc_activate="Relu_like"
#========================queue_size sequence for loading data into GPU
oc_table_norm=False
oc_table_norm= True
norbit=None
#======================read input_nn==================================
with open('para/input_nn','r') as f1:
Expand All @@ -41,8 +41,8 @@ def __init__(self,nlinked=1):
outputneuron=1
#======================read input_nn=============================================
nipsin=2
cutoff=4.0
nwave=6
cutoff=4.5
nwave=7
with open('para/input_density','r') as f1:
while True:
tmp=f1.readline()
Expand Down
Binary file added reann/manual/manual.pdf
Binary file not shown.
13 changes: 7 additions & 6 deletions reann/pes/PES.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,14 @@ def __init__(self,nlinked=1):
nl=[128,128] # NN structure
dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
activate = 'Relu_like'
table_norm= False
oc_loop = 0
oc_nl = [32,32] # neural network architecture
table_norm= True
oc_loop = 1
oc_nl = [128,128] # neural network architecture
oc_nblock = 1
oc_dropout_p=[0.0,0.0,0.0,0.0]
oc_activate = 'Relu_like'
#========================queue_size sequence for loading data into GPU
oc_table_norm=False
oc_table_norm=True
norbit= None
#======================read input_nn==================================
with open('para/input_nn','r') as f1:
Expand All @@ -42,8 +42,9 @@ def __init__(self,nlinked=1):
outputneuron=1
#======================read input_nn=============================================
nipsin=2
cutoff=4.0
nwave=6
cutoff=4.5
nwave=7
neigh_atoms=150
with open('para/input_density','r') as f1:
while True:
tmp=f1.readline()
Expand Down
13 changes: 7 additions & 6 deletions reann/pol/PES.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,14 @@ def __init__(self,nlinked=1):
nl=[128,128] # NN structure
dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
activate = 'Relu_like'
table_norm= False
oc_loop = 0
oc_nl = [32,32] # neural network architecture
table_norm= True
oc_loop = 1
oc_nl = [128,128] # neural network architecture
oc_nblock = 1
oc_dropout_p=[0.0,0.0,0.0,0.0]
oc_activate = 'Relu_like'
#========================queue_size sequence for loading data into GPU
oc_table_norm=False
oc_table_norm=True
norbit=None
#======================read input_nn==================================
with open('para/input_nn','r') as f1:
Expand All @@ -42,8 +42,9 @@ def __init__(self,nlinked=1):
outputneuron=1
#======================read input_nn=============================================
nipsin=2
cutoff=4.0
nwave=6
cutoff=4.5
nwave=7
neigh_atoms=150
with open('para/input_density','r') as f1:
while True:
tmp=f1.readline()
Expand Down
1 change: 1 addition & 0 deletions reann/run/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from src.density import *
from src.MODEL import *
from src.EMA import *
from torch.nn.parallel import DistributedDataParallel as DDP
if activate=='Tanh_like':
from src.activate import Tanh_like as actfun
else:
Expand Down
1 change: 0 additions & 1 deletion reann/src/read.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
from src.gpu_sel import *
# used for DDP
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

# open a file for output information in iterations
fout=open('nn.err','w')
Expand Down
11 changes: 6 additions & 5 deletions reann/tdm/PES.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,14 @@ def __init__(self,nlinked=1):
nl=[128,128] # NN structure
dropout_p=[0.0,0.0,0.0] # dropout probability for each hidden layer
activate = 'Relu_like'
table_norm = False
oc_loop = 0
table_norm = True
oc_loop = 1
oc_nl = [32,32] # neural network architecture
oc_nblock = 1
oc_dropout_p=[0.0,0.0,0.0,0.0]
oc_activate = 'Relu_like'
#========================queue_size sequence for loading data into GPU
oc_table_norm=False
oc_table_norm=True
norbit=None
#======================read input_nn==================================
with open('para/input_nn','r') as f1:
Expand All @@ -42,8 +42,9 @@ def __init__(self,nlinked=1):
outputneuron=3
#======================read input_nn=============================================
nipsin=2
cutoff=4.0
nwave=6
cutoff=4.5
nwave=7
neigh_atoms=150
with open('para/input_density','r') as f1:
while True:
tmp=f1.readline()
Expand Down

0 comments on commit ceebc8e

Please sign in to comment.