Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
tao authored and tao committed Mar 25, 2020
0 parents commit 55edecb
Show file tree
Hide file tree
Showing 15 changed files with 361 additions and 0 deletions.
90 changes: 90 additions & 0 deletions POSCAR
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
-263.00801652 -279.13667502 -14.90404170
1.00000000000000
20.0000000000000000 0.0000000000000000 0.0000000000000000
0.0000000000000000 20.0000000000000000 0.0000000000000000
0.0000000000000000 0.0000000000000000 20.0000000000000000
Cu
82
Direct
0.5000000000000000 0.5000000000000000 0.5000000000000000 F F F
0.3182449999999974 0.4368800000000022 0.3483593999999997 F F F
0.3357049999999973 0.5860550000000018 0.3470023499999968 F F F
0.4013449999999992 0.6141549999999967 0.1906960000000026 F F F
0.5738600000000034 0.6296899999999965 0.3108859999999964 F F F
0.5766199999999984 0.7630949999999999 0.3069780000000009 F F F
0.7641149999999968 0.7624250000000004 0.3846045499999988 F F F
0.2388949999999994 0.5207400000000035 0.3059725000000029 F F F
0.4217250000000021 0.5005600000000001 0.3718773499999983 F F F
0.4726599999999976 0.5077149999999975 0.2040140000000008 F F F
0.2555600000000027 0.6834550000000021 0.3353859999999997 F F F
0.4888750000000002 0.6955349999999996 0.2516869999999969 F F F
0.6667800000000028 0.6945049999999995 0.3564805500000006 F F F
0.6654249999999990 0.8317350000000019 0.3550375499999987 F F F
0.2068299999999965 0.4553650000000005 0.4037633000000014 F F F
0.2675399999999968 0.4402299999999997 0.2131200000000035 F F F
0.2847500000000025 0.6128149999999977 0.2359190000000027 F F F
0.4577400000000011 0.5861400000000003 0.2938830000000010 F F F
0.5214599999999976 0.6146450000000030 0.1587705000000028 F F F
0.4545749999999984 0.7716950000000011 0.3531909000000013 F F F
0.3612400000000022 0.5095949999999974 0.2554135000000031 F F F
0.3710599999999999 0.6913399999999967 0.2882090000000019 F F F
0.5177550000000011 0.5711299999999966 0.4029626000000022 F F F
0.6108400000000032 0.6906699999999972 0.2083009999999987 F F F
0.3918499999999980 0.4385850000000033 0.4697919999999982 F F F
0.1948900000000009 0.5197799999999972 0.5114409999999978 F F F
0.3125349999999969 0.6491850000000028 0.4510245000000026 F F F
0.2994249999999994 0.4940050000000014 0.6526110000000003 F F F
0.2106250000000003 0.6177350000000033 0.7482060000000033 F F F
0.4103449999999995 0.6277649999999966 0.5888824999999969 F F F
0.3215900000000005 0.7459899999999990 0.6873755000000017 F F F
0.6221400000000017 0.6205500000000015 0.4456435000000027 F F F
0.5218550000000022 0.7636550000000000 0.5214969999999965 F F F
0.5212700000000012 0.6004700000000014 0.7276349999999994 F F F
0.4309950000000029 0.7185699999999997 0.8251295000000027 F F F
0.6306599999999989 0.7317600000000013 0.6675214999999994 F F F
0.2092600000000004 0.5911949999999990 0.4076954499999985 F F F
0.1889150000000015 0.4444000000000017 0.6136745000000019 F F F
0.4070699999999974 0.5729049999999987 0.4755735000000030 F F F
0.3027450000000016 0.5724149999999995 0.5526210000000020 F F F
0.2120800000000003 0.6958750000000009 0.6479835000000023 F F F
0.4125950000000032 0.7144049999999993 0.4905784999999980 F F F
0.3259499999999989 0.8262399999999985 0.5918869999999998 F F F
0.5416450000000026 0.8360050000000001 0.4136176499999991 F F F
0.2958700000000007 0.4206849999999989 0.7526140000000012 F F F
0.4107199999999978 0.5469000000000008 0.6909874999999985 F F F
0.3208950000000002 0.6676149999999978 0.7875190000000032 F F F
0.5201350000000033 0.6816200000000023 0.6274719999999974 F F F
0.4318299999999979 0.7995850000000004 0.7229664999999983 F F F
0.7239100000000036 0.6872949999999989 0.4752705000000006 F F F
0.6314700000000002 0.8131699999999995 0.5657860000000028 F F F
0.6298450000000031 0.6526849999999982 0.7656865000000010 F F F
0.1778150000000025 0.3868149999999986 0.4988829999999993 F F F
0.3036099999999990 0.5178700000000021 0.4413621500000033 F F F
0.2053050000000027 0.6482000000000028 0.5259854999999973 F F F
0.3398150000000015 0.7624050000000011 0.3951770400000001 F F F
0.1983150000000009 0.4940349999999967 0.7310269999999974 F F F
0.4001450000000020 0.4989149999999967 0.5769889999999975 F F F
0.3114300000000014 0.6207500000000010 0.6684934999999967 F F F
0.5114299999999972 0.6331150000000036 0.5111249999999998 F F F
0.4234000000000009 0.7542899999999975 0.6059059999999974 F F F
0.6325900000000004 0.7638749999999987 0.4521744999999981 F F F
0.5329150000000027 0.8891449999999992 0.5324475000000035 F F F
0.4209200000000024 0.5926199999999966 0.8078684999999979 F F F
0.6184849999999997 0.6097749999999991 0.6469009999999997 F F F
0.5308050000000009 0.7250399999999999 0.7460255000000018 F F F
0.7290599999999969 0.7352050000000006 0.5906595000000010 F F F
0.2272049999999979 0.7366749999999982 0.4404474500000006 F F F
0.1730099999999979 0.3212100000000007 0.6014830000000018 F F F
0.2881100000000032 0.4412400000000005 0.5381034999999983 F F F
0.2014599999999973 0.5704300000000018 0.6288805000000011 F F F
0.4281099999999967 0.6554300000000026 0.3882653000000005 F F F
0.3112849999999980 0.7036900000000017 0.5650789999999972 F F F
0.5381599999999978 0.7017049999999969 0.4093769500000022 F F F
0.4196099999999987 0.8390199999999979 0.4804490000000001 F F F
0.3109899999999968 0.5426950000000019 0.7704299999999975 F F F
0.5099650000000011 0.5563399999999987 0.6105815000000021 F F F
0.4213149999999999 0.6725199999999987 0.7071095000000014 F F F
0.6195450000000022 0.6870550000000009 0.5494709999999969 F F F
0.5326700000000031 0.8064499999999981 0.6427729999999983 F F F
0.7357650000000007 0.8144750000000016 0.4932960000000008 F F F
0.5306700000000006 0.6437650000000019 0.8462474999999969 F F F
50 changes: 50 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# Neural Network Model for CO Adsorption on Copper Nanoparticle
by Yufeng Huang<sup>1</sup>

<sup>1</sup> Department of Chemical Engineering, California Institute of Technology, Pasadena, CA, U.S.A.

## Descriptions
The code implements a neural network model to fit and predict CO adsorption energies on the surface sites of nanoparticles.

Please refer to our manuscript in *ACS Energy Letters* for more information:

>**Y. Huang et al.,** Identification of the selective sites for the electrochemical reduction of CO to C<sub>2+</sub> products on copper nanoparticles, *ACS Energy Lett.* **2018**, 3, 12, 2983-2988.

In the supporting information of the manuscript, we describe the structure of the neural network in detail. The code presented on this page is the actual implementation.

## Instructions

The neural network is implemented in Python and utilizes the TensorFlow and NumPy numerical libraries. To run the code, Python, TensorFlow and NumPy should be installed first.

To download the code on this page to run locally, type the following in the terminal (for MacOS and Linux):

```
% git clone https://gitlab.com/yufeng.huang/cunp_coads.git
```

Once the source code is downloaded, you can change to its directory by typing:

```
% cd cunp_coads
```

To run the code, type:
```
% python main.py
```

To generate features from VASP POSCAR, use the following script:
```
% python poscar2feats.py POSCAR
```

where POSCAR is the name of the VASP input structural file.

**WARNING**

1. The atomic coordinates in POSCAR must be fractional or direct

2. The target surface site must be at the center of the box, which is (0.5, 0.5, 0.5) in fractional coordinates.

3. The current code is implemented only for surfaces of a single element, as described in our paper
Binary file added energies.npz
Binary file not shown.
Binary file added features.npz
Binary file not shown.
2 changes: 2 additions & 0 deletions log/checkpoint
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
model_checkpoint_path: "model.ckpt"
all_model_checkpoint_paths: "model.ckpt"
Binary file added log/featParams.npz
Binary file not shown.
Binary file added log/model.ckpt.data-00000-of-00001
Binary file not shown.
Binary file added log/model.ckpt.index
Binary file not shown.
Binary file added log/model.ckpt.meta
Binary file not shown.
Binary file added log/nnParams.npz
Binary file not shown.
13 changes: 13 additions & 0 deletions main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import py_util as pyu
import py_func as pyf
import numpy as np

# Hyperparameters of the trained network and the feature-scaling
# constants saved next to the model checkpoint.
nnParams = pyu.loadNN("log/nnParams.npz")
featParams = pyu.loadFeat("log/featParams.npz")

# Each .npz archive stores a single entry holding the pre-split data;
# pull that first (and only) array out of each archive.
featArchive = np.load("features.npz")
engyArchive = np.load("energies.npz")
featSets = featArchive[featArchive.files[0]]
engySets = engyArchive[engyArchive.files[0]]

# Restore the saved model and report train/validation/test RMSE.
pyf.trainEL_getError(featSets, engySets, featParams, nnParams)
67 changes: 67 additions & 0 deletions poscar2feats.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
#
# Yufeng Huang
# Department of Chemical Engineering
# California Institute of Technology
# 11/07/2018
#

# Generate features from VASP POSCAR

import numpy as np
import argparse

def getCoord(poscar):
    """Parse a VASP POSCAR file and return coordinates centered on the
    target site.

    Args:
        poscar: path (or file-like name acceptable to open()) of a POSCAR
            whose atomic positions are in fractional ("Direct") coordinates,
            with the target surface site at (0.5, 0.5, 0.5). Only a single
            element species is supported (one integer on the counts line).

    Returns:
        (nAtoms, coords): the atom count and an (nAtoms, 3) array of
        Cartesian coordinates shifted so the target site sits at the origin.
    """
    with open(poscar, 'r') as p:
        lattice = np.zeros((3, 3))
        p.readline()                          # title/comment line
        # BUG FIX: the universal scaling factor on line 2 was previously
        # read and discarded; per the POSCAR format it scales the lattice
        # vectors. (A negative value, which VASP interprets as a target
        # cell volume, is not supported here.)
        scale = float(p.readline())
        lattice[0, :] = np.array(p.readline().split(), dtype=float)
        lattice[1, :] = np.array(p.readline().split(), dtype=float)
        lattice[2, :] = np.array(p.readline().split(), dtype=float)
        lattice *= scale
        p.readline()                          # element symbol line
        nAtoms = int(p.readline())
        # An optional "Selective dynamics" line may precede the "Direct"
        # line; when present, skip the following coordinate-mode line.
        # strip() (not strip(" ")) so tabs/newlines don't defeat the check.
        mode = p.readline().strip()
        if mode and mode[0].upper() == "S":
            p.readline()
        R = np.zeros((nAtoms, 3))
        for i in range(nAtoms):
            # Only the first three fields are coordinates; trailing
            # selective-dynamics flags ("F F F") are ignored.
            R[i] = np.array(p.readline().split()[:3], dtype=float)
        # Shift so the fractional center (0.5, 0.5, 0.5) maps to the origin,
        # then convert fractional -> Cartesian via the (scaled) lattice.
        R = R - [0.5, 0.5, 0.5]
        return nAtoms, R.dot(lattice)


def getCos(x, numBasis):
    """Evaluate overlapping raised-cosine basis functions at each point of x.

    numBasis centers are spaced evenly on [-1, 1]. Each basis function is
    1 at its own center, falls off as a half-cosine, and is exactly zero
    beyond one center-spacing away. Points with |x| > 1 contribute zero
    to every basis function.

    Args:
        x: 1-D array of evaluation points.
        numBasis: number of basis functions (must be >= 2).

    Returns:
        Array of shape (len(x), numBasis).
    """
    centers = np.linspace(-1, 1, numBasis)
    spacing = 2 / (numBasis - 1)             # gap between adjacent centers
    offsets = x[:, np.newaxis] - centers     # offset of each point from each center
    onCenter = (offsets == 0)                # remember exact hits before zeroing
    offsets[np.abs(offsets) > spacing] = 0   # outside a basis function's support
    inside = (offsets != 0)
    offsets[inside] = np.cos(offsets[inside] / spacing * np.pi) / 2 + 0.5
    offsets[onCenter] = 1                    # cosine peak at the center itself
    offsets[np.abs(x) > 1] = 0               # out-of-range points get no weight
    return offsets


def getFeat(nAtoms, coord,n2b,n3b):
    """Build the feature vector for the center site from Cartesian coordinates.

    Args:
        nAtoms: total number of atoms, including the center atom at row 0.
        coord: (nAtoms, 3) Cartesian coordinates centered on the target site
            (row 0 is the center atom itself and is excluded from the features).
        n2b: number of two-body (radial) basis functions.
        n3b: number of three-body basis functions.

    Returns:
        1-D array of length n2b + n3b**3: the summed radial features followed
        by the flattened three-body feature tensor.
    """
    # Distances from the center atom to each neighbor (center row dropped).
    Rl = np.sqrt(np.sum(coord**2,axis=1))[1:]
    # Neighbor-neighbor pairwise distance matrix, shape (nAtoms-1, nAtoms-1).
    Dc = coord[1:,np.newaxis] - coord[1:]
    Dc = np.sqrt(np.sum(Dc**2,axis=2))
    # Two-body features: radial basis summed over neighbors. r/4-1 maps
    # r in [0, 8] onto [-1, 1]; getCos zeroes anything beyond that cutoff
    # (units are those of the POSCAR lattice — presumably Angstrom; confirm).
    yR = np.sum(getCos(Rl/4-1,n2b),axis=0)
    # Basis expansion of each neighbor-neighbor distance; the diagonal
    # (Dc == 0, an atom with itself) stays zero.
    yD = np.zeros((nAtoms-1,nAtoms-1,n3b))
    yD[Dc!=0] = getCos(Dc[Dc!=0]/4-1,n3b)
    # Contract the pair tensor with the center-distance basis twice — once
    # per neighbor axis — giving an (n3b, n3b, n3b) three-body descriptor,
    # effectively G[a,b,c] = sum_ij f_a(R_i) f_b(R_j) g_c(D_ij).
    # NOTE: the repeated line is intentional; each application sums out
    # one of the two neighbor indices.
    yD = np.sum(getCos(Rl/4-1,n3b)[:,np.newaxis,:,np.newaxis] * yD[:,:,np.newaxis,:],axis=0)
    yD = np.sum(getCos(Rl/4-1,n3b)[:,np.newaxis,:,np.newaxis] * yD[:,:,np.newaxis,:],axis=0)
    return np.concatenate([yR, yD.reshape(-1)])


# Command-line entry point: read a POSCAR, extract centered coordinates,
# and print the feature vector for the center surface site.
parser = argparse.ArgumentParser()
parser.add_argument("POSCAR", type=str,
                    help="POSCAR file. "
                         "The coordinates must be fractional (or direct). "
                         "The center atom must be in the center, (0.5, 0.5, 0.5)")

args = parser.parse_args()
numA, coord = getCoord(args.POSCAR)
# 12 two-body and 3 three-body basis functions, matching the trained model.
features = getFeat(numA, coord, 12, 3)
print(features)
61 changes: 61 additions & 0 deletions py_func.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Yufeng Huang
"""

import tensorflow as tf
import tf_func as tff


def scaleFeat(featParams, feat):
    """Linearly normalize features: featA * feat + featB.

    featParams must supply the scale ('featA') and offset ('featB')
    produced during training, so inference sees the same normalization.
    """
    scale = featParams['featA']
    offset = featParams['featB']
    return scale * feat + offset


def trainEL_getError(featSets, engySets, featParams, nnParams, logFile="log"):
    """Restore the trained model and print RMSE on the train/valid/test splits.

    Args:
        featSets: sequence whose first three entries are the training,
            validation, and test feature arrays (remaining entries unused).
        engySets: matching sequence of reference energy arrays.
        featParams: feature-scaling parameters from py_util.loadFeat.
        nnParams: network hyperparameters from py_util.loadNN.
        logFile: directory containing the saved model checkpoint.
    """
    AdFeatTrain, AdFeatValid, AdFeatTest, _, __, ___ = featSets
    AdEngyTrain, AdEngyValid, AdEngyTest, _, __, ___ = engySets

    tf_feat = tf.placeholder(tf.float32, (None, featParams['nFeat']))
    # NOTE(review): (None) is just None (unspecified shape), not the
    # 1-tuple (None,); kept as-is to avoid changing graph behavior.
    tf_engy = tf.placeholder(tf.float32, (None))
    # Learning-rate placeholder is fed below but unused during evaluation;
    # kept for compatibility with the training graph.
    tf_LR = tf.placeholder(tf.float32)

    L3 = tff.getE(tf_feat, featParams['nFeat'], nnParams)

    # Mean-squared error between predicted and reference energies.
    engyLoss = tf.reduce_mean((L3 - tf_engy) ** 2)

    with tf.Session() as sess:
        saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "./" + logFile + "/model.ckpt")

        AdTrainDict = {tf_feat: scaleFeat(featParams, AdFeatTrain), tf_engy: AdEngyTrain,
                       tf_LR: nnParams['learningRate']}
        AdTestDict = {tf_feat: scaleFeat(featParams, AdFeatTest), tf_engy: AdEngyTest,
                      tf_LR: nnParams['learningRate']}
        AdValidDict = {tf_feat: scaleFeat(featParams, AdFeatValid), tf_engy: AdEngyValid,
                       tf_LR: nnParams['learningRate']}

        eLoss = sess.run(engyLoss, feed_dict=AdTrainDict)
        # BUG FIX: the validation and test feed dicts were swapped, so the
        # "Validation" line reported the test-set error and vice versa.
        veLoss = sess.run(engyLoss, feed_dict=AdValidDict)
        teLoss = sess.run(engyLoss, feed_dict=AdTestDict)

        print("Training set RMSE: {:10.4f} eV".format(eLoss**0.5))
        print("Validation set RMSE: {:10.4f} eV".format(veLoss**0.5))
        # BUG FIX: format spec was "{:10.4}" (general format); use
        # fixed-point "f" to match the other two lines.
        print("Testing set RMSE: {:10.4f} eV".format(teLoss**0.5))


def getE(feat, featParams, nnParams, logFile="log"):
    """Restore the trained network and predict energies for raw features.

    Args:
        feat: (nSamples, nFeat) unscaled feature array; scaled internally
            with the stored featParams before being fed to the network.
        featParams: feature-scaling parameters from py_util.loadFeat.
        nnParams: network hyperparameters from py_util.loadNN.
        logFile: directory containing the saved model checkpoint.

    Returns:
        1-D array of predicted energies, one per sample.
    """
    tf_feat = tf.placeholder(tf.float32, (None, featParams['nFeat']))
    prediction = tff.getE(tf_feat, featParams['nFeat'], nnParams)

    with tf.Session() as sess:
        # Restore only the trainable variables saved with the model.
        saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "./" + logFile + "/model.ckpt")
        return sess.run(prediction,
                        feed_dict={tf_feat: scaleFeat(featParams, feat)})
28 changes: 28 additions & 0 deletions py_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Yufeng Huang
"""

import numpy as np


def loadNN(paramFile):
    """Load neural-network hyperparameters from an .npz archive.

    Returns a dict with integer hidden-layer sizes 'nL1' and 'nL2',
    float 'learningRate', and integer epoch count 'nEpoch'.
    """
    stored = np.load(paramFile)
    return {
        'nL1': int(stored['nL1']),
        'nL2': int(stored['nL2']),
        'learningRate': float(stored['learningRate']),
        'nEpoch': int(stored['nEpoch']),
    }


def loadFeat(paramFile):
    """Load feature-scaling parameters and basis sizes from an .npz archive.

    Returns a dict with array-valued scale 'featA' and offset 'featB',
    plus integer basis counts 'n2b', 'n3b' and total feature count 'nFeat'.
    """
    stored = np.load(paramFile)
    return {
        'featA': stored['featA'],
        'featB': stored['featB'],
        'n2b': int(stored['n2b']),
        'n3b': int(stored['n3b']),
        'nFeat': int(stored['nFeat']),
    }
50 changes: 50 additions & 0 deletions tf_func.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Yufeng Huang
"""

import tensorflow as tf


def getNN(nnIn, nIn, nOut, layer):
    """Fully connected layer mapping (batch, nIn) -> (batch, nOut).

    Weights use Xavier initialization and biases start at zero. Variables
    are created (or reused, via AUTO_REUSE) under the scope name `layer`,
    so repeated graph construction shares the same parameters.
    """
    with tf.variable_scope(layer, reuse=tf.AUTO_REUSE):
        weights = tf.get_variable(
            "weights", shape=[nIn, nOut],
            initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable(
            "biases", shape=[nOut],
            initializer=tf.zeros_initializer())
        return tf.matmul(nnIn, weights) + biases


def getE(tf_feat, nFeat, nnParams):
    """Two-hidden-layer sigmoid network mapping features to one energy per sample."""
    hidden1 = tf.nn.sigmoid(getNN(tf_feat, nFeat, nnParams['nL1'], 'layer1'))
    hidden2 = tf.nn.sigmoid(getNN(hidden1, nnParams['nL1'], nnParams['nL2'], 'layer2'))
    # Final linear layer has a single output; flatten (batch, 1) to (batch,).
    return tf.reshape(getNN(hidden2, nnParams['nL2'], 1, 'layer3'), [-1])


def getAd(tf_feat, nFeat, nnParams):
    """Squash predicted energies through a shifted tanh into a (0, 1) score.

    mu is a trainable threshold initialized at -0.3. The 0.0257 divisor is
    presumably kT in eV at room temperature — TODO confirm.

    NOTE(review): getE is called inside the "adsorption" scope, so the layer
    variables are nested under "adsorption/..." — confirm this is intended.
    """
    with tf.variable_scope("adsorption", reuse=tf.AUTO_REUSE):
        mu = tf.get_variable("mu", shape=[1],
                             initializer=tf.constant_initializer(-0.3))
        energies = getE(tf_feat, nFeat, nnParams)
        score = tf.tanh((energies - mu) / 0.0257) * 0.5 + 0.5
    return score


def getW(layer):
    """Fetch the existing weight variable created under scope `layer`."""
    with tf.variable_scope(layer, reuse=tf.AUTO_REUSE):
        return tf.get_variable("weights")


def getB(layer):
    """Fetch the existing bias variable created under scope `layer`."""
    with tf.variable_scope(layer, reuse=tf.AUTO_REUSE):
        return tf.get_variable("biases")


def getMu():
    """Fetch the trainable adsorption threshold variable "mu"."""
    with tf.variable_scope("adsorption", reuse=tf.AUTO_REUSE):
        return tf.get_variable("mu")

0 comments on commit 55edecb

Please sign in to comment.