From 4952d20a1924de7773654bbc06ddcdfdd7760410 Mon Sep 17 00:00:00 2001
From: Ajay Bati
Date: Mon, 10 Apr 2023 01:22:01 -0400
Subject: [PATCH] adding basic example

---
 README.md                            | 29 +++++++++++++++++++++++++++-
 examples/capiTester.f90              |  3 +++
 fLibrary/Makefile                    |  6 +++---
 fLibrary/reader.f90                  | 23 +++++++++++-----------
 goldenFiles/gemm_small/gemm_small.py | 28 +++++++++++++++++----------
 test/Makefile                        |  4 ++--
 6 files changed, 65 insertions(+), 28 deletions(-)

diff --git a/README.md b/README.md
index ec8a7bb..376c7c9 100644
--- a/README.md
+++ b/README.md
@@ -44,7 +44,34 @@
 We have minimal dependencies. For example, on MacOS you can get away with just
 ```
 brew install wget make cmake coreutils gcc
-pip install torch onnx numpy fypp onnxruntime
+pip install torch onnx numpy fypp onnxruntime pandas
+```
+## Basic Example
+Here is a quick example of how **roseNNa** works. In just a few steps, it converts a basic feed-forward neural network built in PyTorch into accurate, callable Fortran code.
+
+First `cd` into the `fLibrary/` directory.
+
+``` bash
+#create the PyTorch model and export it to ONNX
+python3 ../goldenFiles/gemm_small/gemm_small.py
+```
+``` bash
+#read and interpret the ONNX files produced in the previous step
+python3 modelParserONNX.py -f ../goldenFiles/gemm_small/gemm_small.onnx -w ../goldenFiles/gemm_small/gemm_small_weights.onnx
+```
+``` bash
+#compile the library
+make library
+```
+``` bash
+#compile the example driver (capiTester.f90), link it against the library just built, and run
+gfortran -c ../examples/capiTester.f90 -IobjFiles/
+gfortran -o flibrary capiTester.o libcorelib.a
+./flibrary
+```
+``` bash
+#check that the PyTorch model's output matches roseNNa's output
+python3 ../test/testChecker.py
 ```
 
 ## Compiling roseNNa
diff --git a/examples/capiTester.f90 b/examples/capiTester.f90
index 5c8303a..aef705e 100644
--- a/examples/capiTester.f90
+++ b/examples/capiTester.f90
@@ -11,6 +11,9 @@ program name
 
     CALL use_model(inputs, output)
 
+    open(1, file = "test.txt")
+    WRITE(1, *) SHAPE(output)
+    WRITE(1, *) PACK(RESHAPE(output,(/SIZE(output, dim = 2), SIZE(output, dim = 1)/), order = [2, 1]),.true.)
     print *, output
 
 end program name
diff --git a/fLibrary/Makefile b/fLibrary/Makefile
index 7eec5cc..0753fb9 100644
--- a/fLibrary/Makefile
+++ b/fLibrary/Makefile
@@ -16,15 +16,15 @@ output: $(COMP) $(OBJ2)
 	$(FC) $(FFLAGS) -c $< -o $@
 
-preprocess: modelParserONNX.py
+#preprocess: modelParserONNX.py
 	# arg1 = model structure file (.onnx format)
 	# arg2 (optional) = weights file (.onnx format)
-	python3 modelParserONNX.py -f $(args)
+	#python3 modelParserONNX.py -f $(args)
 
 	#for *.mod and *.o files
-	mkdir -p objFiles
 
 library: output
+	mkdir -p objFiles
 	ar crv libcorelib.a $(COMP) $(OBJ2)
 
 clean:
diff --git a/fLibrary/reader.f90 b/fLibrary/reader.f90
index 24c40e6..7d3a319 100644
--- a/fLibrary/reader.f90
+++ b/fLibrary/reader.f90
@@ -34,7 +34,7 @@ module reader
     INTEGER :: readOrNot
 
 contains
-    
+
     subroutine initialize() bind(c,name="initialize") !add arguments for location of onnxModel.txt and onnxWeights.txt
         INTEGER :: Reason
         CHARACTER(len = 10), ALLOCATABLE, DIMENSION(:) :: name
@@ -50,7 +50,7 @@ subroutine initialize() bind(c,name="initialize") !add arguments for location of
         open(11, file = "onnxWeights.txt")
 
         read(10, *) numLayers
-        
+
         readloop: DO i = 1, numLayers
             read(10, *, IOSTAT=Reason) layerName
             if (Reason < 0) then
@@ -93,7 +93,7 @@ subroutine initialize() bind(c,name="initialize") !add arguments for location of
 
             end if
 
-            
+
         END DO readloop
 end subroutine
 
@@ -185,16 +185,16 @@ subroutine read_conv(file1, file2)
         read(file2, *) largeWeights
         conv(1)%weights = largeWeights
         DEALLOCATE(largeWeights)
-        
-
+
+
         read(file1, *) w_dim1
         ALLOCATE(biases(w_dim1))
         read(file2, *) biases
         conv(1)%biases = biases
         DEALLOCATE(biases)
-        
+
         convLayers = [convLayers, conv]
 
         DEALLOCATE(conv)
@@ -211,13 +211,13 @@ subroutine read_lstm(file1, file2, readOrNot)
         read(file2, *) midWeights
         lstm(1)%wih = midWeights
         DEALLOCATE(midWeights)
-        
+
         read(file1, *) w_dim1, w_dim2, w_dim3
         ALLOCATE(midWeights(w_dim1,w_dim2,w_dim3))
         read(file2, *) midWeights
         lstm(1)%whh = midWeights
         DEALLOCATE(midWeights)
-        
+
 
         read(file1, *) w_dim1
         ALLOCATE(biases(w_dim1))
@@ -230,7 +230,7 @@ subroutine read_lstm(file1, file2, readOrNot)
         read(file2, *) biases
         lstm(1)%bhh = biases
         DEALLOCATE(biases)
-        
+
         if (readOrNot .eq. 1) then
             read(file1, *) w_dim1, w_dim2, w_dim3
             ALLOCATE(midWeights(w_dim1,w_dim2,w_dim3))
@@ -244,7 +244,7 @@ subroutine read_lstm(file1, file2, readOrNot)
             lstm(1)%cell = midWeights
             DEALLOCATE(midWeights)
         endif
-        
+
 
         lstmLayers = [lstmLayers, lstm]
 
@@ -287,6 +287,5 @@ subroutine read_linear(file1, file2)
 
 
 end subroutine
-
-end module
+end module
diff --git a/goldenFiles/gemm_small/gemm_small.py b/goldenFiles/gemm_small/gemm_small.py
index be65f4f..cd05cfb 100644
--- a/goldenFiles/gemm_small/gemm_small.py
+++ b/goldenFiles/gemm_small/gemm_small.py
@@ -4,6 +4,14 @@
 import os
 import timeit
 import numpy as np
+import pathlib
+import sys, getopt
+
+opts, args = getopt.getopt(sys.argv[1:],"n")
+produce = True
+for opt, _ in opts:
+    if opt == "-n":
+        produce = False
 class NN(nn.Module):
     def __init__(self):
         super(NN, self).__init__()
@@ -30,16 +38,16 @@ def forward(self, inp):
 
 model = NN()
 inp = torch.ones(1,2)
-
-with open("inputs.fpp",'w') as f:
-    inputs = inp.flatten().tolist()
-    inpShapeDict = {'inputs': list(inp.shape)}
-    inpDict = {'inputs':inputs}
-    f.write(f"""#:set inpShape = {inpShapeDict}""")
-    f.write("\n")
-    f.write(f"""#:set arrs = {inpDict}""")
-    f.write("\n")
-    f.write("a")
+if produce:
+    with open("inputs.fpp",'w') as f:
+        inputs = inp.flatten().tolist()
+        inpShapeDict = {'inputs': list(inp.shape)}
+        inpDict = {'inputs':inputs}
+        f.write(f"""#:set inpShape = {inpShapeDict}""")
+        f.write("\n")
+        f.write(f"""#:set arrs = {inpDict}""")
+        f.write("\n")
+        f.write("a")
 
 def stringer(mat):
     s = ""
diff --git a/test/Makefile b/test/Makefile
index 6d8266b..1fe0863 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -36,8 +36,8 @@ capi: capi.c modelCreator.o
 	./capi
 
 ex1: $(DIR)/modelParserONNX.py
-	python3 $(MAIN)/goldenFiles/$(case)/$(case).py
-	python3 $(DIR)/modelParserONNX.py -f $(MAIN)/goldenFiles/$(case)/$(case).onnx -w $(MAIN)/goldenFiles/$(case)/$(case)_weights.onnx -i $(MAIN)/goldenFiles/$(case)/$(case)_inferred.onnx 1>/dev/null
+	python3 $(MAIN)goldenFiles/$(case)/$(case).py
+	python3 $(DIR)/modelParserONNX.py -f $(MAIN)goldenFiles/$(case)/$(case).onnx -w $(MAIN)goldenFiles/$(case)/$(case)_weights.onnx -i $(MAIN)goldenFiles/$(case)/$(case)_inferred.onnx 1>/dev/null
 
 graphs: output
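For orientation, the driver that the new README steps compile (`examples/capiTester.f90`) ends up looking roughly like the sketch below once the patch is applied. The module name `rosenna`, the `real` kind, the initial value of `inputs`, and the array shapes are assumptions made for illustration; the hunk above only shows the `use_model` call and the new `test.txt` dump, so match the declarations to the interface generated for your model.

```fortran
program capi_sketch
    ! Minimal sketch of a caller like examples/capiTester.f90 after this patch.
    ! Assumed for illustration: the module name (rosenna), the real kind,
    ! and the output shape; adjust them to the interface your build generates.
    use rosenna
    implicit none

    real, dimension(1,2) :: inputs   ! gemm_small.py drives the model with torch.ones(1,2)
    real, dimension(1,2) :: output   ! output shape assumed; use the model's actual output shape

    inputs = 1.0

    call initialize()                ! loads onnxModel.txt / onnxWeights.txt (see reader.f90)
    call use_model(inputs, output)

    ! The lines added by this patch: record the output shape, then the values
    ! flattened in row-major order so test/testChecker.py can compare them
    ! against the PyTorch result.
    open(1, file = "test.txt")
    write(1, *) shape(output)
    write(1, *) pack(reshape(output, (/size(output, dim=2), size(output, dim=1)/), order=[2, 1]), .true.)

    print *, output
end program capi_sketch
```

With the library built via `make library`, this compiles and links as in the README steps: `gfortran -c ../examples/capiTester.f90 -IobjFiles/`, then `gfortran -o flibrary capiTester.o libcorelib.a`, and finally `./flibrary` followed by `python3 ../test/testChecker.py`.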