Showing 644 changed files with 32,290 additions and 66,238 deletions.
`.gitmodules` (new file):
@@ -0,0 +1,9 @@
[submodule "third_party/clipp"]
    path = third_party/clipp
    url = https://github.com/muellan/clipp.git
[submodule "third_party/xtl"]
    path = third_party/xtl
    url = https://github.com/QuantStack/xtl.git
[submodule "third_party/xtensor"]
    path = third_party/xtensor
    url = https://github.com/QuantStack/xtensor.git
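The clipp, xtl, and xtensor dependencies are vendored as git submodules under `third_party/`, so they need to be fetched before the build is configured. A minimal sketch; the repository URL is assumed to be the kendryte/nncase repository referenced later in the README:

```
# Clone the repository together with its submodules
git clone --recursive https://github.com/kendryte/nncase.git

# Or, in an existing checkout, fetch the submodules afterwards
git submodule update --init --recursive
```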
`CMakeLists.txt` (new file):
@@ -0,0 +1,52 @@
cmake_minimum_required(VERSION 3.8)
project("nncase")

# Download automatically, you can also just copy the conan.cmake file
if(NOT EXISTS "${CMAKE_BINARY_DIR}/conan.cmake")
    message(STATUS "Downloading conan.cmake from https://github.com/conan-io/cmake-conan")
    file(DOWNLOAD "https://github.com/conan-io/cmake-conan/raw/v0.14/conan.cmake"
                  "${CMAKE_BINARY_DIR}/conan.cmake")
endif()

include(${CMAKE_BINARY_DIR}/conan.cmake)

if (NOT NNCASE_TARGET)
    message(FATAL_ERROR "Please define NNCASE_TARGET")
else()
    if (WIN32)
        add_definitions(/DNNCASE_TARGET=${NNCASE_TARGET})
    else()
        add_definitions(-DNNCASE_TARGET=${NNCASE_TARGET})
    endif()
endif()

conan_check()
conan_add_remote(NAME bincrafts URL https://api.bintray.com/conan/bincrafters/public-conan)
conan_cmake_run(CONANFILE conanfile.txt
                BASIC_SETUP CMAKE_TARGETS
                BUILD missing)
include(ExternalProject)

set(THIRD_PARTY ${CMAKE_CURRENT_LIST_DIR}/third_party)

if (MSVC_VERSION GREATER_EQUAL "1900")
    include(CheckCXXCompilerFlag)
    CHECK_CXX_COMPILER_FLAG("/std:c++latest" _cpp_latest_flag_supported)
    if (_cpp_latest_flag_supported)
        add_compile_options("/std:c++latest")
        add_definitions(/D_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS)
    endif()
else()
    add_compile_options(-Wno-multichar -std=c++17)
endif()

add_subdirectory(src/cli)
add_subdirectory(src/common)
add_subdirectory(src/ir)
add_subdirectory(src/transforms)
add_subdirectory(src/importer)
add_subdirectory(src/data)
add_subdirectory(src/scheduler)
add_subdirectory(src/evaluator)
add_subdirectory(src/codegen)
add_subdirectory(src/runtime)
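This top-level CMakeLists aborts with a FATAL_ERROR unless `NNCASE_TARGET` is defined, and it pulls dependencies through cmake-conan, so `conan` must already be on the PATH for `conan_check()` to succeed. A hedged sketch of an out-of-source build on Linux, using the `k210` target value that appears elsewhere in this commit:

```
mkdir build && cd build

# NNCASE_TARGET must be passed explicitly; it becomes a NNCASE_TARGET=... compile definition
cmake .. -DNNCASE_TARGET=k210 -DCMAKE_BUILD_TYPE=Release

cmake --build . -j
```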
`CMakeSettings.json` (new file):
@@ -0,0 +1,38 @@
{
  "configurations": [
    {
      "name": "x64-Debug",
      "generator": "Ninja",
      "configurationType": "Debug",
      "inheritEnvironments": [ "msvc_x64_x64" ],
      "buildRoot": "${projectDir}\\out\\build\\${name}",
      "installRoot": "${projectDir}\\out\\install\\${name}",
      "cmakeCommandArgs": "",
      "buildCommandArgs": "-v",
      "ctestCommandArgs": "",
      "variables": [
        {
          "name": "NNCASE_TARGET",
          "value": "k210"
        }
      ]
    },
    {
      "name": "x64-Release",
      "generator": "Ninja",
      "configurationType": "RelWithDebInfo",
      "buildRoot": "${projectDir}\\out\\build\\${name}",
      "installRoot": "${projectDir}\\out\\install\\${name}",
      "cmakeCommandArgs": "",
      "buildCommandArgs": "-v",
      "ctestCommandArgs": "",
      "inheritEnvironments": [ "msvc_x64_x64" ],
      "variables": [
        {
          "name": "NNCASE_TARGET",
          "value": "k210"
        }
      ]
    }
  ]
}
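For readers not building inside Visual Studio, here is a rough command-line equivalent of the `x64-Debug` entry above; the generator, build type, output directory, and `NNCASE_TARGET` value are taken from the JSON, while the exact invocation is an assumption rather than something this commit documents:

```
# Approximate command-line equivalent of the "x64-Debug" configuration
cmake -G Ninja -S . -B out/build/x64-Debug \
      -DCMAKE_BUILD_TYPE=Debug -DNNCASE_TARGET=k210
cmake --build out/build/x64-Debug -- -v
```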
`README.md` (modified):
@@ -1,99 +1,140 @@
nncase
=========================================
[Build status](https://ci.appveyor.com/project/sunnycase/nncase/branch/master)

`nncase` is a cross-platform neural network optimization toolkit for fast inference.

## Usage
Download prebuilt binaries from [Release](https://github.com/kendryte/nncase/releases).

`ncc -i <input format> -o <output format> [--dataset <dataset path>] [--postprocess <dataset postprocess>] [--weights-bits <weights quantization bits>] <input path> <output path>`

- `-i` Input format

| value | description |
|-------|------------------ |
|tflite|`.tflite` TFLite model
|paddle|`__model__` PaddlePaddle model
|caffe|`.caffemodel` Caffe model
|k210model|`.kmodel` K210 model (Only supported in inference mode)

- `-o` Output format
<div align="center">
<img src="docs/logo.png" width="400" alt="nncase" />
</div>

| value | description |
|-------|------------------ |
|k210model|`.kmodel` K210 model
|tf|`.pb` TensorFlow model
|tflite|`.tflite` TFLite model
|inference|`.bin` Model's raw output (Only supports k210model input)

- `--inference-type` Inference type

| value | description |
|-------|------------------ |
|uint8| Use quantized kernels (default)
|float| Use float kernels

- `--dataset` Dataset path, **required** when the output format is `inference`, or when it is `k210model` with inference type `uint8`.
[License](https://raw.githubusercontent.com/kendryte/nncase/master/LICENSE)
[Build status](https://ci.appveyor.com/project/sunnycase/nncase/branch/master)

- `--postprocess` Dataset postprocess method
`nncase` is a neural network compiler for AI accelerators.

| value | description |
|-------|------------------ |
|0to1|normalize images to [0, 1]
|n1to1|normalize images to [-1, 1]
`nncase` 是一个为 AI 加速器设计的神经网络编译器。

- `--weights-bits` Weights quantization bits
## Install
Download prebuilt binaries from [Release](https://github.com/kendryte/nncase/releases).

| value | description |
|-------|------------------ |
|8|8-bit quantization [0, 255]
|16|16-bit quantization [0, 65535]
## 安装
下载预编译的二进制文件 [Release](https://github.com/kendryte/nncase/releases)。

- `--float-fc` Use float fully-connected kernels.
---

- `--channelwise-output` Use channelwise quantization for output layers.
### Support commonly used CNN networks
### 支持常用的 CNN 网络

## Examples
- Convert a TFLite model to a K210 model.
- MobileNetV1/V2
- YOLOV1 YOLOV3

`ncc -i tflite -o k210model --dataset ./images ./mbnetv1.tflite ./mbnetv1.kmodel`
## Features

- Convert a PaddlePaddle model to a TensorFlow model.
- Supports multiple inputs and outputs and multi-branch structures
- Static memory allocation, no heap memory required
- Operator fusion and optimizations
- Supports both float and quantized uint8 inference
- Supports post-training quantization from a float model with a calibration dataset
- Flat model format with zero-copy loading

`ncc -i paddle -o tf ./MobileNetV1_pretrained ./mbnetv1.pb`
## 功能

- Run inference on a K210 model and get output binaries.
- 支持多输入输出网络,支持多分支结构
- 静态内存分配,不需要堆内存
- 算子合并和优化
- 支持 float 和量化 uint8 推理
- 支持训练后量化,使用浮点模型和量化校准集
- 平坦模型,支持零拷贝加载

`ncc -i k210model -o inference --dataset ./images ./mbnetv1.kmodel ./output`
## Usage
## 使用方法

- Tutorials
- 20-class object detection
- Iris flower classification
[Usage 使用方法](USAGE.md)

See https://github.com/kendryte/nncase/tree/master/examples
[Examples 例子](./examples)

## Supported layers
## Supported operators
## 支持的算子

| layer | parameters |
| Operator | Is Supported |
|-------|------------------ |
| Conv2d | kernel={3x3,1x1} stride={1,2} padding=same *|
| DepthwiseConv2d | kernel={3x3,1x1} stride={1,2} padding=same *|
| FullyConnected | |
| Add | |
| MaxPool2d | |
| AveragePool2d | |
| GlobalAveragePool2d | |
| BatchNormalization | |
| BiasAdd | |
| Relu | |
| Relu6 | |
| LeakyRelu | |
| Concatenation | |
| L2Normalization | |
| Sigmoid | |
| Softmax | |
| Flatten | |
| ResizeNearestNeighbor | |

\* When using TensorFlow Conv2d/DepthwiseConv2d with kernel=3x3, stride=2, padding=same, you must first pad the input with tf.pad([[0,0],[1,1],[1,1],[0,0]]) and then use Conv2d/DepthwiseConv2d with `valid` padding.
| Add |✅|
| ArgMax |❌|
| ArgMin |❌|
| AveragePool2D |✅|
| BatchToSpaceND |❌|
| Cast |❌|
| Concatenation |✅|
| Conv2D |✅|
| DepthwiseConv2D |✅|
| Div |✅|
| Equal |❌|
| Exp |✅|
| ExpandDims |❌|
| Floor |✅|
| FullyConnected |✅|
| Gather |❌|
| Greater |❌|
| GreaterEqual |❌|
| MaxPool2D |✅|
| Mean |✅|
| Mul |✅|
| L2Normalization |✅|
| L2Pool2D |❌|
| LessEqual |❌|
| Log |✅|
| Logistic |❌|
| LogSoftmax |❌|
| Maximum |✅|
| Minimum |✅|
| Neg |✅|
| NotEqual |❌|
| Pack |❌|
| Pad |✅|
| Pow |❌|
| PRelu |❌|
| ReduceMax |✅|
| ReduceProd |❌|
| Reshape |✅|
| ResizeBilinear |✅|
| Rsqrt |✅|
| Select |❌|
| Shape |❌|
| Sin |✅|
| Slice |❌|
| Softmax |✅|
| SpaceToDepth |❌|
| SpaceToBatchND |❌|
| SparseToDense |❌|
| Split |❌|
| Sqrt |✅|
| Square |✅|
| Squeeze |❌|
| Sub |✅|
| Sum |✅|
| Tile |❌|
| TopK |❌|
| Transpose |✅|
| TransposeConv |❌|
| LogicalOr |❌|
| OneHot |❌|
| LogicalAnd |❌|
| LogicalNot |❌|
| UnPack |❌|
| ReduceMin |✅|
| FloorDiv |❌|
| ReduceAny |❌|
| ZerosLike |❌|
| Fill |❌|
| FloorMod |❌|
| Range |❌|
| ResizeNearestNeighbor |✅|
| LeakyRelu |✅|
| MirrorPad |❌|
| Abs |✅|
| SplitV |❌|
| Unique |❌|
| Ceil |✅|
| Reverse |❌|
| AddN |❌|
| GatherND |❌|
| Cos |✅|
| Where |❌|
| Rank |❌|
| Elu |❌|
| ReverseSequence |❌|
`USAGE.md` (new file):
@@ -0,0 +1,35 @@
```
DESCRIPTION
    NNCASE model compiler and inference tool.
SYNOPSIS
    ncc compile <input file> <output file> -i <input format> [-o <output
        format>] [-t <target>] [--dataset <dataset path>] [--inference-type
        <inference type>] [--input-mean <input mean>] [--input-std <input std>]
        [--dump-ir] [-v]

    ncc infer <input file> <output path> --dataset <dataset path> [--input-mean
        <input mean>] [--input-std <input std>] [-v]

OPTIONS
    compile
        <input file>         input file
        <output file>        output file
        -i, --input-format   input file format: e.g. tflite
        -o, --output-format  output file format: e.g. kmodel, default is kmodel
        -t, --target         target arch: e.g. cpu, k210, default is k210
        --dataset            calibration dataset, used in post quantization
        --inference-type     inference type: e.g. float, uint8, default is uint8
        --input-mean         input mean, default is 0.000000
        --input-std          input std, default is 1.000000
        --dump-ir            dump nncase ir to .dot files

    infer
        <input file>         input kmodel
        <output path>        inference result output directory
        --dataset            input dataset for inference
        --input-mean         input mean, default is 0.000000
        --input-std          input std, default is 1.000000

    -v, --version            show version
```
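As a concrete illustration of the synopsis above, here are two hedged example invocations; the model file names and the `./images` calibration directory are placeholders, not paths taken from this commit:

```
# Compile a TFLite model into a quantized kmodel for the k210 target,
# using ./images as the post-quantization calibration dataset
ncc compile mbnetv1.tflite mbnetv1.kmodel -i tflite -t k210 --inference-type uint8 --dataset ./images

# Run the compiled kmodel over the same dataset and write raw outputs to ./output
ncc infer mbnetv1.kmodel ./output --dataset ./images
```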