Skip to content

Commit

Permalink
Merge pull request #47 from JDAI-CV/update_onnx2bnn
Browse files Browse the repository at this point in the history
Update onnx2bnn and convert onnx->daq in ci test
  • Loading branch information
daquexian authored Aug 20, 2019
2 parents 4337638 + c3b6751 commit bad1c0a
Show file tree
Hide file tree
Showing 18 changed files with 120 additions and 338 deletions.
3 changes: 0 additions & 3 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,3 @@
[submodule "third_party/protobuf"]
path = third_party/protobuf
url = https://github.com/protocolbuffers/protobuf
[submodule "third_party/pybind11"]
path = third_party/pybind11
url = https://github.com/pybind/pybind11
4 changes: 1 addition & 3 deletions ci/build_onnx2bnn.sh
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@
#! /usr/bin/env bash
# Configure and build the onnx2bnn converter into ./build_onnx2bnn.
# set -e: abort immediately if any step fails so CI reports the real
# error instead of a confusing downstream failure.
set -e

mkdir build_onnx2bnn && cd build_onnx2bnn
cmake ..
# Single build invocation; let CMake drive the underlying build tool.
# (The old explicit "-- -j$nproc" parallelism flag was dropped, so the
# core-count probe is no longer needed.)
cmake --build .
cd -
4 changes: 2 additions & 2 deletions ci/dabnn_build_and_test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ jobs:
- bash: brew install watch gnu-sed
displayName: Install watch and gnu-sed
- bash: ci/build_dabnn.sh
displayName: Build
displayName: Build dabnn
- bash: ci/start_android_emulator.sh
displayName: Start Android Emulator
- template: template_dabnn_run_test.yml
Expand All @@ -49,7 +49,7 @@ jobs:
- bash: brew install watch gnu-sed
displayName: Install watch and gnu-sed
- bash: ci/build_dabnn_v7.sh
displayName: Build
displayName: Build dabnn
- bash: ci/start_android_emulator_v7.sh
displayName: Start Android Emulator
- template: template_dabnn_run_test.yml
8 changes: 8 additions & 0 deletions ci/download_and_convert_models.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
#! /usr/bin/env bash
# Fetch pretrained ONNX models, convert each one to dabnn's .dab format
# with the freshly built onnx2bnn, and push the result to the Android
# device/emulator for the CI model tests.
#
# set -e: fail fast — a failed download or conversion must fail this CI
# step instead of letting adb push a stale or partial model. This also
# matches the sibling ci/build_onnx2bnn.sh, which already uses set -e.
set -e

# download_and_convert <google-drive-file-id> <basename>
#   Downloads <basename>.onnx from Google Drive, converts it to
#   <basename>.dab, and pushes the .dab to /data/local/tmp on the device.
download_and_convert() {
    local file_id="$1"
    local name="$2"
    wget "https://drive.google.com/uc?export=download&id=${file_id}" -O "${name}.onnx"
    # --aggressive: default optimization level (mark all +1/-1-weight
    # convs as binary); --verbose: log which convs were recognized.
    ./build_onnx2bnn/tools/onnx2bnn/onnx2bnn "${name}.onnx" "${name}.dab" --aggressive --verbose
    adb push "${name}.dab" /data/local/tmp
}

download_and_convert "1Xp3HB51H6Nhl6e555ieJubVutQake5sR" model_imagenet
download_and_convert "1zu48CFptAGZ91IDCBPJSPM0bxDuPm9HS" model_imagenet_stem
6 changes: 0 additions & 6 deletions ci/download_models.sh

This file was deleted.

17 changes: 6 additions & 11 deletions ci/onnx2bnn_build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -51,29 +51,24 @@ jobs:
steps:
- checkout: self
submodules: true
- template: template_onnx2bnn_build_python_all_version.yml
- task: CopyFiles@2
inputs:
sourceFolder: '.setuptools-cmake-build\tools\onnx2bnn\Release\'
contents: 'onnx2bnn.exe'
targetFolder: $(Build.ArtifactStagingDirectory)
- bash: ci/build_onnx2bnn.sh
displayName: Build
- template: template_onnx2bnn_publish_artifacts.yml
- template: template_onnx2bnn_github_release.yml
- template: template_onnx2bnn_upload_to_pypi.yml
- job: macOS
pool:
vmImage: 'macOS-10.14'
steps:
- checkout: self
submodules: true
- template: template_onnx2bnn_build_python_all_version.yml
- script: 'cp .setuptools-cmake-build/tools/onnx2bnn/onnx2bnn .setuptools-cmake-build/tools/onnx2bnn/onnx2bnn-macos'
- bash: ci/build_onnx2bnn.sh
displayName: Build
- script: 'cp build_onnx2bnn/tools/onnx2bnn/onnx2bnn build_onnx2bnn/tools/onnx2bnn/onnx2bnn-macos'
displayName: 'Rename onnx2bnn'
- task: CopyFiles@2
inputs:
sourceFolder: '.setuptools-cmake-build/tools/onnx2bnn'
sourceFolder: 'build_onnx2bnn/tools/onnx2bnn'
contents: 'onnx2bnn-macos'
targetFolder: $(Build.ArtifactStagingDirectory)
- template: template_onnx2bnn_publish_artifacts.yml
- template: template_onnx2bnn_github_release.yml
- template: template_onnx2bnn_upload_to_pypi.yml
6 changes: 4 additions & 2 deletions ci/template_dabnn_run_test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@ steps:
displayName: Binary Conv Test
- bash: ci/adb_push_and_run.sh build_dabnn/tests/bgemm_test
displayName: Binary Gemm Test
- bash: ci/download_models.sh
displayName: Download Models
- bash: ci/build_onnx2bnn.sh
displayName: Build onnx2bnn
- bash: ci/download_and_convert_models.sh
displayName: Download and Convert Models
- bash: ci/adb_push_and_run.sh build_dabnn/tests/net_test
displayName: Model Test
6 changes: 0 additions & 6 deletions ci/template_onnx2bnn_build_python.yml

This file was deleted.

20 changes: 0 additions & 20 deletions ci/template_onnx2bnn_build_python_all_version.yml

This file was deleted.

9 changes: 0 additions & 9 deletions ci/template_onnx2bnn_upload_to_pypi.yml

This file was deleted.

1 change: 0 additions & 1 deletion third_party/pybind11
Submodule pybind11 deleted from 97784d
31 changes: 27 additions & 4 deletions tools/onnx2bnn/OnnxConverter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -172,9 +172,10 @@ std::vector<OnnxConverter::BTensor> OnnxConverter::split(
return outputs;
}

void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
const std::string &filepath,
const OnnxConverter::Level level) {
std::vector<std::string> OnnxConverter::Convert(
const ONNX_NAMESPACE::ModelProto &model_proto, const std::string &filepath,
const OnnxConverter::Level level,
const std::vector<std::string> &expected_binary_conv_outputs) {
GOOGLE_PROTOBUF_VERIFY_VERSION;

// We recognize binary convolutions in our custom ONNX optimizers.
Expand Down Expand Up @@ -236,6 +237,7 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
inputs.push_back(flat_input);
}

vector<string> binary_conv_outputs;
vector<string> skipped_act;
bool has_reshape = false;
for (const auto &node : model_proto_.graph().node()) {
Expand Down Expand Up @@ -270,7 +272,15 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
}

auto ori_weight_name = m(node.input(1));
const bool binary_conv = (node.domain() == "dabnn");
const bool binary_conv =
(node.domain() == "dabnn") ||
(std::find(expected_binary_conv_outputs.begin(),
expected_binary_conv_outputs.end(),
node.output(0)) !=
expected_binary_conv_outputs.end());
if (binary_conv) {
binary_conv_outputs.push_back(node.output(0));
}
AddConv(m(node.input(0)), strides, pads, dilations, group,
ori_weight_name, bias_name, m(node.output(0)), binary_conv);
VLOG(5) << "Converting Conv completed";
Expand Down Expand Up @@ -472,6 +482,17 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
throw std::invalid_argument("Unsupported operator " + op);
}
}

for (const auto &expected : expected_binary_conv_outputs) {
if (std::find(binary_conv_outputs.begin(), binary_conv_outputs.end(),
expected) == binary_conv_outputs.end()) {
throw std::invalid_argument(
expected +
" is in the list file but not in the ONNX model, please check "
"your list file");
}
}

auto flat_layers = builder_.CreateVector(layers_);
auto flat_inputs = builder_.CreateVector(inputs);
auto flat_tensors = builder_.CreateVector(tensors_);
Expand All @@ -487,6 +508,8 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
ofs.write(reinterpret_cast<char *>(builder_.GetBufferPointer()),
builder_.GetSize());
ofs.close();

return binary_conv_outputs;
}

void OnnxConverter::CalculateCoeff(const ONNX_NAMESPACE::NodeProto &node,
Expand Down
4 changes: 2 additions & 2 deletions tools/onnx2bnn/OnnxConverter.h
Original file line number Diff line number Diff line change
Expand Up @@ -153,9 +153,9 @@ class OnnxConverter {
kModerate,
kAggressive,
};
void Convert(const ONNX_NAMESPACE::ModelProto &model,
std::vector<std::string> Convert(const ONNX_NAMESPACE::ModelProto &model,
const std::string &filepath,
const Level level=Level::kModerate);
const Level level, const std::vector<std::string> &expected_binary_conv_outputs);
};

template <>
Expand Down
93 changes: 70 additions & 23 deletions tools/onnx2bnn/onnx2bnn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,52 +16,91 @@ using std::string;
using std::vector;

void usage(const std::string &filename) {
std::cout << "Usage:" << std::endl;
std::cout << " " << filename
<< " onnx_model output_filename [ --strict | --moderate | "
"--aggressive ] [--binary-list] [--verbose]"
<< std::endl;
std::cout << std::endl;
std::cout << "Options:" << std::endl;
std::cout
<< " --aggressive The default optimization level. In this level, "
"onnx2bnn will mark all convolutions with binary (+1/-1) weights as "
"binary convolutions. It is for the existing BNN models, which may "
"not use the correct padding value. Note: The output of the "
"generated dabnn model is different from that of the ONNX model "
"since the padding value is 0 instead of -1."
<< std::endl;
std::cout << " --moderate This level is for our \"standard\" "
"implementation -- A Conv operator with binary weight and "
"following a -1 Pad operator."
<< std::endl;
std::cout
<< "Usage: " << filename
<< " onnx_model output_filename [--optimize strict|moderate|aggressive]"
<< " --strict In this level, onnx2bnn only recognizes the "
"following natural and correct \"pattern\" of binary convolutions: "
"A Conv operator, whose input is got from a Sign op and a Pad op "
"(the order doesn't matter), and weight is got from a Sign op."
<< std::endl;
std::cout << "Example: " << filename
std::cout
<< " --binary-list A text file containing the **output "
"names** of some convolutions, which will be treated as binary "
"convlutions unconditionally. It is mainly for benchmark purpose."
<< std::endl;
std::cout << std::endl;
std::cout << "Example:" << std::endl;
std::cout << " " << filename
<< " model.onnx model.dab (The optimization leval will be "
"\"aggressive\")"
<< std::endl;
std::cout << "Example: " << filename
<< " model.onnx model.dab --optimize strict (The optimization "
std::cout << " " << filename
<< " model.onnx model.dab --strict (The optimization "
"level will be \"strict\")"
<< std::endl;
}

int main(int argc, char **argv) {
argh::parser cmdl;
cmdl.add_param("optimize");
cmdl.add_param("--binary-list");
cmdl.parse(argc, argv);
google::InitGoogleLogging(cmdl[0].c_str());
FLAGS_alsologtostderr = true;
if (!cmdl(2)) {
usage(cmdl[0]);
return -1;
}
// flags like 'onnx2bnn --strict' is not supported now
for (const auto flag : cmdl.flags()) {
std::cout << "Invalid flag: " << flag << std::endl;
usage(cmdl[0]);
return -2;
if (flag != "strict" && flag != "moderate" && flag != "aggressive" &&
flag != "verbose") {
std::cout << "Invalid flag: " << flag << std::endl;
usage(cmdl[0]);
return -2;
}
}

const std::string opt_level_str =
cmdl("optimize").str().empty() ? "aggressive" : cmdl("optimize").str();

bnn::OnnxConverter::Level opt_level;
if (opt_level_str == "strict") {
bnn::OnnxConverter::Level opt_level =
bnn::OnnxConverter::Level::kAggressive;
if (cmdl["strict"]) {
opt_level = bnn::OnnxConverter::Level::kStrict;
} else if (opt_level_str == "moderate") {
} else if (cmdl["moderate"]) {
opt_level = bnn::OnnxConverter::Level::kModerate;
} else if (opt_level_str == "aggressive") {
} else if (cmdl["aggressive"]) {
opt_level = bnn::OnnxConverter::Level::kAggressive;
} else {
std::cout << "Invalid optimization level: " << opt_level_str
<< std::endl;
usage(cmdl[0]);
return -3;
}

if (cmdl["verbose"]) {
FLAGS_v = 5;
}

const auto binary_list_filepath = cmdl("binary-list").str();
vector<string> expected_binary_conv_outputs;
if (!binary_list_filepath.empty()) {
std::ifstream ifs(binary_list_filepath);
if (ifs.is_open()) {
string binary_conv_output;
while (ifs >> binary_conv_output) {
expected_binary_conv_outputs.push_back(binary_conv_output);
}
}
}

ONNX_NAMESPACE::ModelProto model_proto;
Expand All @@ -72,7 +111,15 @@ int main(int argc, char **argv) {
}

bnn::OnnxConverter converter;
converter.Convert(model_proto, cmdl[2], opt_level);
const auto binary_conv_outputs = converter.Convert(
model_proto, cmdl[2], opt_level, expected_binary_conv_outputs);

LOG(INFO) << "Conversion completed! Found " << binary_conv_outputs.size()
<< " binary convolutions. Add --verbose to get what they are.";
VLOG(5) << "The outputs name of binary convolutions are: ";
for (const auto &x : binary_conv_outputs) {
VLOG(5) << x;
}

google::protobuf::ShutdownProtobufLibrary();
return 0;
Expand Down
1 change: 0 additions & 1 deletion tools/onnx2bnn/python/onnx2bnn/__init__.py

This file was deleted.

18 changes: 0 additions & 18 deletions tools/onnx2bnn/python/onnx2bnn/__main__.py

This file was deleted.

17 changes: 0 additions & 17 deletions tools/onnx2bnn/python/onnx2bnn/convert.py

This file was deleted.

Loading

0 comments on commit bad1c0a

Please sign in to comment.