Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Unify the order of paddings, fix an LRN bug, and more #46

Merged
merged 13 commits
Apr 11, 2019
14 changes: 7 additions & 7 deletions common/Shaper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,15 @@ using std::vector;

/**
 * Shape inference for a Conv with default dilation (1, 1).
 *
 * strides: [stride_y, stride_x]
 * paddings: [top, left, bottom, right]  (unified ONNX-style order)
 *
 * Delegates to the scalar overload, which takes paddings in
 * (left, right, top, bottom) order — hence the index mapping below.
 */
void Shaper::Conv(const std::string &input_name,
                  const std::string &weight_name,
                  const std::vector<int32_t> paddings,
                  const std::vector<int32_t> strides,
                  const std::string &output_name) {
    Shaper::Conv(input_name, strides[1], strides[0], 1, 1,
                 paddings[1], paddings[3], paddings[0], paddings[2],
                 weight_name, output_name);
}

Expand All @@ -27,7 +27,7 @@ void Shaper::Conv(const std::string &input_name,
const std::string &weight_name,
const std::string &output_name) {
Shaper::Conv(input_name, strides[1], strides[0], dilations[1], dilations[0],
paddings[3], paddings[1], paddings[0], paddings[2],
paddings[1], paddings[3], paddings[0], paddings[2],
weight_name, output_name);
}

Expand Down Expand Up @@ -79,7 +79,7 @@ void Shaper::DepthwiseConv(const std::string &input_name,
const std::string &weight_name,
const std::string &output_name) {
Shaper::DepthwiseConv(input_name, strides[1], strides[0], dilations[1],
dilations[0], paddings[3], paddings[1], paddings[0],
dilations[0], paddings[1], paddings[3], paddings[0],
paddings[2], weight_name, output_name);
}

Expand Down Expand Up @@ -122,7 +122,7 @@ void Shaper::DepthwiseConv(const std::string &input_name,
const std::vector<int32_t> paddings,
const std::vector<int32_t> strides,
const std::string &output_name) {
DepthwiseConv(input_name, weight_name, paddings[3], paddings[1], paddings[0], paddings[2], strides[1], strides[0], output_name);
DepthwiseConv(input_name, weight_name, paddings[1], paddings[3], paddings[0], paddings[2], strides[1], strides[0], output_name);
}

void Shaper::StridedSlice(const std::string &input_name,
Expand Down Expand Up @@ -176,13 +176,13 @@ void Shaper::Pool(const std::string &input_name, int32_t padding_left,
/**
 * Shape inference for a pooling op.
 *
 * kernel_shape: [height, width]
 * strides: [stride_y, stride_x]
 * pads: [top, left, bottom, right]  (unified ONNX-style order)
 *
 * Delegates to the scalar overload, which takes pads in
 * (left, right, top, bottom) order — hence the index mapping below.
 */
void Shaper::Pool(const std::string &input_name,
                  const std::vector<int32_t> kernel_shape,
                  const std::vector<int32_t> pads,
                  const std::vector<int32_t> strides,
                  const std::string &output_name) {
    Shaper::Pool(input_name, pads[1], pads[3], pads[0], pads[2], strides[1],
                 strides[0], kernel_shape[1], kernel_shape[0], output_name);
}

void Shaper::Softmax(const std::string &input_name,
Expand Down
3 changes: 1 addition & 2 deletions common/daq.fbs
Original file line number Diff line number Diff line change
Expand Up @@ -157,11 +157,10 @@ table Dequantize {

// Local Response Normalization layer.
// `radius` is the half-window size along the channel axis
// (NNAPI-style; an ONNX `size` maps to radius = (size - 1) / 2).
table LRN {
    input:string;
    radius:int;
    bias:float;
    alpha:float;
    beta:float;
    output:string;
}

Expand Down
34 changes: 11 additions & 23 deletions common/daq_generated.h
Original file line number Diff line number Diff line change
Expand Up @@ -2005,18 +2005,17 @@ inline flatbuffers::Offset<Dequantize> CreateDequantizeDirect(
struct LRN FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
enum {
VT_INPUT = 4,
VT_SIZE = 6,
VT_RADIUS = 6,
VT_BIAS = 8,
VT_ALPHA = 10,
VT_BETA = 12,
VT_DIM = 14,
VT_OUTPUT = 16
VT_OUTPUT = 14
};
const flatbuffers::String *input() const {
return GetPointer<const flatbuffers::String *>(VT_INPUT);
}
int32_t size() const {
return GetField<int32_t>(VT_SIZE, 0);
int32_t radius() const {
return GetField<int32_t>(VT_RADIUS, 0);
}
float bias() const {
return GetField<float>(VT_BIAS, 0.0f);
Expand All @@ -2027,21 +2026,17 @@ struct LRN FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
float beta() const {
return GetField<float>(VT_BETA, 0.0f);
}
int32_t dim() const {
return GetField<int32_t>(VT_DIM, 0);
}
const flatbuffers::String *output() const {
return GetPointer<const flatbuffers::String *>(VT_OUTPUT);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_INPUT) &&
verifier.VerifyString(input()) &&
VerifyField<int32_t>(verifier, VT_SIZE) &&
VerifyField<int32_t>(verifier, VT_RADIUS) &&
VerifyField<float>(verifier, VT_BIAS) &&
VerifyField<float>(verifier, VT_ALPHA) &&
VerifyField<float>(verifier, VT_BETA) &&
VerifyField<int32_t>(verifier, VT_DIM) &&
VerifyOffset(verifier, VT_OUTPUT) &&
verifier.VerifyString(output()) &&
verifier.EndTable();
Expand All @@ -2054,8 +2049,8 @@ struct LRNBuilder {
void add_input(flatbuffers::Offset<flatbuffers::String> input) {
fbb_.AddOffset(LRN::VT_INPUT, input);
}
void add_size(int32_t size) {
fbb_.AddElement<int32_t>(LRN::VT_SIZE, size, 0);
void add_radius(int32_t radius) {
fbb_.AddElement<int32_t>(LRN::VT_RADIUS, radius, 0);
}
void add_bias(float bias) {
fbb_.AddElement<float>(LRN::VT_BIAS, bias, 0.0f);
Expand All @@ -2066,9 +2061,6 @@ struct LRNBuilder {
void add_beta(float beta) {
fbb_.AddElement<float>(LRN::VT_BETA, beta, 0.0f);
}
void add_dim(int32_t dim) {
fbb_.AddElement<int32_t>(LRN::VT_DIM, dim, 0);
}
void add_output(flatbuffers::Offset<flatbuffers::String> output) {
fbb_.AddOffset(LRN::VT_OUTPUT, output);
}
Expand All @@ -2087,40 +2079,36 @@ struct LRNBuilder {
// Builds an LRN table in the given FlatBufferBuilder.
// Fields are added largest-alignment-first (flatc's usual ordering);
// string offsets must already live in `_fbb`.
inline flatbuffers::Offset<LRN> CreateLRN(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> input = 0,
    int32_t radius = 0,
    float bias = 0.0f,
    float alpha = 0.0f,
    float beta = 0.0f,
    flatbuffers::Offset<flatbuffers::String> output = 0) {
  LRNBuilder builder_(_fbb);
  builder_.add_output(output);
  builder_.add_beta(beta);
  builder_.add_alpha(alpha);
  builder_.add_bias(bias);
  builder_.add_radius(radius);
  builder_.add_input(input);
  return builder_.Finish();
}

// Convenience wrapper around CreateLRN that accepts C strings and
// interns them in `_fbb` (null pointers become absent fields).
inline flatbuffers::Offset<LRN> CreateLRNDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *input = nullptr,
    int32_t radius = 0,
    float bias = 0.0f,
    float alpha = 0.0f,
    float beta = 0.0f,
    const char *output = nullptr) {
  return DNN::CreateLRN(
      _fbb,
      input ? _fbb.CreateString(input) : 0,
      radius,
      bias,
      alpha,
      beta,
      output ? _fbb.CreateString(output) : 0);
}

Expand Down
10 changes: 6 additions & 4 deletions dnnlibrary/include/ModelBuilder.h
Original file line number Diff line number Diff line change
Expand Up @@ -195,8 +195,12 @@ class ModelBuilder {
int32_t fuse_code, const std::string &output);
#endif // __ANDROID_API__ >= 27
#if __ANDROID_API__ >= 27
ModelBuilder::Index AddLRN(const std::string &input, int32_t size,
float bias, float alpha, float beta, int32_t dim,
ModelBuilder::Index AddDequantize(const std::string &input,
const std::string &output);
#endif // __ANDROID_API__ >= 27
#if __ANDROID_API__ >= 27
ModelBuilder::Index AddLRN(const std::string &input, int32_t radius,
float bias, float alpha, float beta,
const std::string &output);
#endif // __ANDROID_API__ >= 27
// ModelBuilder auto generated methods end
Expand Down Expand Up @@ -248,8 +252,6 @@ class ModelBuilder {
const std::string &input1_name, const std::string &input2_name,
const std::string &output_name,
const std::optional<QuantInfo> &output_quant_info = std::nullopt);
Index AddDequantize(const std::string &input_name,
const std::string &output_name);
#if __ANDROID_API__ >= __ANDROID_API_P__
ModelBuilder &AllowFp16(const bool allowed);
#endif
Expand Down
16 changes: 8 additions & 8 deletions dnnlibrary/src/DaqReader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -202,17 +202,17 @@ void AddLayers(const DNN::Model &model, ModelBuilder &builder) {
UNPACK_LAYER_QUANT(avepool, strides, pads, kernel_shape, fuse,
input, output);
builder.AddPool(
input, strides[1], strides[0], pads[2], pads[3], pads[0],
pads[1], kernel_shape[0], kernel_shape[1], fuse,
input, strides[1], strides[0], pads[1], pads[3], pads[0],
pads[2], kernel_shape[0], kernel_shape[1], fuse,
ModelBuilder::PoolingType::AVE_POOL, output, quant_info);
break;
}
case DNN::LayerType::MaxPool: {
UNPACK_LAYER_QUANT(maxpool, strides, pads, kernel_shape, fuse,
input, output);
builder.AddPool(
input, strides[1], strides[0], pads[2], pads[3], pads[0],
pads[1], kernel_shape[0], kernel_shape[1], fuse,
input, strides[1], strides[0], pads[1], pads[3], pads[0],
pads[2], kernel_shape[0], kernel_shape[1], fuse,
ModelBuilder::PoolingType::MAX_POOL, output, quant_info);
break;
}
Expand Down Expand Up @@ -283,10 +283,10 @@ void AddLayers(const DNN::Model &model, ModelBuilder &builder) {
#endif
break;
}
default: {
throw std::invalid_argument("Unsupported layer " +
layer_type_to_str(layer->type()));
}
case DNN::LayerType::LRN: {
ADD_LAYER(lrn, LRN, input, radius, bias, alpha, beta, output);
break;
}
}
}
}
Expand Down
35 changes: 19 additions & 16 deletions dnnlibrary/src/ModelBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -472,14 +472,29 @@ ModelBuilder::Index ModelBuilder::AddMul(const std::string &input, float scalar,
}
#endif // __ANDROID_API__ >= 27
#if __ANDROID_API__ >= 27
ModelBuilder::Index ModelBuilder::AddLRN(const std::string &input, int32_t size,
float bias, float alpha, float beta,
int32_t dim,
// Adds an ANEURALNETWORKS_DEQUANTIZE operation that converts the input
// operand to a FLOAT32 operand of the same shape.
//
// @param input  Name of an already-registered input operand
//               (presumably a quantized tensor — NNAPI requirement; the
//               type is not checked here).
// @param output Name under which the new FLOAT32 operand is registered.
// @return Index of the newly created output operand.
ModelBuilder::Index ModelBuilder::AddDequantize(const std::string &input,
const std::string &output) {
IndexSeq input_indexes;
// .at() throws std::out_of_range if `input` was never registered.
const auto input_idx = operand_indexes_.at(input);
input_indexes.push_back(input_idx);
// Dequantize is element-wise: output shape == input shape.
shaper_.Identity(input, output);
// Output is always FLOAT32 regardless of the input's quantized type.
const OperandType operand_type =
GetOperandType(Type::FLOAT32, shaper_[output]);
const auto output_idx = AddOperation(ANEURALNETWORKS_DEQUANTIZE,
input_indexes, operand_type)[0];
RegisterOperand(output, output_idx, operand_type);
return output_idx;
}
#endif // __ANDROID_API__ >= 27
#if __ANDROID_API__ >= 27
ModelBuilder::Index ModelBuilder::AddLRN(const std::string &input,
int32_t radius, float bias,
float alpha, float beta,
const std::string &output) {
IndexSeq input_indexes;
const auto input_idx = operand_indexes_.at(input);
input_indexes.push_back(input_idx);
AddScalarOperands(input_indexes, size, bias, alpha, beta, dim);
AddScalarOperands(input_indexes, radius, bias, alpha, beta);
shaper_.Identity(input, output);
const OperandType operand_type =
GetOperandType(operand_types_.at(input).type, shaper_[output]);
Expand Down Expand Up @@ -550,18 +565,6 @@ ModelBuilder::Index ModelBuilder::AddSoftMax(const string &input_name,
return AddSoftmax(input_name, beta, output_name);
}

ModelBuilder::Index ModelBuilder::AddDequantize(
const std::string &input_name, const std::string &output_name) {
const auto input = operand_indexes_[input_name];
shaper_.Eltwise(input_name, output_name);
IndexSeq input_indexes{input};
const OperandType operand_type(Type::TENSOR_FLOAT32, shaper_[output_name]);
const auto output_idx = AddOperation(ANEURALNETWORKS_DEQUANTIZE,
input_indexes, operand_type)[0];
RegisterOperand(output_name, output_idx, operand_type);
return output_idx;
}

ModelBuilder::Index ModelBuilder::AddFC(
const string &input_name, int32_t activation, const string &weight_name,
const std::optional<string> &bias_name, const string &output_name,
Expand Down
27 changes: 21 additions & 6 deletions generate_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,11 @@ class Target(Enum):
OnnxConverter = 2


def clang_format(filename: str):
    """Format `filename` in place with clang-format, if it is installed.

    Formatting is purely cosmetic, so code generation should not abort
    with FileNotFoundError when clang-format is missing from PATH.
    """
    import shutil
    import subprocess
    if shutil.which('clang-format') is None:
        print('clang-format not found in PATH, skipping formatting of '
              + filename)
        return
    subprocess.run(['clang-format', '-i', filename])


def cogout(txt):
    """Append `txt` to the shared output buffer without a trailing newline."""
    str_io.write(str(txt))

Expand Down Expand Up @@ -131,6 +136,8 @@ def infer_cfg(cfg, target: Target):
op['support_quant_asymm'] = False
if 'converter' not in op:
op['converter'] = True
if 'output_tensor_type' not in op:
op['output_tensor_type'] = 'auto'
for ipt in op['input']:
if 'predefined' not in ipt:
ipt['predefined'] = ''
Expand Down Expand Up @@ -162,6 +169,7 @@ def update_code(file: str, label: str) -> None:
new_s = s[:idx1] + str_io.getvalue() + s[idx2:]
f.write(new_s)
str_io = io.StringIO()
clang_format(file)


def generate_onnx_converter():
Expand All @@ -178,10 +186,6 @@ def generate_onnx_converter():
cogoutl(f"void OnnxConverter::AddLayer{op['name']}{'' if op['converter'] else 'Impl'}({params_str}) {{")
if op['fused']:
cogoutl(f"const auto activation = FindActivation(model_proto_, output);")
cogoutl("if (activation.first.has_value()) {")
cogoutl("skipped_act_.push_back(activation.first.value());")
cogoutl("name_map_[activation.first.value()] = output;")
cogoutl("}")
for x in op['input']:
if x['learnable']:
assert x['cpp_type'] in ['str', 'optional_str']
Expand All @@ -200,8 +204,16 @@ def generate_onnx_converter():
if x['cpp_type'] == 'str_list':
cogoutl(f"const auto {x['name']}_fb = FbStrVector({x['name']});")

shaper_params = []
for x in op['input']:
if x.get('needed_by_shaper', False):
if x['cpp_type'] == 'str':
shaper_params.append(f"m({x['name']})")
else:
shaper_params.append(f"{x['name']}")
shaper_params += [x['name'] for x in op['output']]
cogoutl(
f"shaper_.{op['shaper']}({', '.join([x['name'] for x in ipt_opt if x.get('needed_by_shaper', False)])});")
f"shaper_.{op['shaper']}({', '.join(shaper_params)});")

def get_input_param(x):
if x['cpp_type'] == 'str':
Expand Down Expand Up @@ -264,7 +276,10 @@ def generate_model_builder():
cogoutl('AddScalarOperands(input_indexes, {});'.format(', '.join([x['name'] for x in scalar_input])))
cogoutl('shaper_.{}({});'.format(op['shaper'],
', '.join([x['name'] for x in ipt_opt if x.get('needed_by_shaper', False)])))
if op['input'][0]['cpp_type'] == 'str_list':
if op['output_tensor_type'] != 'auto':
op_type_params = ['Type::{}'.format(op['output_tensor_type']),
'shaper_[{}]'.format(op['output'][0]['name'])]
elif op['input'][0]['cpp_type'] == 'str_list':
op_type_params = ['operand_types_.at({}[0]).type'.format(op['input'][0]['name']),
'shaper_[{}]'.format(op['output'][0]['name'])]
else:
Expand Down
Loading