refactor: carry conv/deconv kernel info at the const node level (#684)

alexander-camuto authored Jan 9, 2024
1 parent 35acbf0 commit 07a5080

Showing 21 changed files with 321 additions and 220 deletions.
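In short, the refactor drops the kernel and bias fields from the PolyOp::Conv variant and instead passes both tensors through the layout input slice alongside the image, carried as ValTensor values at the const-node level. A minimal sketch of the shape of this change, using simplified stand-in types rather than the crate's actual definitions:

// Simplified stand-ins for the crate's types; not the actual ezkl definitions.
#[derive(Clone, Debug)]
struct ValTensor(Vec<i64>);

// Before: the op variant owned its constant tensors.
#[allow(dead_code)]
enum PolyOpBefore {
    Conv {
        kernel: ValTensor,
        bias: Option<ValTensor>,
        padding: [(usize, usize); 2],
        stride: (usize, usize),
    },
}

// After: the variant keeps only hyperparameters; kernel and bias
// travel with the inputs, exactly like the image.
#[derive(Debug)]
enum PolyOpAfter {
    Conv {
        padding: [(usize, usize); 2],
        stride: (usize, usize),
    },
}

fn layout(inputs: &[ValTensor], op: &PolyOpAfter) {
    // inputs = [image, kernel, bias], per the diffs below
    println!("laying out {:?} over {} inputs", op, inputs.len());
}

fn main() {
    let (image, kernel, bias) = (
        ValTensor(vec![1, 2]),
        ValTensor(vec![3]),
        ValTensor(vec![0]),
    );
    layout(
        &[image, kernel, bias],
        &PolyOpAfter::Conv { padding: [(0, 0); 2], stride: (1, 1) },
    );
}

Keeping constants in the input slice lets conv/deconv treat parameters uniformly with activations, as the file diffs below show.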
14 changes: 7 additions & 7 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -54,7 +54,7 @@ tokio-util = { version = "0.7.9", features = ["codec"] }
 pyo3 = { version = "0.18.3", features = ["extension-module", "abi3-py37", "macros"], default_features = false, optional = true }
 pyo3-asyncio = { version = "0.18.0", features = ["attributes", "tokio-runtime"], default_features = false, optional = true }
 pyo3-log = { version = "0.8.1", default_features = false, optional = true }
-tract-onnx = { git = "https://github.com/sonos/tract/", rev= "4ee813d", default_features = false, optional = true }
+tract-onnx = { git = "https://github.com/sonos/tract/", rev= "7b1aa33b2f7d1f19b80e270c83320f0f94daff69", default_features = false, optional = true }
 tabled = { version = "0.12.0", optional = true }
12 changes: 5 additions & 7 deletions benches/accum_conv.rs
@@ -28,8 +28,8 @@ const K: usize = 17;
 #[derive(Clone, Debug)]
 struct MyCircuit {
     image: ValTensor<Fr>,
-    kernel: Tensor<Fr>,
-    bias: Tensor<Fr>,
+    kernel: ValTensor<Fr>,
+    bias: ValTensor<Fr>,
 }
 
 impl Circuit<Fr> for MyCircuit {
@@ -65,10 +65,8 @@ impl Circuit<Fr> for MyCircuit {
         config
             .layout(
                 &mut region,
-                &[self.image.clone()],
+                &[self.image.clone(), self.kernel.clone(), self.bias.clone()],
                 Box::new(PolyOp::Conv {
-                    kernel: self.kernel.clone(),
-                    bias: Some(self.bias.clone()),
                     padding: [(0, 0); 2],
                     stride: (1, 1),
                 }),
@@ -116,8 +114,8 @@ fn runcnvrl(c: &mut Criterion) {
 
     let circuit = MyCircuit {
         image: ValTensor::from(image),
-        kernel,
-        bias,
+        kernel: ValTensor::try_from(kernel).unwrap(),
+        bias: ValTensor::try_from(bias).unwrap(),
     };
 
     group.throughput(Throughput::Elements(*size as u64));
28 changes: 21 additions & 7 deletions examples/conv2d_mnist/main.rs
@@ -82,8 +82,8 @@ struct MyCircuit<
     // Given the stateless ConvConfig type information, a DNN trace is determined by its input and the parameters of its layers.
     // Computing the trace still requires a forward pass. The intermediate activations are stored only by the layouter.
     input: ValTensor<F>,
-    l0_params: [Tensor<F>; 2],
-    l2_params: [Tensor<F>; 2],
+    l0_params: [ValTensor<F>; 2],
+    l2_params: [ValTensor<F>; 2],
 }
 
 impl<
@@ -202,14 +202,20 @@ where
             let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
 
             let op = PolyOp::Conv {
-                kernel: self.l0_params[0].clone(),
-                bias: Some(self.l0_params[1].clone()),
                 padding: [(PADDING, PADDING); 2],
                 stride: (STRIDE, STRIDE),
            };
             let x = config
                 .layer_config
-                .layout(&mut region, &[self.input.clone()], Box::new(op))
+                .layout(
+                    &mut region,
+                    &[
+                        self.input.clone(),
+                        self.l0_params[0].clone(),
+                        self.l0_params[1].clone(),
+                    ],
+                    Box::new(op),
+                )
                 .unwrap();
 
             let x = config
@@ -233,7 +239,7 @@ where
                 .layer_config
                 .layout(
                     &mut region,
-                    &[self.l2_params[0].clone().try_into().unwrap(), x],
+                    &[self.l2_params[0].clone(), x],
                     Box::new(PolyOp::Einsum {
                         equation: "ij,j->ik".to_string(),
                     }),
@@ -245,7 +251,7 @@ where
                 .layer_config
                 .layout(
                     &mut region,
-                    &[x, self.l2_params[1].clone().try_into().unwrap()],
+                    &[x, self.l2_params[1].clone()],
                     Box::new(PolyOp::Add),
                 )
                 .unwrap()
@@ -345,9 +351,13 @@ pub fn runconv() {
        .unwrap();
     l0_kernels.set_visibility(&ezkl::graph::Visibility::Private);
 
+    let l0_kernels = l0_kernels.try_into().unwrap();
+
     let mut l0_bias = Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::i32_to_felt(0)));
     l0_bias.set_visibility(&ezkl::graph::Visibility::Private);
 
+    let l0_bias = l0_bias.try_into().unwrap();
+
     let mut l2_biases = Tensor::<F>::from(myparams.biases.into_iter().map(|fl| {
         let dx = fl * 32_f32;
         let rounded = dx.round();
@@ -357,6 +367,8 @@ pub fn runconv() {
     l2_biases.set_visibility(&ezkl::graph::Visibility::Private);
     l2_biases.reshape(&[l2_biases.len(), 1]).unwrap();
 
+    let l2_biases = l2_biases.try_into().unwrap();
+
     let mut l2_weights = Tensor::<F>::from(myparams.weights.into_iter().flatten().map(|fl| {
         let dx = fl * 32_f32;
         let rounded = dx.round();
@@ -366,6 +378,8 @@ pub fn runconv() {
     l2_weights.set_visibility(&ezkl::graph::Visibility::Private);
     l2_weights.reshape(&[CLASSES, LEN]).unwrap();
 
+    let l2_weights = l2_weights.try_into().unwrap();
+
     let circuit = MyCircuit::<
         LEN,
         10,
51 changes: 51 additions & 0 deletions examples/onnx/bitshift/gen.py
@@ -0,0 +1,51 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()

def forward(self, w, x, y, z):

return x << 2, y >> 3, z << 1, w >> 4


circuit = MyModel()

# random integers between 0 and 100
x = torch.empty(1, 3).uniform_(0, 100).to(torch.int32)
y = torch.empty(1, 3).uniform_(0, 100).to(torch.int32)
z = torch.empty(1, 3).uniform_(0, 100).to(torch.int32)
w = torch.empty(1, 3).uniform_(0, 100).to(torch.int32)

torch.onnx.export(circuit, (w, x, y, z), "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=16, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input', 'input1', 'input2',
'input3'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'input1': {0: 'batch_size'},
'input2': {0: 'batch_size'},
'input3': {0: 'batch_size'},
'output': {0: 'batch_size'},
'output1': {0: 'batch_size'},
'output2': {0: 'batch_size'},
'output3': {0: 'batch_size'}})


d = ((w).detach().numpy()).reshape([-1]).tolist()
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
d2 = ((y).detach().numpy()).reshape([-1]).tolist()
d3 = ((z).detach().numpy()).reshape([-1]).tolist()

data = dict(
input_data=[d, d1, d2, d3],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
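As a quick sanity check on what the exported graph computes, here is the same arithmetic in plain Rust on the first element of each input row from the input.json below (assuming ONNX BitShift agrees with native integer shifts for these small non-negative int32 values):

fn main() {
    // First element of each row of input.json below, in model order: w, x, y, z.
    let (w, x, y, z) = (41i32, 13i32, 85i32, 25i32);
    // Mirrors the forward pass above: x << 2, y >> 3, z << 1, w >> 4.
    println!("{} {} {} {}", x << 2, y >> 3, z << 1, w >> 4); // prints: 52 10 50 2
}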
1 change: 1 addition & 0 deletions examples/onnx/bitshift/input.json
@@ -0,0 +1 @@
{"input_data": [[41, 39, 49], [13, 55, 66], [85, 60, 48], [25, 15, 15]]}
Binary file added examples/onnx/bitshift/network.onnx
Binary file not shown.
2 changes: 1 addition & 1 deletion examples/onnx/bitwise_ops/input.json
@@ -1 +1 @@
-{"input_data": [[true, true, false], [false, true, true], [true, true, true], [false, true, false]]}
+{"input_data": [[false, true, false], [false, true, true], [false, false, false], [false, true, true]]}
40 changes: 40 additions & 0 deletions examples/onnx/remainder/gen.py
@@ -0,0 +1,40 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()

def forward(self, x):
return x % 0.5


circuit = MyModel()

x = torch.empty(1, 8).uniform_(0, 1)

out = circuit(x)

print(out)

torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})


d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
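For a cross-check of the x % 0.5 semantics, here is the same computation in Rust, under the assumption that torch's float % follows Python's modulo (the result takes the divisor's sign; for these non-negative inputs it coincides with a plain remainder):

// Python-style modulo: x - floor(x / m) * m.
fn py_mod(x: f64, m: f64) -> f64 {
    x - (x / m).floor() * m
}

fn main() {
    // Second value of input.json below: 0.7709522247314453 % 0.5.
    let r = py_mod(0.7709522247314453, 0.5);
    assert!((r - 0.2709522247314453).abs() < 1e-12);
    println!("{r}");
}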
1 change: 1 addition & 0 deletions examples/onnx/remainder/input.json
@@ -0,0 +1 @@
{"input_data": [[0.24276268482208252, 0.7709522247314453, 0.3388288617134094, 0.04099464416503906, 0.5914043188095093, 0.6746469736099243, 0.32862555980682373, 0.6761162877082825]]}
Binary file added examples/onnx/remainder/network.onnx
Binary file not shown.
8 changes: 7 additions & 1 deletion src/circuit/ops/layouts.rs
@@ -2060,7 +2060,13 @@ pub fn conv<F: PrimeField + TensorType + PartialOrd + std::marker::Send + std::m
     let mut res = einsum(config, region, &[local_image, local_kernel], "i,i->")?;
 
     if has_bias {
-        let bias = values[2].get_single_elem(start_kernel_index)?;
+        let bias_index = if values[2].len() > 1 {
+            start_kernel_index
+        } else {
+            0
+        };
+
+        let bias = values[2].get_single_elem(bias_index)?;
         res = pairwise(config, region, &[res, bias], BaseOp::Add)?;
     }
     region.flush()?;
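The new branch is a broadcast rule: a single-element bias is reused for every output channel rather than indexed per kernel, which would otherwise read past the end of the tensor. A standalone sketch of just that selection logic (the function name is ours; the surrounding conv context is assumed):

// Mirror of the bias-index selection in the hunk above.
fn bias_index(bias_len: usize, start_kernel_index: usize) -> usize {
    if bias_len > 1 {
        start_kernel_index // one bias entry per output channel
    } else {
        0 // scalar bias: broadcast the single entry
    }
}

fn main() {
    assert_eq!(bias_index(8, 3), 3); // per-channel bias
    assert_eq!(bias_index(1, 3), 0); // broadcast bias
    println!("ok");
}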
6 changes: 1 addition & 5 deletions src/circuit/ops/lookup.rs
@@ -231,11 +231,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
             | LookupOp::LessThan { .. }
             | LookupOp::GreaterThanEqual { .. }
             | LookupOp::LessThanEqual { .. }
-            | LookupOp::KroneckerDelta
-            | LookupOp::Round { .. }
-            | LookupOp::RoundHalfToEven { .. }
-            | LookupOp::Ceil { .. }
-            | LookupOp::Floor { .. } => 0,
+            | LookupOp::KroneckerDelta => 0,
             _ => inputs_scale[0],
         };
         Ok(scale)
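Removing Round, RoundHalfToEven, Ceil, and Floor from the zero-scale arm means those ops now report their input's fixed-point scale (via the `_ => inputs_scale[0]` fallback). That matches their arithmetic: they snap a value to a whole multiple of the scale unit without changing the scale itself. A small illustration in plain fixed-point arithmetic (not the crate's lookup machinery):

// Floor at fixed-point scale s: snap down to a multiple of 2^s.
// The result is still expressed at scale s, not rescaled to 0.
fn floor_fixed(x: i64, scale: u32) -> i64 {
    let unit = 1i64 << scale;
    x.div_euclid(unit) * unit
}

fn main() {
    // 2.75 at scale 2 (unit = 4) is 11; floor gives 8, i.e. 2.0 at scale 2.
    assert_eq!(floor_fixed(11, 2), 8);
    println!("ok");
}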
(The remaining 8 changed files are not rendered here.)
