Change code-style #100

Draft · wants to merge 133 commits into main

133 commits
a5ffa49
Pass in const reference to weights
olilarkin Dec 27, 2023
0833ea6
Introduce weights_it alias
olilarkin Dec 30, 2023
78f42d0
Use Eigen::Ref<> rather than normal references
olilarkin Dec 30, 2023
d60bcc1
Remove NAM_SAMPLE
olilarkin Jan 19, 2024
913e5b3
finalize_ -> Finalize
olilarkin Jan 19, 2024
b3ef0e2
_prewarm_samples -> mPrewarmSamples
olilarkin Jan 19, 2024
9298e8b
num_frames -> numFrames
olilarkin Jan 19, 2024
9378f8f
process -> Process
olilarkin Jan 19, 2024
af31d4e
prewarm -> Prewarm
olilarkin Jan 19, 2024
7698420
_set_receptive_field -> SetReceptiveField
olilarkin Jan 19, 2024
693bf13
_reset_input_buffer
olilarkin Jan 19, 2024
6e55afa
_update_buffers_
olilarkin Jan 19, 2024
0f4e7cb
_rewind_buffers_
olilarkin Jan 19, 2024
3955a22
_input_buffer -> mInputBuffer
olilarkin Jan 19, 2024
d3068c8
_output_buffer -> mOutputBuffer
olilarkin Jan 19, 2024
8544e1a
_input_buffer_offset
olilarkin Jan 19, 2024
dadb1a7
mReceptiveField
olilarkin Jan 19, 2024
056777a
Remove unused _input_buffer_channels
olilarkin Jan 19, 2024
53b2915
newReceptiveField
olilarkin Jan 19, 2024
df723f7
input_buffer_size
olilarkin Jan 19, 2024
86a9ca4
receptive_field
olilarkin Jan 19, 2024
5c515d8
expectedSampleRate
olilarkin Jan 19, 2024
1fc8ec1
mBias
olilarkin Jan 19, 2024
b997c70
mWeight
olilarkin Jan 19, 2024
b882f5b
_do_bias
olilarkin Jan 19, 2024
6895341
_bias -> bias
olilarkin Jan 19, 2024
023f3eb
_weight
olilarkin Jan 19, 2024
ac2c199
_dilation
olilarkin Jan 19, 2024
483f52e
_dilation
olilarkin Jan 19, 2024
cb74e17
do_bias
olilarkin Jan 19, 2024
73f9686
process_
olilarkin Jan 19, 2024
355b2d5
set_weights_
olilarkin Jan 19, 2024
6eb415a
set_size_
olilarkin Jan 19, 2024
59832b9
set_size_and_weights_
olilarkin Jan 19, 2024
513b453
in_channels
olilarkin Jan 19, 2024
265b74c
out_channels
olilarkin Jan 19, 2024
e622ae2
kernel_size
olilarkin Jan 19, 2024
3eafcee
get_in_channels
olilarkin Jan 19, 2024
1b6def1
GetKernelSize
olilarkin Jan 19, 2024
52ad4c4
get_num_weights
olilarkin Jan 19, 2024
97c0223
get_out_channels
olilarkin Jan 19, 2024
92d6769
GetDilation
olilarkin Jan 19, 2024
180a475
VerifyConfigVersion
olilarkin Jan 19, 2024
b9519c6
get_dsp
olilarkin Jan 19, 2024
7afdef8
model_file
olilarkin Jan 19, 2024
116a3c5
GetDSPLegacy
olilarkin Jan 19, 2024
0aebcdd
apply
olilarkin Jan 19, 2024
5ecaa6b
get_activation
olilarkin Jan 19, 2024
52b8bdf
enable_fast_tanh
olilarkin Jan 19, 2024
7f09e1d
disable_fast_tanh
olilarkin Jan 19, 2024
56f4015
sUsingFastTanh
olilarkin Jan 19, 2024
f454366
_verify_weights
olilarkin Jan 19, 2024
8097e06
_head
olilarkin Jan 19, 2024
b04e553
_head_output
olilarkin Jan 19, 2024
e029787
mBlockVals
olilarkin Jan 19, 2024
ae667b9
mBlocks
olilarkin Jan 19, 2024
42b469a
actual_weights
olilarkin Jan 19, 2024
d859806
_Head
olilarkin Jan 19, 2024
57a3132
mBatchnorm
olilarkin Jan 19, 2024
3774bb4
mScale
olilarkin Jan 19, 2024
61bfe24
loc
olilarkin Jan 19, 2024
c39606c
mHeadBias
olilarkin Jan 19, 2024
aadaa06
mHeadWeight
olilarkin Jan 19, 2024
f3c400f
_layers
olilarkin Jan 19, 2024
d491f55
_process_sample
olilarkin Jan 19, 2024
33ae6a2
input
olilarkin Jan 19, 2024
23171a1
num_layers
olilarkin Jan 19, 2024
a4659f8
input_size
olilarkin Jan 19, 2024
3673a3d
hidden_size
olilarkin Jan 19, 2024
045df5a
GetHiddenSize
olilarkin Jan 19, 2024
afb6ef1
_get_input_size
olilarkin Jan 19, 2024
fc3cfb1
get_hidden_state
olilarkin Jan 19, 2024
1cbec54
_set_num_frames_
olilarkin Jan 19, 2024
d5b723d
_set_condition_array
olilarkin Jan 19, 2024
4413912
_get_condition_dim
olilarkin Jan 19, 2024
6181f49
PrepareForFrames
olilarkin Jan 19, 2024
977bc78
AdvanceBuffers
olilarkin Jan 19, 2024
95f2edf
_head_output
olilarkin Jan 19, 2024
3b366e8
mHeadScale
olilarkin Jan 19, 2024
485f8a1
mHeadArrays
olilarkin Jan 19, 2024
4deb410
mCondition
olilarkin Jan 19, 2024
a98e181
mLayerArrayOutputs
olilarkin Jan 19, 2024
67bc968
mLayerArrays
olilarkin Jan 19, 2024
1040bb7
_LayerArray
olilarkin Jan 19, 2024
e9a5092
_num_frames
olilarkin Jan 19, 2024
a5e5a98
layer_array_params
olilarkin Jan 19, 2024
8d953b5
head_scale
olilarkin Jan 19, 2024
2624368
with_head
olilarkin Jan 19, 2024
ef59cb9
_apply_activation_
olilarkin Jan 19, 2024
8e968df
_buffers
olilarkin Jan 19, 2024
02e4934
mActivation
olilarkin Jan 19, 2024
f34aca9
mHead
olilarkin Jan 19, 2024
6949bd8
mLayers
olilarkin Jan 19, 2024
fea45ec
_channels
olilarkin Jan 19, 2024
2e9f0f1
set_num_frames_
olilarkin Jan 19, 2024
658fe38
GetReceptiveField
olilarkin Jan 19, 2024
ce394f8
_get_channels
olilarkin Jan 19, 2024
78df991
GetBufferSize
olilarkin Jan 19, 2024
20ff7ef
_layer_buffers
olilarkin Jan 19, 2024
f4188b7
_head_rechannel
olilarkin Jan 19, 2024
bc5cc64
_layers
olilarkin Jan 19, 2024
692847e
Layer
olilarkin Jan 19, 2024
628f1ba
mReChannel
olilarkin Jan 19, 2024
0e6018e
mBufferStart
olilarkin Jan 19, 2024
863fa4c
get_receptive_field
olilarkin Jan 19, 2024
f4b91f1
headOuputs
olilarkin Jan 19, 2024
1bceb17
headInputs
olilarkin Jan 19, 2024
9c7cddd
layerOutputs
olilarkin Jan 19, 2024
5ac7329
layer_inputs
olilarkin Jan 19, 2024
f0899d9
prepareForFrames
olilarkin Jan 19, 2024
a148e7a
advance_buffers_
olilarkin Jan 19, 2024
70bd378
LayerArrayParams
olilarkin Jan 20, 2024
018c7d1
_GetReceptiveField duplicate?? TODO
olilarkin Jan 20, 2024
fb3b52c
mGated
olilarkin Jan 20, 2024
c04902c
mActivation
olilarkin Jan 20, 2024
4b5c05b
mConv
olilarkin Jan 20, 2024
ebb212c
mInputMixin
olilarkin Jan 20, 2024
a0cfdef
DilatedConv
olilarkin Jan 20, 2024
7f94ff6
getchannels
olilarkin Jan 20, 2024
110bb40
condition_size
olilarkin Jan 20, 2024
5310784
mActivation
olilarkin Jan 20, 2024
7157123
remove all instances of this->
olilarkin Jan 20, 2024
0c83506
final_head_array
olilarkin Jan 20, 2024
a58e46c
headInput
olilarkin Jan 20, 2024
c559245
weights_it -> weightsIterator
olilarkin Jan 20, 2024
115e727
const std::filesystem::path&
olilarkin Jan 20, 2024
8622454
const std::string&
olilarkin Jan 20, 2024
c43383d
conditionSize
olilarkin Jan 20, 2024
1754a13
headSize
olilarkin Jan 20, 2024
f8743bb
layerConfig
olilarkin Jan 20, 2024
e090dfe
configFileName
olilarkin Jan 20, 2024
7ea2631
Add GetDSP(const char* jsonStr)
olilarkin Jan 21, 2024
cd6e73a
Remove non-standard header file
olilarkin Jan 24, 2024
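Taken together, the commits above apply one consistent convention across the library: PascalCase method names, camelCase parameters and locals, mMember / sStatic prefixes, no this->, NAM_SAMPLE replaced by float, and a weightsIterator alias for the weight-loading iterator. A minimal before/after sketch of that convention (the Example class and its members are hypothetical, shown only to illustrate the pattern):

#include <vector>

// Alias introduced by this PR for the weight-loading iterator.
using weightsIterator = std::vector<float>::iterator;

// Old style (hypothetical example): snake_case methods with trailing underscores,
// underscore-prefixed members, NAM_SAMPLE buffers, this-> everywhere.
//
//   class example
//   {
//   public:
//     void process_(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames);
//   private:
//     long _input_buffer_offset;
//     static bool using_fast_tanh;
//   };

// New style after this PR:
class Example
{
public:
  void Process(float* input, float* output, const int numFrames);

private:
  long mInputBufferOffset;
  static bool sUsingFastTanh;
};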
12 changes: 6 additions & 6 deletions NAM/activations.cpp
@@ -6,24 +6,24 @@ nam::activations::ActivationHardTanh _HARD_TANH = nam::activations::ActivationHardTanh();
nam::activations::ActivationReLU _RELU = nam::activations::ActivationReLU();
nam::activations::ActivationSigmoid _SIGMOID = nam::activations::ActivationSigmoid();

bool nam::activations::Activation::using_fast_tanh = false;
bool nam::activations::Activation::sUsingFastTanh = false;

std::unordered_map<std::string, nam::activations::Activation*> nam::activations::Activation::_activations =
{{"Tanh", &_TANH}, {"Hardtanh", &_HARD_TANH}, {"Fasttanh", &_FAST_TANH}, {"ReLU", &_RELU}, {"Sigmoid", &_SIGMOID}};

nam::activations::Activation* tanh_bak = nullptr;

nam::activations::Activation* nam::activations::Activation::get_activation(const std::string name)
nam::activations::Activation* nam::activations::Activation::GetActivation(const std::string& name)
{
if (_activations.find(name) == _activations.end())
return nullptr;

return _activations[name];
}

void nam::activations::Activation::enable_fast_tanh()
void nam::activations::Activation::EnableFastTanh()
{
nam::activations::Activation::using_fast_tanh = true;
nam::activations::Activation::sUsingFastTanh = true;

if (_activations["Tanh"] != _activations["Fasttanh"])
{
@@ -32,9 +32,9 @@ void nam::activations::Activation::enable_fast_tanh()
}
}

void nam::activations::Activation::disable_fast_tanh()
void nam::activations::Activation::DisableFastTanh()
{
nam::activations::Activation::using_fast_tanh = false;
nam::activations::Activation::sUsingFastTanh = false;

if (_activations["Tanh"] == _activations["Fasttanh"])
{
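For orientation, a minimal usage sketch of the renamed activation API above (a standalone snippet, not part of the PR; it assumes the NAM headers are on the include path):

#include <vector>
#include "NAM/activations.h"

int main()
{
  // Look up a registered activation by name; GetActivation returns nullptr for unknown names.
  nam::activations::Activation* act = nam::activations::Activation::GetActivation("Tanh");
  if (act == nullptr)
    return 1;

  // Swap the registered "Tanh" entry for the fast approximation.
  nam::activations::Activation::EnableFastTanh();

  // Apply the activation in place to a raw buffer.
  std::vector<float> buffer(64, 0.5f);
  act->Apply(buffer.data(), (long)buffer.size());

  // Restore the standard tanh.
  nam::activations::Activation::DisableFastTanh();
  return 0;
}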
28 changes: 14 additions & 14 deletions NAM/activations.h
@@ -45,18 +45,18 @@ class Activation
public:
Activation() = default;
virtual ~Activation() = default;
virtual void apply(Eigen::MatrixXf& matrix) { apply(matrix.data(), matrix.rows() * matrix.cols()); }
virtual void apply(Eigen::Block<Eigen::MatrixXf> block) { apply(block.data(), block.rows() * block.cols()); }
virtual void apply(Eigen::Block<Eigen::MatrixXf, -1, -1, true> block)
virtual void Apply(Eigen::Ref<Eigen::MatrixXf> matrix) { Apply(matrix.data(), matrix.rows() * matrix.cols()); }
virtual void Apply(Eigen::Block<Eigen::MatrixXf> block) { Apply(block.data(), block.rows() * block.cols()); }
virtual void Apply(Eigen::Block<Eigen::MatrixXf, -1, -1, true> block)
{
apply(block.data(), block.rows() * block.cols());
Apply(block.data(), block.rows() * block.cols());
}
virtual void apply(float* data, long size) {}
virtual void Apply(float* data, long size) {}

static Activation* get_activation(const std::string name);
static void enable_fast_tanh();
static void disable_fast_tanh();
static bool using_fast_tanh;
static Activation* GetActivation(const std::string& name);
static void EnableFastTanh();
static void DisableFastTanh();
static bool sUsingFastTanh;

protected:
static std::unordered_map<std::string, Activation*> _activations;
@@ -65,7 +65,7 @@ class Activation
class ActivationTanh : public Activation
{
public:
void apply(float* data, long size) override
void Apply(float* data, long size) override
{
for (long pos = 0; pos < size; pos++)
{
@@ -77,7 +77,7 @@ class ActivationTanh : public Activation
class ActivationHardTanh : public Activation
{
public:
void apply(float* data, long size) override
void Apply(float* data, long size) override
{
for (long pos = 0; pos < size; pos++)
{
Expand All @@ -89,7 +89,7 @@ class ActivationHardTanh : public Activation
class ActivationFastTanh : public Activation
{
public:
void apply(float* data, long size) override
void Apply(float* data, long size) override
{
for (long pos = 0; pos < size; pos++)
{
Expand All @@ -101,7 +101,7 @@ class ActivationFastTanh : public Activation
class ActivationReLU : public Activation
{
public:
void apply(float* data, long size) override
void Apply(float* data, long size) override
{
for (long pos = 0; pos < size; pos++)
{
Expand All @@ -113,7 +113,7 @@ class ActivationReLU : public Activation
class ActivationSigmoid : public Activation
{
public:
void apply(float* data, long size) override
void Apply(float* data, long size) override
{
for (long pos = 0; pos < size; pos++)
{
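Since every built-in activation only overrides Apply(float*, long), and the new Eigen::Ref overload forwards whole matrices and column blocks to that method, adding an activation remains a one-class job. A hypothetical subclass, sketched for illustration only (it is not part of this PR):

#include "NAM/activations.h"

// Hypothetical leaky-ReLU activation, following the pattern of the built-in classes.
// Because the base-class Apply(Eigen::Ref<Eigen::MatrixXf>) forwards to this override,
// it works on full matrices and on blocks alike.
class ActivationLeakyReLU : public nam::activations::Activation
{
public:
  void Apply(float* data, long size) override
  {
    for (long pos = 0; pos < size; pos++)
    {
      data[pos] = data[pos] > 0.0f ? data[pos] : 0.01f * data[pos];
    }
  }
};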
154 changes: 77 additions & 77 deletions NAM/convnet.cpp
@@ -10,174 +10,174 @@
#include "dsp.h"
#include "convnet.h"

nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& weights)
nam::convnet::BatchNorm::BatchNorm(const int dim, weightsIterator& weights)
{
// Extract from param buffer
Eigen::VectorXf running_mean(dim);
Eigen::VectorXf running_var(dim);
Eigen::VectorXf _weight(dim);
Eigen::VectorXf _bias(dim);
Eigen::VectorXf weight(dim);
Eigen::VectorXf bias(dim);
for (int i = 0; i < dim; i++)
running_mean(i) = *(weights++);
for (int i = 0; i < dim; i++)
running_var(i) = *(weights++);
for (int i = 0; i < dim; i++)
_weight(i) = *(weights++);
weight(i) = *(weights++);
for (int i = 0; i < dim; i++)
_bias(i) = *(weights++);
bias(i) = *(weights++);
float eps = *(weights++);

// Convert to scale & loc
this->scale.resize(dim);
this->loc.resize(dim);
mScale.resize(dim);
mLoc.resize(dim);
for (int i = 0; i < dim; i++)
this->scale(i) = _weight(i) / sqrt(eps + running_var(i));
this->loc = _bias - this->scale.cwiseProduct(running_mean);
mScale(i) = weight(i) / sqrt(eps + running_var(i));
mLoc = bias - mScale.cwiseProduct(running_mean);
}

void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, const long i_end) const
void nam::convnet::BatchNorm::Process(Eigen::Ref<Eigen::MatrixXf> x, const long i_start, const long i_end) const
{
// todo using colwise?
// #speed but conv probably dominates
for (auto i = i_start; i < i_end; i++)
{
x.col(i) = x.col(i).cwiseProduct(this->scale);
x.col(i) += this->loc;
x.col(i) = x.col(i).cwiseProduct(mScale);
x.col(i) += mLoc;
}
}

void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int out_channels, const int _dilation,
const bool batchnorm, const std::string activation,
std::vector<float>::iterator& weights)
void nam::convnet::ConvNetBlock::SetWeights(const int inChannels, const int outChannels, const int dilation,
const bool doBatchNorm, const std::string& activation,
weightsIterator& weights)
{
this->_batchnorm = batchnorm;
mDoBatchNorm = doBatchNorm;
// HACK 2 kernel
this->conv.set_size_and_weights_(in_channels, out_channels, 2, _dilation, !batchnorm, weights);
if (this->_batchnorm)
this->batchnorm = BatchNorm(out_channels, weights);
this->activation = activations::Activation::get_activation(activation);
conv.SetSizeAndWeights(inChannels, outChannels, 2, dilation, !doBatchNorm, weights);
if (mDoBatchNorm)
mBatchnorm = BatchNorm(outChannels, weights);
mActivation = activations::Activation::GetActivation(activation);
}

void nam::convnet::ConvNetBlock::process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start,
void nam::convnet::ConvNetBlock::Process(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::Ref<Eigen::MatrixXf> output, const long i_start,
const long i_end) const
{
const long ncols = i_end - i_start;
this->conv.process_(input, output, i_start, ncols, i_start);
if (this->_batchnorm)
this->batchnorm.process_(output, i_start, i_end);
conv.Process(input, output, i_start, ncols, i_start);
if (mDoBatchNorm)
mBatchnorm.Process(output, i_start, i_end);

this->activation->apply(output.middleCols(i_start, ncols));
mActivation->Apply(output.middleCols(i_start, ncols));
}

long nam::convnet::ConvNetBlock::get_out_channels() const
long nam::convnet::ConvNetBlock::GetOutChannels() const
{
return this->conv.get_out_channels();
return conv.GetOutChannels();
}

nam::convnet::_Head::_Head(const int channels, std::vector<float>::iterator& weights)
nam::convnet::Head::Head(const int channels, weightsIterator& weights)
{
this->_weight.resize(channels);
mWeight.resize(channels);
for (int i = 0; i < channels; i++)
this->_weight[i] = *(weights++);
this->_bias = *(weights++);
mWeight[i] = *(weights++);
mBias = *(weights++);
}

void nam::convnet::_Head::process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start,
void nam::convnet::Head::Process(const Eigen::Ref<const Eigen::MatrixXf> input, Eigen::VectorXf& output, const long i_start,
const long i_end) const
{
const long length = i_end - i_start;
output.resize(length);
for (long i = 0, j = i_start; i < length; i++, j++)
output(i) = this->_bias + input.col(j).dot(this->_weight);
output(i) = mBias + input.col(j).dot(mWeight);
}

nam::convnet::ConvNet::ConvNet(const int channels, const std::vector<int>& dilations, const bool batchnorm,
const std::string activation, std::vector<float>& weights,
const double expected_sample_rate)
: Buffer(*std::max_element(dilations.begin(), dilations.end()), expected_sample_rate)
const std::string& activation, std::vector<float>& weights,
const double expectedSampleRate)
: Buffer(*std::max_element(dilations.begin(), dilations.end()), expectedSampleRate)
{
this->_verify_weights(channels, dilations, batchnorm, weights.size());
this->_blocks.resize(dilations.size());
std::vector<float>::iterator it = weights.begin();
VerifyWeights(channels, dilations, batchnorm, weights.size());
mBlocks.resize(dilations.size());
weightsIterator it = weights.begin();
for (size_t i = 0; i < dilations.size(); i++)
this->_blocks[i].set_weights_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
this->_block_vals.resize(this->_blocks.size() + 1);
for (auto& matrix : this->_block_vals)
mBlocks[i].SetWeights(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
mBlockVals.resize(mBlocks.size() + 1);
for (auto& matrix : mBlockVals)
matrix.setZero();
std::fill(this->_input_buffer.begin(), this->_input_buffer.end(), 0.0f);
this->_head = _Head(channels, it);
std::fill(mInputBuffer.begin(), mInputBuffer.end(), 0.0f);
mHead = Head(channels, it);
if (it != weights.end())
throw std::runtime_error("Didn't touch all the weights when initializing ConvNet");

_prewarm_samples = 1;
mPrewarmSamples = 1;
for (size_t i = 0; i < dilations.size(); i++)
_prewarm_samples += dilations[i];
mPrewarmSamples += dilations[i];
}


void nam::convnet::ConvNet::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames)
void nam::convnet::ConvNet::Process(float* input, float* output, const int numFrames)

{
this->_update_buffers_(input, num_frames);
UpdateBuffers(input, numFrames);
// Main computation!
const long i_start = this->_input_buffer_offset;
const long i_end = i_start + num_frames;
const long i_start = mInputBufferOffset;
const long i_end = i_start + numFrames;
// TODO one unnecessary copy :/ #speed
for (auto i = i_start; i < i_end; i++)
this->_block_vals[0](0, i) = this->_input_buffer[i];
for (size_t i = 0; i < this->_blocks.size(); i++)
this->_blocks[i].process_(this->_block_vals[i], this->_block_vals[i + 1], i_start, i_end);
mBlockVals[0](0, i) = mInputBuffer[i];
for (size_t i = 0; i < mBlocks.size(); i++)
mBlocks[i].Process(mBlockVals[i], mBlockVals[i + 1], i_start, i_end);
// TODO clean up this allocation
this->_head.process_(this->_block_vals[this->_blocks.size()], this->_head_output, i_start, i_end);
mHead.Process(mBlockVals[mBlocks.size()], mHeadOutput, i_start, i_end);
// Copy to required output array (TODO tighten this up)
for (int s = 0; s < num_frames; s++)
output[s] = this->_head_output(s);
for (int s = 0; s < numFrames; s++)
output[s] = mHeadOutput(s);
}

void nam::convnet::ConvNet::_verify_weights(const int channels, const std::vector<int>& dilations, const bool batchnorm,
const size_t actual_weights)
void nam::convnet::ConvNet::VerifyWeights(const int channels, const std::vector<int>& dilations, const bool batchnorm,
const size_t actualWeights)
{
// TODO
}

void nam::convnet::ConvNet::_update_buffers_(NAM_SAMPLE* input, const int num_frames)
void nam::convnet::ConvNet::UpdateBuffers(float* input, const int numFrames)
{
this->Buffer::_update_buffers_(input, num_frames);
Buffer::UpdateBuffers(input, numFrames);

const size_t buffer_size = this->_input_buffer.size();
const size_t buffer_size = mInputBuffer.size();

if (this->_block_vals[0].rows() != 1 || this->_block_vals[0].cols() != buffer_size)
if (mBlockVals[0].rows() != Eigen::Index(1) || mBlockVals[0].cols() != Eigen::Index(buffer_size))
{
this->_block_vals[0].resize(1, buffer_size);
this->_block_vals[0].setZero();
mBlockVals[0].resize(1, buffer_size);
mBlockVals[0].setZero();
}

for (size_t i = 1; i < this->_block_vals.size(); i++)
for (size_t i = 1; i < mBlockVals.size(); i++)
{
if (this->_block_vals[i].rows() == this->_blocks[i - 1].get_out_channels()
&& this->_block_vals[i].cols() == buffer_size)
if (mBlockVals[i].rows() == mBlocks[i - 1].GetOutChannels()
&& mBlockVals[i].cols() == Eigen::Index(buffer_size))
continue; // Already has correct size
this->_block_vals[i].resize(this->_blocks[i - 1].get_out_channels(), buffer_size);
this->_block_vals[i].setZero();
mBlockVals[i].resize(mBlocks[i - 1].GetOutChannels(), buffer_size);
mBlockVals[i].setZero();
}
}

void nam::convnet::ConvNet::_rewind_buffers_()
void nam::convnet::ConvNet::RewindBuffers()
{
// Need to rewind the block vals first because Buffer::rewind_buffers()
// resets the offset index
// The last _block_vals is the output of the last block and doesn't need to be
// rewound.
for (size_t k = 0; k < this->_block_vals.size() - 1; k++)
for (size_t k = 0; k < mBlockVals.size() - 1; k++)
{
// We actually don't need to pull back a lot...just as far as the first
// input sample would grab from dilation
const long _dilation = this->_blocks[k].conv.get_dilation();
for (long i = this->_receptive_field - _dilation, j = this->_input_buffer_offset - _dilation;
j < this->_input_buffer_offset; i++, j++)
for (long r = 0; r < this->_block_vals[k].rows(); r++)
this->_block_vals[k](r, i) = this->_block_vals[k](r, j);
const long dilation = mBlocks[k].conv.GetDilation();
for (long i = mReceptiveField - dilation, j = mInputBufferOffset - dilation;
j < mInputBufferOffset; i++, j++)
for (long r = 0; r < mBlockVals[k].rows(); r++)
mBlockVals[k](r, i) = mBlockVals[k](r, j);
}
// Now we can do the rest of the rewind
this->Buffer::_rewind_buffers_();
Buffer::RewindBuffers();
}
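A note on the BatchNorm constructor above: folding the running statistics and the learned weight and bias into mScale and mLoc is a rearrangement of the standard inference-time batch-norm formula, which is why Process needs only one element-wise multiply and add per column:

  y = weight * (x - running_mean) / sqrt(running_var + eps) + bias
    = mScale * x + mLoc

  with  mScale = weight / sqrt(running_var + eps)
        mLoc   = bias - mScale * running_mean     (all operations element-wise per channel)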