Commit
Update kernel generators
aidangomez committed Aug 18, 2015
1 parent ad80f56 commit e1ee5ff
Showing 2 changed files with 83 additions and 14 deletions.
90 changes: 79 additions & 11 deletions include/caffe/util/assign_conv_weights.h
@@ -9,6 +9,7 @@
#include "caffe/solver.hpp"
#include "caffe/util/fft.hpp"
#include "caffe/util/read_audio.hpp"
#include "caffe/util/rng.hpp"


namespace caffe {
@@ -87,22 +88,34 @@ class ThinKernel : public KernelGen<T> {
const auto numKernels = KernelGen<T>::kNumKernels;
const auto kernelSize = KernelGen<T>::kKernelSize;

auto step = static_cast<double>(kernelSize) / numKernels;
auto kernelWeights = std::valarray<T>(kernelSize * numKernels);

for (auto kernel = 0; kernel < numKernels; ++kernel) {
auto peakLocation = step * kernel / kernelSize;
for (auto position = 0; position < kernelSize; ++position) {
auto x = static_cast<double>(position) / kernelSize;
kernelWeights[(kernel * kernelSize) + position] = KernelGen<T>::curve(x, peakLocation, kB);
T scale = 0.1;
caffe_rng_uniform<T>(numKernels * kernelSize, -scale, scale,
std::begin(kernelWeights));

std::normal_distribution<> random_distribution(0, 0.5);
std::function<T()> variate_generator = std::bind(random_distribution, std::ref(*caffe_rng()));

for (auto n = 0; n < numKernels; n += 1) {
auto ng = 2 * (variate_generator() + 0.5);
for (auto g = 0; g < ng; g += 1) {
auto height = variate_generator();
auto mid = kernelSize/2 + variate_generator()*kernelSize;
auto width = 1 + variate_generator();
for (auto i = 0; i < kernelSize; i += 1) {
auto offset = n * kernelSize + i;
kernelWeights[offset] += gaussian(i, height, mid, width);
}
}
}

return kernelWeights;
}

protected:
double kB = -0.0005;
inline T gaussian(T x, T height, T mid, T width) {
x -= mid;
return height * std::exp(-x*x / (2*width*width));
}
};
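
The thin-peak generator above now initializes each kernel as a uniform noise floor in [-0.1, 0.1] and then adds a random number of Gaussian bumps with random height, centre, and width; the number of bumps is drawn from a normal distribution centred at one, and a non-positive draw yields none. The sketch below is a standalone illustration of that construction, not part of the commit; it uses std::mt19937 in place of caffe_rng() and a plain std::vector in place of std::valarray.

#include <cmath>
#include <random>
#include <vector>

// Illustrative sketch: build one thin-peak kernel as uniform noise plus a few
// random Gaussian bumps, mirroring ThinKernel::kernelGen above.
std::vector<float> makeThinKernel(int kernelSize, std::mt19937& rng) {
  std::uniform_real_distribution<float> noise(-0.1f, 0.1f);
  std::normal_distribution<double> variate(0.0, 0.5);

  std::vector<float> kernel(kernelSize);
  for (auto& w : kernel) {
    w = noise(rng);  // noise floor, analogous to the caffe_rng_uniform fill
  }

  // Number of bumps ~ N(1, 1) truncated to an int; negative draws give zero bumps.
  const int numBumps = static_cast<int>(2 * (variate(rng) + 0.5));
  for (int g = 0; g < numBumps; ++g) {
    const double height = variate(rng);
    const double mid = kernelSize / 2.0 + variate(rng) * kernelSize;
    const double width = 1.0 + variate(rng);
    for (int i = 0; i < kernelSize; ++i) {
      const double x = i - mid;
      kernel[i] += height * std::exp(-x * x / (2 * width * width));
    }
  }
  return kernel;
}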

template <typename T>
@@ -129,8 +142,12 @@ class FFTKernel : public KernelGen<T> {

auto fft = caffe::FastFourierTransform<T>(2 * kernelSize, options);
fft.process(std::begin(fftBuffer), 2 * kernelSize);

std::move(std::begin(fftBuffer), std::begin(fftBuffer) + kernelSize, std::begin(data) + (i * kernelSize));

std::transform(std::begin(fftBuffer), std::begin(fftBuffer) + 2 * kernelSize, std::begin(fftBuffer), [kernelSize](const T& a){
return a / (2 * kernelSize);
});

std::move(std::begin(fftBuffer), std::begin(fftBuffer) + kernelSize, std::begin(data) + (i * kernelSize));
} else {
auto dataStart = std::begin(data) + ((i % 84) * kernelSize);
std::move_backward(dataStart, dataStart + kernelSize, std::begin(data) + (i * kernelSize));
@@ -141,6 +158,51 @@ class FFTKernel : public KernelGen<T> {
}
};
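
The change to FFTKernel above inserts a scaling pass before the first half of the transformed buffer is copied out: every coefficient is divided by 2 * kernelSize, presumably to keep the initial weights on a scale comparable to the other generators. A minimal standalone sketch of that step, using a plain std::vector rather than the valarray and caffe::FastFourierTransform wrapper used above:

#include <algorithm>
#include <vector>

// Illustrative sketch: scale raw FFT output by 1 / (2 * kernelSize), as in the
// std::transform call added above.
void normalizeSpectrum(std::vector<float>& fftBuffer, int kernelSize) {
  const float norm = 1.0f / (2 * kernelSize);
  std::transform(fftBuffer.begin(), fftBuffer.end(), fftBuffer.begin(),
                 [norm](float a) { return a * norm; });
}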

template <typename T>
class WaveletKernel : public KernelGen<T> {
public:
WaveletKernel(int numKernels, int kernelSize) : KernelGen<T>(numKernels, kernelSize) {}
WaveletKernel(WaveletKernel& other) : KernelGen<T>(other) {}
~WaveletKernel(){}

protected:
inline std::valarray<T> kernelGen() {
const auto numKernels = KernelGen<T>::kNumKernels;
const auto kernelSize = KernelGen<T>::kKernelSize;

auto data = std::valarray<T>(numKernels * kernelSize);

T scale = 0.1;
caffe_rng_uniform<T>(numKernels * kernelSize, -scale, scale,
std::begin(data));

std::uniform_real_distribution<> fdistribution(20, 20000);
std::function<T()> fgenerator = std::bind(fdistribution, std::ref(*caffe_rng()));

std::uniform_real_distribution<> pdistribution(-M_PI/2, M_PI/2);
std::function<T()> pgenerator = std::bind(pdistribution, std::ref(*caffe_rng()));

const auto width = kernelSize;
for (auto n = 0; n < numKernels; n += 1) {
const auto f = fgenerator();
const auto p = pgenerator();
const auto offset = n * kernelSize;
generate(f, p, std::begin(data) + offset, width);
}

return data;
}
inline void generate(T frequency, T phase, T* data, int capacity) {
const auto sampleRate = 44100.0;
const auto dt = 1.0 / sampleRate;
auto time = 0.0;
for (std::size_t i = 0; i < capacity; i += 1) {
data[i] = std::sin(2 * M_PI * frequency * time + phase);
time += dt;
}
}
};
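
The new WaveletKernel draws a frequency uniformly from 20-20000 Hz and a phase from [-π/2, π/2] for each kernel, then writes a full-length sinusoid sampled at 44.1 kHz over the uniform noise floor. Below is a self-contained sketch of the sinusoid part, not part of the commit; std::mt19937 stands in for caffe_rng().

#include <cmath>
#include <random>
#include <vector>

// Illustrative sketch: one wavelet-style kernel, i.e. a sine wave at a random
// audible frequency and phase, mirroring WaveletKernel::generate above.
std::vector<float> makeWaveletKernel(int kernelSize, std::mt19937& rng) {
  std::uniform_real_distribution<double> freq(20.0, 20000.0);
  std::uniform_real_distribution<double> phase(-M_PI / 2, M_PI / 2);
  const double f = freq(rng);
  const double p = phase(rng);
  const double dt = 1.0 / 44100.0;  // sample period at 44.1 kHz

  std::vector<float> kernel(kernelSize);
  for (int i = 0; i < kernelSize; ++i) {
    kernel[i] = static_cast<float>(std::sin(2.0 * M_PI * f * i * dt + p));
  }
  return kernel;
}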

template <typename T>
std::shared_ptr<caffe::Blob<T>> generateKernel(int numKernels, int kernelSize);
template <typename T>
@@ -173,6 +235,9 @@ template <typename T>
case caffe::FillerParameter_PeakType_THIN:
kernel = std::make_shared<ThinKernel<T>>(numKernels, kernelSize);
break;
case caffe::FillerParameter_PeakType_WAVELET:
kernel = std::make_shared<WaveletKernel<T>>(numKernels, kernelSize);
break;
}

auto data = static_cast<T*>(layer->blobs()[0]->data()->mutable_cpu_data()); // first blob is weights, second is biases
@@ -195,6 +260,9 @@ inline void assignConvolutionWeights(T* data, int numKernels, int kernelSize, ca
case caffe::FillerParameter_PeakType_THIN:
kernel = std::make_shared<ThinKernel<T>>(numKernels, kernelSize);
break;
case caffe::FillerParameter_PeakType_WAVELET:
kernel = std::make_shared<WaveletKernel<T>>(numKernels, kernelSize);
break;
}
kernel->generateKernel(data);
}
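
A hypothetical call into the helper above with the new peak type (the sizes are illustrative, and the final parameter is assumed to be the caffe::FillerParameter::PeakType value switched on above):

#include <vector>
#include "caffe/util/assign_conv_weights.h"

// Illustrative only: fill a raw weight buffer for 84 kernels of length 64
// using the new wavelet generator.
void fillExampleWeights() {
  const int numKernels = 84;
  const int kernelSize = 64;
  std::vector<float> weights(numKernels * kernelSize);
  caffe::assignConvolutionWeights(weights.data(), numKernels, kernelSize,
                                  caffe::FillerParameter_PeakType_WAVELET);
}
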
7 changes: 4 additions & 3 deletions src/caffe/proto/caffe.proto
@@ -58,9 +58,10 @@ message FillerParameter {
}
optional VarianceNorm variance_norm = 8 [default = FAN_IN];
enum PeakType {
THICK = 0;
THIN = 1;
FFT = 2;
THICK = 0;
THIN = 1;
FFT = 2;
WAVELET = 3;
}
optional PeakType peak_type = 9 [default = THIN];
}
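
With the new enum value in place, the wavelet generator can be selected on a FillerParameter through the standard generated protobuf accessors, for example (a minimal sketch; the surrounding layer and filler wiring is omitted):

caffe::FillerParameter filler;
filler.set_peak_type(caffe::FillerParameter_PeakType_WAVELET);
// filler.peak_type() now returns WAVELET instead of the THIN default.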
