optprov: make weight function configurable
dennis-tra committed Dec 9, 2022
1 parent 0dc7fac commit 36d41c7
Showing 3 changed files with 110 additions and 22 deletions.
39 changes: 17 additions & 22 deletions netsize/netsize.go
@@ -37,22 +37,28 @@ type Estimator struct {
     measurementsLk sync.RWMutex
     measurements   map[int][]measurement

-    netSizeCache *float64
+    netSizeCache   *float64
+    weightFuncType WeightFuncType
 }

-func NewEstimator(localID peer.ID, rt *kbucket.RoutingTable, bucketSize int) *Estimator {
-    // initialize map to hold measurement observations
-    measurements := map[int][]measurement{}
-    for i := 0; i < bucketSize; i++ {
-        measurements[i] = []measurement{}
+func NewEstimator(localID peer.ID, rt *kbucket.RoutingTable, opts ...Option) *Estimator {
+    es := &Estimator{
+        localID:        kbucket.ConvertPeerID(localID),
+        rt:             rt,
+        measurements:   map[int][]measurement{},
+        bucketSize:     DefaultBucketSize,
+        weightFuncType: DefaultWeightFuncType,
     }

-    return &Estimator{
-        localID:      kbucket.ConvertPeerID(localID),
-        rt:           rt,
-        bucketSize:   bucketSize,
-        measurements: measurements,
+    for _, opt := range opts {
+        opt(es)
     }
+
+    for i := 0; i < es.bucketSize; i++ {
+        es.measurements[i] = []measurement{}
+    }
+
+    return es
 }

 // NormedDistance calculates the normed XOR distance of the given keys (from 0 to 1).
@@ -204,17 +210,6 @@ func (e *Estimator) NetworkSize() (float64, error) {
     return netSize, nil
 }

-// calcWeight weighs data points exponentially less if they fall into a non-full bucket.
-// It weighs distance estimates based on their CPLs and bucket levels.
-// Bucket Level: 20 -> 1/2^0 -> weight: 1
-// Bucket Level: 17 -> 1/2^3 -> weight: 1/8
-// Bucket Level: 10 -> 1/2^10 -> weight: 1/1024
-func (e *Estimator) calcWeight(key string) float64 {
-    cpl := kbucket.CommonPrefixLen(kbucket.ConvertKey(key), e.localID)
-    bucketLevel := e.rt.NPeersForCpl(uint(cpl))
-    return math.Pow(2, float64(bucketLevel-e.bucketSize))
-}
-
 // garbageCollect removes all measurements from the list that fell out of the measurement time window.
 func (e *Estimator) garbageCollect() {
     logger.Debug("Running garbage collection")
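
The constructor now takes functional options instead of a positional bucket size, and the new default weight function (WeightFuncExponentialCPL, see netsize/options.go and netsize/weights.go below) differs from the previously hard-coded weighting, which corresponds to WeightFuncExponentialBucketLevel. The following is a minimal migration sketch, written as an in-package helper so no repository import path has to be assumed; newTunedEstimator and the peer import path are illustrative, not part of this commit:

package netsize

import (
    kbucket "github.com/libp2p/go-libp2p-kbucket"
    "github.com/libp2p/go-libp2p/core/peer" // assumed path; adjust to the go-libp2p version this module uses
)

// newTunedEstimator is a hypothetical helper showing how a call site that
// previously used NewEstimator(localID, rt, 20) can migrate to the new
// options-based constructor.
func newTunedEstimator(localID peer.ID, rt *kbucket.RoutingTable) *Estimator {
    return NewEstimator(localID, rt,
        WithBucketSize(20),                               // override DefaultBucketSize explicitly
        WithWeightFunc(WeightFuncExponentialBucketLevel), // keep the pre-commit weighting behavior
    )
}
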
20 changes: 20 additions & 0 deletions netsize/options.go
@@ -0,0 +1,20 @@
package netsize

type Option func(*Estimator)

var (
    DefaultBucketSize     = 20
    DefaultWeightFuncType = WeightFuncExponentialCPL
)

func WithBucketSize(bucketSize int) Option {
    return func(es *Estimator) {
        es.bucketSize = bucketSize
    }
}

func WithWeightFunc(wft WeightFuncType) Option {
    return func(es *Estimator) {
        es.weightFuncType = wft
    }
}
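
Since an Option is just a function over *Estimator, the new options can be exercised directly. A small test sketch, assuming it sits next to options.go in package netsize; it is not part of this commit:

package netsize

import "testing"

func TestOptionsOverrideDefaults(t *testing.T) {
    es := &Estimator{
        bucketSize:     DefaultBucketSize,
        weightFuncType: DefaultWeightFuncType,
    }

    // apply the options by hand, exactly as NewEstimator does in its opts loop
    WithBucketSize(10)(es)
    WithWeightFunc(WeightFuncInverse)(es)

    if es.bucketSize != 10 {
        t.Errorf("expected bucket size 10, got %d", es.bucketSize)
    }
    if es.weightFuncType != WeightFuncInverse {
        t.Errorf("expected weight func %s, got %s", WeightFuncInverse, es.weightFuncType)
    }
}
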
73 changes: 73 additions & 0 deletions netsize/weights.go
@@ -0,0 +1,73 @@
package netsize

import (
    "fmt"
    "math"

    kbucket "github.com/libp2p/go-libp2p-kbucket"
)

type WeightFuncType string

const (
    WeightFuncNone                   WeightFuncType = "NONE"
    WeightFuncInverse                WeightFuncType = "INVERSE"
    WeightFuncExponentialCPL         WeightFuncType = "EXPONENTIAL_CPL"
    WeightFuncExponentialBucketLevel WeightFuncType = "EXPONENTIAL_BUCKET_LEVEL"
)

// calcWeight selects the configured weight function and applies it to
// calculate the weight of the given data point.
func (e *Estimator) calcWeight(key string) float64 {
    switch e.weightFuncType {
    case WeightFuncNone:
        return e.weightFuncNone(key)
    case WeightFuncInverse:
        return e.weightFuncInverse(key)
    case WeightFuncExponentialCPL:
        return e.weightFuncExponentialCPL(key)
    case WeightFuncExponentialBucketLevel:
        return e.weightFuncExponentialBucketLevel(key)
    default:
        panic(fmt.Sprintf("unknown weight func type %s", e.weightFuncType))
    }
}

// weightFuncNone does not weigh data points but treats every sample the same.
func (e *Estimator) weightFuncNone(key string) float64 {
    return 1
}

// weightFuncInverse decreases the weight of a data point in inverse proportion
// to the common prefix length between the requested key and the local peer ID.
// CPL: 0 -> 1/(0 + 1) -> 1
// CPL: 1 -> 1/(1 + 1) -> 0.5
// CPL: 2 -> 1/(2 + 1) -> 0.333
// CPL: 3 -> 1/(3 + 1) -> 0.25
func (e *Estimator) weightFuncInverse(key string) float64 {
    cpl := kbucket.CommonPrefixLen(kbucket.ConvertKey(key), e.localID)
    return 1 / (float64(cpl) + 1)
}

// weightFuncExponentialCPL is similar to weightFuncInverse, but instead of an
// inversely proportional relationship it weighs data points exponentially less
// with increasing common prefix length between the requested key and the local peer ID.
// CPL: 0 -> 1/2**0 -> 1
// CPL: 1 -> 1/2**1 -> 0.5
// CPL: 2 -> 1/2**2 -> 0.25
// CPL: 3 -> 1/2**3 -> 0.125
func (e *Estimator) weightFuncExponentialCPL(key string) float64 {
    cpl := kbucket.CommonPrefixLen(kbucket.ConvertKey(key), e.localID)
    return 1 / math.Pow(2, float64(cpl))
}

// weightFuncExponentialBucketLevel weighs data points exponentially less if they fall into a non-full bucket.
// It weighs distance estimates based on their CPLs and bucket levels.
// Bucket Level: 20 -> 1/2^0 -> weight: 1
// Bucket Level: 17 -> 1/2^3 -> weight: 1/8
// Bucket Level: 10 -> 1/2^10 -> weight: 1/1024
func (e *Estimator) weightFuncExponentialBucketLevel(key string) float64 {
    cpl := kbucket.CommonPrefixLen(kbucket.ConvertKey(key), e.localID)
    bucketLevel := e.rt.NPeersForCpl(uint(cpl))
    return math.Pow(2, float64(bucketLevel-e.bucketSize))
}
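
To make the four strategies easier to compare side by side, the following standalone sketch plugs example CPLs and bucket levels into the same formulas used above. The bucket size of 20 and the sample values are illustrative only; in the estimator, cpl and bucketLevel come from the routing table rather than constants:

package main

import (
    "fmt"
    "math"
)

func main() {
    bucketSize := 20
    samples := []struct{ cpl, bucketLevel int }{
        {0, 20}, {1, 20}, {3, 17}, {10, 10},
    }
    for _, s := range samples {
        none := 1.0                                                 // WeightFuncNone
        inverse := 1 / (float64(s.cpl) + 1)                         // WeightFuncInverse
        expCPL := 1 / math.Pow(2, float64(s.cpl))                   // WeightFuncExponentialCPL
        expBucket := math.Pow(2, float64(s.bucketLevel-bucketSize)) // WeightFuncExponentialBucketLevel
        fmt.Printf("cpl=%2d bucketLevel=%2d none=%.2f inverse=%.3f expCPL=%.4f expBucketLevel=%.5f\n",
            s.cpl, s.bucketLevel, none, inverse, expCPL, expBucket)
    }
}
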
