diff --git a/Gopkg.lock b/Gopkg.lock index 7aa8cad2..1fd17a95 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -139,14 +139,6 @@ pruneopts = "NUT" revision = "57eb5e1fc594ad4b0b1dbea7b286d299e0cb43c2" -[[projects]] - branch = "master" - digest = "1:ffc62713230cd494711f6a0f7ecb04aad4375893677307e87bb219e7677ffb00" - name = "github.com/go-graphite/go-whisper" - packages = ["."] - pruneopts = "NUT" - revision = "73c5cbd69142cbef9bd8ef9c6d485a8d34b4554c" - [[projects]] digest = "1:d32823ccbd16481c42356a1ae7f2284761da19cae3131b554a0b7c086a650292" name = "github.com/go-ini/ini" @@ -250,27 +242,6 @@ pruneopts = "NUT" revision = "bb797dc4fb8320488f47bf11de07a733d7233e1f" -[[projects]] - digest = "1:6648a6aa5d694e5eba37c2ee2a6ef275dc230d00a5b94e9c0f747b9788ebb11d" - name = "github.com/lomik/go-carbon" - packages = [ - "helper", - "persister", - "points", - "tags", - ] - pruneopts = "NUT" - revision = "c802cb57a59000797a8b7110a0b7c5ade40983d8" - version = "v0.13.0" - -[[projects]] - branch = "master" - digest = "1:b13a49b5deae0fad34bf7fd8fee6b06be8288fe67b0d28ae5a3b70de63e5749d" - name = "github.com/lomik/zapwriter" - packages = ["."] - pruneopts = "NUT" - revision = "2ec2b9a616807f3fe930c075c48fcf32d1f2d250" - [[projects]] branch = "master" digest = "1:fbf2e13dbd89ee228a027b6f49cc139731496b7e910d458d0ff95f95fe3c6250" @@ -301,6 +272,14 @@ pruneopts = "NUT" revision = "5a004441f897722c627870a981d02b29924215fa" +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "NUT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + [[projects]] digest = "1:702871fd067843dac45acf878c75d14fea30ad6084c93a9babfbf79fc9369a61" name = "github.com/raintank/metrictank" @@ -330,25 +309,12 @@ revision = "dfe15e36048539f4cda41f240b27a5ca25b9cd46" [[projects]] - branch = "master" - digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util", - ] + digest = "1:b5c8b4a0ad5f65a85eb2a9f89e30c638ef8b99f8a3f078467cea778869757666" + name = "github.com/stretchr/testify" + packages = ["assert"] pruneopts = "NUT" - revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43" + revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" + version = "v1.1.4" [[projects]] branch = "master" @@ -366,37 +332,6 @@ revision = "0cea1fa86e8403be1284013014f87ab942056de8" version = "v1.0-beta" -[[projects]] - digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "NUT" - revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "NUT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:85674ac609b704fd4e9f463553b6ffc3a3527a993ae0ba550eb56beaabdfe094" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "NUT" - revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" - version = "v1.9.1" - [[projects]] branch = 
"master" digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" @@ -588,11 +523,11 @@ "github.com/gorilla/mux", "github.com/jpillora/backoff", "github.com/kisielk/og-rek", - "github.com/lomik/go-carbon/persister", "github.com/metrics20/go-metrics20/carbon20", "github.com/raintank/metrictank/cluster/partitioner", "github.com/sirupsen/logrus", "github.com/streadway/amqp", + "github.com/stretchr/testify/assert", "github.com/taylorchu/toki", "gopkg.in/raintank/schema.v1", "gopkg.in/raintank/schema.v1/msg", diff --git a/cfg/cfg.go b/cfg/cfg.go index e85f24f1..fb3234e2 100644 --- a/cfg/cfg.go +++ b/cfg/cfg.go @@ -1,12 +1,17 @@ package cfg import ( + "time" + "github.com/graphite-ng/carbon-relay-ng/validate" + m20 "github.com/metrics20/go-metrics20/carbon20" ) type Config struct { Listen_addr string + Plain_read_timeout Duration Pickle_addr string + Pickle_read_timeout Duration Admin_addr string Http_addr string Spool_dir string @@ -28,6 +33,29 @@ type Config struct { Rewriter []Rewriter } +func NewConfig() Config { + return Config{ + Plain_read_timeout: Duration{ + 2 * time.Minute, + }, + Pickle_read_timeout: Duration{ + 2 * time.Minute, + }, + Validation_level_legacy: validate.LevelLegacy{m20.MediumLegacy}, + Validation_level_m20: validate.LevelM20{m20.MediumM20}, + } +} + +type Duration struct { + time.Duration +} + +func (d *Duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} + type Aggregation struct { Function string Regex string diff --git a/cmd/carbon-relay-ng/carbon-relay-ng.go b/cmd/carbon-relay-ng/carbon-relay-ng.go index eecfeb58..884a4970 100644 --- a/cmd/carbon-relay-ng/carbon-relay-ng.go +++ b/cmd/carbon-relay-ng/carbon-relay-ng.go @@ -26,7 +26,6 @@ import ( tbl "github.com/graphite-ng/carbon-relay-ng/table" "github.com/graphite-ng/carbon-relay-ng/ui/telnet" "github.com/graphite-ng/carbon-relay-ng/ui/web" - m20 "github.com/metrics20/go-metrics20/carbon20" log "github.com/sirupsen/logrus" "strconv" @@ -36,7 +35,7 @@ import ( var ( config_file string - config cfg.Config + config = cfg.NewConfig() to_dispatch = make(chan []byte) inputs []input.Plugin shutdownTimeout = time.Second * 30 // how long to wait for shutdown @@ -63,10 +62,6 @@ func main() { runtime.SetBlockProfileRate(*blockProfileRate) runtime.MemProfileRate = *memProfileRate - // validation defaults - config.Validation_level_legacy.Level = m20.MediumLegacy - config.Validation_level_m20.Level = m20.MediumM20 - config_file = "/etc/carbon-relay-ng.ini" if 1 == flag.NArg() { val := flag.Arg(0) @@ -168,11 +163,11 @@ func main() { } if config.Listen_addr != "" { - inputs = append(inputs, input.NewPlain(config.Listen_addr, table)) + inputs = append(inputs, input.NewPlain(config.Listen_addr, config.Plain_read_timeout.Duration, table)) } if config.Pickle_addr != "" { - inputs = append(inputs, input.NewPickle(config.Pickle_addr, table)) + inputs = append(inputs, input.NewPickle(config.Pickle_addr, config.Pickle_read_timeout.Duration, table)) } if config.Amqp.Amqp_enabled == true { diff --git a/docs/logging.md b/docs/logging.md index 0d676235..b443f93d 100644 --- a/docs/logging.md +++ b/docs/logging.md @@ -1,8 +1,8 @@ # log level description * trace: for tracing messages from start to finish, including unroutable/discards [1] -* debug: state changes that we only need to know when debugging [1] -* info: harmless, but interesting not-so-common events. e.g. connection changes, manually triggered flushes, etc. 
diff --git a/docs/logging.md b/docs/logging.md
index 0d676235..b443f93d 100644
--- a/docs/logging.md
+++ b/docs/logging.md
@@ -1,8 +1,8 @@
 # log level description
 * trace: for tracing messages from start to finish, including unroutable/discards [1]
-* debug: state changes that we only need to know when debugging [1]
-* info: harmless, but interesting not-so-common events. e.g. connection changes, manually triggered flushes, etc. (this used to be `notice`)
+* debug: state changes that we only need to know about when debugging, e.g. client connections opening and closing [1]
+* info: harmless, but interesting not-so-common events. e.g. outbound connection changes, manually triggered flushes, etc. (this used to be `notice`)
 * warn: minor issues (network timeouts etc)
 * error: recoverable errors
 * fatal: errors and problems that result in shutdown
diff --git a/examples/carbon-relay-ng.ini b/examples/carbon-relay-ng.ini
index 76b603b1..52f42a5d 100644
--- a/examples/carbon-relay-ng.ini
+++ b/examples/carbon-relay-ng.ini
@@ -50,9 +50,13 @@ blacklist = [
 ### plaintext Carbon ###
 listen_addr = "0.0.0.0:2003"
+# close inbound plaintext connections if they've been idle for this long ("0s" to disable)
+plain_read_timeout = "2m"
 
 ### Pickle Carbon ###
 pickle_addr = "0.0.0.0:2013"
+# close inbound pickle connections if they've been idle for this long ("0s" to disable)
+pickle_read_timeout = "2m"
 
 ### AMQP ###
 [amqp]
diff --git a/vendor/github.com/go-graphite/go-whisper/LICENCE.txt b/go-whisper/LICENCE.txt
similarity index 100%
rename from vendor/github.com/go-graphite/go-whisper/LICENCE.txt
rename to go-whisper/LICENCE.txt
diff --git a/go-whisper/whisper.go b/go-whisper/whisper.go
new file mode 100644
index 00000000..615bfb8f
--- /dev/null
+++ b/go-whisper/whisper.go
@@ -0,0 +1,186 @@
+// Package whisper is a partial copy of https://github.com/go-graphite/go-whisper;
+// in particular, it removes the flock code, which doesn't build on Windows.
+package whisper
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+const (
+	PointSize = 12
+
+	Seconds = 1
+	Minutes = 60
+	Hours   = 3600
+	Days    = 86400
+	Weeks   = 86400 * 7
+	Years   = 86400 * 365
+)
+
+func unitMultiplier(s string) (int, error) {
+	switch {
+	case strings.HasPrefix(s, "s"):
+		return Seconds, nil
+	case strings.HasPrefix(s, "m"):
+		return Minutes, nil
+	case strings.HasPrefix(s, "h"):
+		return Hours, nil
+	case strings.HasPrefix(s, "d"):
+		return Days, nil
+	case strings.HasPrefix(s, "w"):
+		return Weeks, nil
+	case strings.HasPrefix(s, "y"):
+		return Years, nil
+	}
+	return 0, fmt.Errorf("Invalid unit multiplier [%v]", s)
+}
+
+var retentionRegexp *regexp.Regexp = regexp.MustCompile("^(\\d+)([smhdwy]+)$")
+
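+// parseRetentionPart parses one part of a retention definition: either a plain
+// number of seconds ("2592000") or a number with a unit suffix ("30d").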
+func parseRetentionPart(retentionPart string) (int, error) {
+	part, err := strconv.ParseInt(retentionPart, 10, 32)
+	if err == nil {
+		return int(part), nil
+	}
+	if !retentionRegexp.MatchString(retentionPart) {
+		return 0, fmt.Errorf("%v", retentionPart)
+	}
+	matches := retentionRegexp.FindStringSubmatch(retentionPart)
+	value, err := strconv.ParseInt(matches[1], 10, 32)
+	if err != nil {
+		panic(fmt.Sprintf("Regex on %v is borked, %v cannot be parsed as int", retentionPart, matches[1]))
+	}
+	multiplier, err := unitMultiplier(matches[2])
+	return multiplier * int(value), err
+}
+
+/*
+  Parse a retention definition as you would find in the storage-schemas.conf of a Carbon install.
+  Note that this only parses a single retention definition; if you have multiple definitions (separated by a comma)
+  you will have to split them yourself.
+
+  ParseRetentionDef("10s:14d")  Retention{10, 120960}
+
+  See: http://graphite.readthedocs.org/en/1.0/config-carbon.html#storage-schemas-conf
+*/
+func ParseRetentionDef(retentionDef string) (*Retention, error) {
+	parts := strings.Split(retentionDef, ":")
+	if len(parts) != 2 {
+		return nil, fmt.Errorf("Not enough parts in retentionDef [%v]", retentionDef)
+	}
+	precision, err := parseRetentionPart(parts[0])
+	if err != nil {
+		return nil, fmt.Errorf("Failed to parse precision: %v", err)
+	}
+
+	points, err := parseRetentionPart(parts[1])
+	if err != nil {
+		return nil, fmt.Errorf("Failed to parse points: %v", err)
+	}
+	points /= precision
+
+	return &Retention{precision, points}, err
+}
+
+func ParseRetentionDefs(retentionDefs string) (Retentions, error) {
+	retentions := make(Retentions, 0)
+	for _, retentionDef := range strings.Split(retentionDefs, ",") {
+		retention, err := ParseRetentionDef(retentionDef)
+		if err != nil {
+			return nil, err
+		}
+		retentions = append(retentions, retention)
+	}
+	return retentions, nil
+}
+
+func validateRetentions(retentions Retentions) error {
+	if len(retentions) == 0 {
+		return fmt.Errorf("No retentions")
+	}
+	for i, retention := range retentions {
+		if i == len(retentions)-1 {
+			break
+		}
+
+		nextRetention := retentions[i+1]
+		if !(retention.secondsPerPoint < nextRetention.secondsPerPoint) {
+			return fmt.Errorf("A Whisper database may not be configured having two archives with the same precision (archive%v: %v, archive%v: %v)", i, retention, i+1, nextRetention)
+		}
+
+		if mod(nextRetention.secondsPerPoint, retention.secondsPerPoint) != 0 {
+			return fmt.Errorf("Higher precision archives' precision must evenly divide all lower precision archives' precision (archive%v: %v, archive%v: %v)", i, retention.secondsPerPoint, i+1, nextRetention.secondsPerPoint)
+		}
+
+		if retention.MaxRetention() >= nextRetention.MaxRetention() {
+			return fmt.Errorf("Lower precision archives must cover larger time intervals than higher precision archives (archive%v: %v seconds, archive%v: %v seconds)", i, retention.MaxRetention(), i+1, nextRetention.MaxRetention())
+		}
+
+		if retention.numberOfPoints < (nextRetention.secondsPerPoint / retention.secondsPerPoint) {
+			return fmt.Errorf("Each archive must have at least enough points to consolidate to the next archive (archive%v consolidates %v of archive%v's points but it has only %v total points)", i+1, nextRetention.secondsPerPoint/retention.secondsPerPoint, i, retention.numberOfPoints)
+		}
+	}
+	return nil
+}
+
+/*
+  A retention level.
+
+  Retention levels describe a given archive in the database: how detailed it is and how far back
+  it records.
+*/
+type Retention struct {
+	secondsPerPoint int
+	numberOfPoints  int
+}
+
+func (retention *Retention) MaxRetention() int {
+	return retention.secondsPerPoint * retention.numberOfPoints
+}
+
+func (retention *Retention) Size() int {
+	return retention.numberOfPoints * PointSize
+}
+
+func (retention *Retention) SecondsPerPoint() int {
+	return retention.secondsPerPoint
+}
+
+func (retention *Retention) NumberOfPoints() int {
+	return retention.numberOfPoints
+}
+
+func NewRetention(secondsPerPoint, numberOfPoints int) Retention {
+	return Retention{
+		secondsPerPoint,
+		numberOfPoints,
+	}
+}
+
+type Retentions []*Retention
+
+func (r Retentions) Len() int {
+	return len(r)
+}
+
+func (r Retentions) Swap(i, j int) {
+	r[i], r[j] = r[j], r[i]
+}
+
+type retentionsByPrecision struct{ Retentions }
+
+func (r retentionsByPrecision) Less(i, j int) bool {
+	return r.Retentions[i].secondsPerPoint < r.Retentions[j].secondsPerPoint
+}
+
+/*
+  Implementation of modulo that works like Python
+  Thanks @timmow for this
+*/
+func mod(a, b int) int {
+	return a - (b * int(math.Floor(float64(a)/float64(b))))
+}
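ParseRetentionDef turns one storage-schemas.conf retention into a per-point interval and a point count; the doc comment's Retention{10, 120960} example is 14 days of history at one point per 10 seconds (1209600 / 10). A quick sketch against the exported API (standalone, not part of this diff):

    package main

    import (
    	"fmt"

    	whisper "github.com/graphite-ng/carbon-relay-ng/go-whisper"
    )

    func main() {
    	rets, err := whisper.ParseRetentionDefs("10s:14d,1m:30d")
    	if err != nil {
    		panic(err)
    	}
    	for _, r := range rets {
    		fmt.Println(r.SecondsPerPoint(), r.NumberOfPoints())
    	}
    	// 10 120960
    	// 60 43200
    }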
diff --git a/go-whisper/whisper_test.go b/go-whisper/whisper_test.go
new file mode 100644
index 00000000..68417b56
--- /dev/null
+++ b/go-whisper/whisper_test.go
@@ -0,0 +1,56 @@
+package whisper
+
+import (
+	"fmt"
+	"sort"
+	"testing"
+)
+
+func testParseRetentionDef(t *testing.T, retentionDef string, expectedPrecision, expectedPoints int, hasError bool) {
+	errTpl := fmt.Sprintf("Expected %%v to be %%v but received %%v for retentionDef %v", retentionDef)
+
+	retention, err := ParseRetentionDef(retentionDef)
+
+	if (err == nil && hasError) || (err != nil && !hasError) {
+		if hasError {
+			t.Fatalf("Expected error but received none for retentionDef %v", retentionDef)
+		} else {
+			t.Fatalf("Expected no error but received %v for retentionDef %v", err, retentionDef)
+		}
+	}
+	if err == nil {
+		if retention.secondsPerPoint != expectedPrecision {
+			t.Fatalf(errTpl, "precision", expectedPrecision, retention.secondsPerPoint)
+		}
+		if retention.numberOfPoints != expectedPoints {
+			t.Fatalf(errTpl, "points", expectedPoints, retention.numberOfPoints)
+		}
+	}
+}
+
+func TestParseRetentionDef(t *testing.T) {
+	testParseRetentionDef(t, "1s:5m", 1, 300, false)
+	testParseRetentionDef(t, "1m:30m", 60, 30, false)
+	testParseRetentionDef(t, "1m", 0, 0, true)
+	testParseRetentionDef(t, "1m:30m:20s", 0, 0, true)
+	testParseRetentionDef(t, "1f:30s", 0, 0, true)
+	testParseRetentionDef(t, "1m:30f", 0, 0, true)
+}
+
+func TestParseRetentionDefs(t *testing.T) {
+	retentions, err := ParseRetentionDefs("1s:5m,1m:30m")
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	if length := len(retentions); length != 2 {
+		t.Fatalf("Expected 2 retentions, received %v", length)
+	}
+}
+
+func TestSortRetentions(t *testing.T) {
+	retentions := Retentions{{300, 12}, {60, 30}, {1, 300}}
+	sort.Sort(retentionsByPrecision{retentions})
+	if retentions[0].secondsPerPoint != 1 {
+		t.Fatalf("Retentions array is not sorted")
+	}
+}
diff --git a/input/init.go b/input/init.go
index ec0e4be3..cf12ed72 100644
--- a/input/init.go
+++ b/input/init.go
@@ -1,8 +1,6 @@
 package input
 
-import (
-	"io"
-)
+import "io"
 
 type Plugin interface {
 	Name() string
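The listener changes below thread the configured read timeout from main() down to every accepted TCP connection. The resulting wiring looks roughly like this sketch, where table stands in for the relay's routing table:

    // plaintext carbon input on :2003; idle conns are dropped after 2 minutes
    plain := input.NewPlain("0.0.0.0:2003", 2*time.Minute, table)
    if err := plain.Start(); err != nil {
    	log.Fatal(err)
    }

    // a zero timeout (i.e. "0s" in the config) disables the read deadline
    pickle := input.NewPickle("0.0.0.0:2013", 0, table)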
diff --git a/input/listen.go b/input/listen.go
index 9484cf57..1cf71320 100644
--- a/input/listen.go
+++ b/input/listen.go
@@ -13,21 +13,23 @@ import (
 // Listener takes care of TCP/UDP networking
 // and relies on the Handler to take care of reading data
 type Listener struct {
-	wg       sync.WaitGroup
-	name     string
-	addr     string
-	tcpList  *net.TCPListener
-	udpConn  *net.UDPConn
-	handler  Handler
-	shutdown chan struct{}
+	wg          sync.WaitGroup
+	name        string
+	addr        string
+	readTimeout time.Duration
+	tcpList     *net.TCPListener
+	udpConn     *net.UDPConn
+	handler     Handler
+	shutdown    chan struct{}
 }
 
-func NewListener(name, addr string, handler Handler) *Listener {
+func NewListener(name, addr string, readTimeout time.Duration, handler Handler) *Listener {
 	return &Listener{
-		name:     name,
-		addr:     addr,
-		handler:  handler,
-		shutdown: make(chan struct{}),
+		name:        name,
+		addr:        addr,
+		readTimeout: readTimeout,
+		handler:     handler,
+		shutdown:    make(chan struct{}),
 	}
 }
 
@@ -141,7 +143,11 @@ func (l *Listener) acceptTcpConn(c net.Conn) {
 		}
 	}()
 
-	l.handler.Handle(c)
+	l.handler.Handle(NewTimeoutConn(c, l.readTimeout))
+	rAddr := c.RemoteAddr()
+	if rAddr != nil {
+		log.Debugf("%v: handler returned; closing conn with %s", l.addr, rAddr)
+	}
 	c.Close()
 }
 
diff --git a/input/listen_test.go b/input/listen_test.go
index afc826ef..b5ffb79c 100644
--- a/input/listen_test.go
+++ b/input/listen_test.go
@@ -47,7 +47,7 @@ func (m *mockHandler) String() string {
 func TestTcpUdpShutdown(t *testing.T) {
 	handler := mockHandler{testing: t}
 	addr := "localhost:" // choose random ports
-	listener := NewListener("mock", addr, &handler)
+	listener := NewListener("mock", addr, 0, &handler)
 	err := listener.Start()
 	if err != nil {
 		t.Fatalf("Error when trying to listen: %s", err)
 	}
@@ -61,7 +61,7 @@ func TestTcpConnection(t *testing.T) {
 	handler := mockHandler{testing: t}
 	addr := "localhost:" // choose random ports
-	listener := NewListener("mock", addr, &handler)
+	listener := NewListener("mock", addr, 0, &handler)
 	err := listener.Start()
 	if err != nil {
 		t.Fatalf("Error when listening: %s", err)
 	}
@@ -100,7 +100,7 @@ func TestUdpConnection(t *testing.T) {
 	handler := mockHandler{testing: t}
 	addr := "localhost:" // choose random ports
-	listener := NewListener("mock", addr, &handler)
+	listener := NewListener("mock", addr, 0, &handler)
 	err := listener.Start()
 	if err != nil {
 		t.Fatalf("Error when listening: %s", err)
diff --git a/input/pickle.go b/input/pickle.go
index 36b1ffde..c260b715 100644
--- a/input/pickle.go
+++ b/input/pickle.go
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"math/big"
+	"time"
 
 	ogorek "github.com/kisielk/og-rek"
 	log "github.com/sirupsen/logrus"
@@ -16,8 +17,8 @@ type Pickle struct {
 	dispatcher Dispatcher
 }
 
-func NewPickle(addr string, dispatcher Dispatcher) *Listener {
-	return NewListener("pickle", addr, &Pickle{dispatcher})
+func NewPickle(addr string, readTimeout time.Duration, dispatcher Dispatcher) *Listener {
+	return NewListener("pickle", addr, readTimeout, &Pickle{dispatcher})
 }
 
 func (p *Pickle) Handle(c io.Reader) {
diff --git a/input/plain.go b/input/plain.go
index c612b8ba..aa4c7065 100644
--- a/input/plain.go
+++ b/input/plain.go
@@ -2,16 +2,18 @@ package input
 
 import (
 	"bufio"
-	log "github.com/sirupsen/logrus"
 	"io"
+	"time"
+
+	log "github.com/sirupsen/logrus"
 )
 
 type Plain struct {
 	dispatcher Dispatcher
 }
 
-func NewPlain(addr string, dispatcher Dispatcher) *Listener {
-	return NewListener("plain", addr, &Plain{dispatcher})
+func NewPlain(addr string, readTimeout time.Duration, dispatcher Dispatcher) *Listener {
+	return NewListener("plain", addr, readTimeout, &Plain{dispatcher})
 }
 
 func (p *Plain) Handle(c io.Reader) {
diff --git a/input/timeout_conn.go b/input/timeout_conn.go
new file mode 100644
index 00000000..1bcd2457
--- /dev/null
+++ b/input/timeout_conn.go
@@ -0,0 +1,29 @@
+package input
+
+import (
+	"net"
+	"time"
+)
+
+// TimeoutConn automatically applies a read deadline on a conn upon every read
+type TimeoutConn struct {
+	conn        net.Conn
+	readTimeout time.Duration
+}
+
+func NewTimeoutConn(conn net.Conn, readTimeout time.Duration) TimeoutConn {
+	return TimeoutConn{
+		conn:        conn,
+		readTimeout: readTimeout,
+	}
+}
+
+func (t TimeoutConn) Read(p []byte) (n int, err error) {
+	if t.readTimeout > 0 {
+		err = t.conn.SetReadDeadline(time.Now().Add(t.readTimeout))
+		if err != nil {
+			return 0, err
+		}
+	}
+	return t.conn.Read(p)
+}
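Because TimeoutConn re-arms the deadline before every Read, a client that keeps sending data is never cut off; only a fully idle connection times out. A rough sketch of the effect on a handler's read loop (conn is an accepted net.Conn; process is hypothetical):

    tc := input.NewTimeoutConn(conn, 2*time.Minute)

    buf := make([]byte, 4096)
    for {
    	// each Read gets a fresh 2-minute deadline; if the peer stays silent
    	// that long, Read fails with a net.Error whose Timeout() is true
    	n, err := tc.Read(buf)
    	if err != nil {
    		break
    	}
    	process(buf[:n])
    }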
diff --git a/vendor/github.com/lomik/go-carbon/LICENSE.md b/persister/LICENSE.md
similarity index 100%
rename from vendor/github.com/lomik/go-carbon/LICENSE.md
rename to persister/LICENSE.md
diff --git a/vendor/github.com/lomik/go-carbon/persister/ini.go b/persister/ini.go
similarity index 100%
rename from vendor/github.com/lomik/go-carbon/persister/ini.go
rename to persister/ini.go
diff --git a/persister/init.go b/persister/init.go
new file mode 100644
index 00000000..d7119ced
--- /dev/null
+++ b/persister/init.go
@@ -0,0 +1,4 @@
+// Package persister is a copy of github.com/lomik/go-carbon/persister
+// with non-essential stuff removed, and using a fork of go-whisper,
+// which allows us to build on Windows.
+package persister
diff --git a/vendor/github.com/lomik/go-carbon/persister/whisper_schema.go b/persister/whisper_schema.go
similarity index 98%
rename from vendor/github.com/lomik/go-carbon/persister/whisper_schema.go
rename to persister/whisper_schema.go
index 09d2e666..04f185fa 100644
--- a/vendor/github.com/lomik/go-carbon/persister/whisper_schema.go
+++ b/persister/whisper_schema.go
@@ -11,7 +11,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/go-graphite/go-whisper"
+	"github.com/graphite-ng/carbon-relay-ng/go-whisper"
 )
 
 // Schema represents one schema setting
diff --git a/persister/whisper_schema_test.go b/persister/whisper_schema_test.go
new file mode 100644
index 00000000..c53fbd3d
--- /dev/null
+++ b/persister/whisper_schema_test.go
@@ -0,0 +1,275 @@
+package persister
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/graphite-ng/carbon-relay-ng/go-whisper"
+	"github.com/stretchr/testify/assert"
+)
+
+func assertRetentionsEq(t *testing.T, ret whisper.Retentions, s string) {
+	assert := assert.New(t)
+	// s is a known-good retentions string to compare ret against
+	expected, err := ParseRetentionDefs(s)
+	if err != nil {
+		// should never happen: s is expected to be valid
+ t.Fatal(err) + return + } + + if assert.Nil(err) && assert.Equal(ret.Len(), expected.Len()) { + for i := 0; i < expected.Len(); i++ { + assert.Equal(expected[i].NumberOfPoints(), ret[i].NumberOfPoints()) + assert.Equal(expected[i].SecondsPerPoint(), ret[i].SecondsPerPoint()) + } + } +} + +func TestParseRetentionDefs(t *testing.T) { + assert := assert.New(t) + ret, err := ParseRetentionDefs("10s:24h,60s:30d,1h:5y") + + if assert.Nil(err) && assert.Equal(3, ret.Len()) { + assert.Equal(8640, ret[0].NumberOfPoints()) + assert.Equal(10, ret[0].SecondsPerPoint()) + + assert.Equal(43200, ret[1].NumberOfPoints()) + assert.Equal(60, ret[1].SecondsPerPoint()) + + assert.Equal(43800, ret[2].NumberOfPoints()) + assert.Equal(3600, ret[2].SecondsPerPoint()) + } + + // test strip spaces + if ret, err = ParseRetentionDefs("10s:24h, 60s:30d, 1h:5y"); assert.Nil(err) { + assertRetentionsEq(t, ret, "10s:1d,1m:30d,1h:5y") + } + + // old format of retentions + if ret, err = ParseRetentionDefs("60:43200,3600:43800"); assert.Nil(err) { + assertRetentionsEq(t, ret, "1m:30d,1h:5y") + } + + // unknown letter + ret, err = ParseRetentionDefs("10s:24v") + assert.Error(err) +} + +func parseSchemas(t *testing.T, content string) (WhisperSchemas, error) { + tmpFile, err := ioutil.TempFile("", "schemas-") + if err != nil { + t.Fatal(err) + return nil, nil + } + tmpFile.Write([]byte(content)) + tmpFile.Close() + + schemas, err := ReadWhisperSchemas(tmpFile.Name()) + + if removeErr := os.Remove(tmpFile.Name()); removeErr != nil { + t.Fatal(removeErr) + } + + return schemas, err +} + +type testcase struct { + name string + pattern string + retentions string +} + +func assertSchemas(t *testing.T, content string, expected []testcase, msg ...interface{}) WhisperSchemas { + schemas, err := parseSchemas(t, content) + if expected == nil { + assert.Error(t, err, msg...) + return nil + } + assert.Equal(t, len(expected), len(schemas)) + for i := 0; i < len(expected); i++ { + assert.Equal(t, expected[i].name, schemas[i].Name) + assert.Equal(t, expected[i].pattern, schemas[i].Pattern.String()) + assertRetentionsEq(t, schemas[i].Retentions, expected[i].retentions) + } + + return schemas +} + +func TestParseSchemasSimple(t *testing.T) { + assertSchemas(t, ` +[carbon] +pattern = ^carbon\. +retentions = 60s:90d + +[default] +pattern = .* +retentions = 1m:30d,1h:5y + `, + []testcase{ + {"carbon", "^carbon\\.", "60s:90d"}, + {"default", ".*", "1m:30d,1h:5y"}, + }, + ) +} + +func TestParseSchemasComment(t *testing.T) { + assertSchemas(t, ` +# This is a wild comment +[carbon] +pattern = ^carbon\. +retentions = 60s:90d + `, + []testcase{ + {"carbon", "^carbon\\.", "60s:90d"}, + }, + ) +} + +func TestParseSchemasSortingAndMatch(t *testing.T) { + // mixed record with and without priority. match metrics + assert := assert.New(t) + + schemas := assertSchemas(t, ` +[carbon] +pattern = ^carbon\. +retentions = 60s:90d + +[db] +pattern = ^db\. +retentions = 1m:30d,1h:5y + +[collector] +pattern = ^.*\.collector\. +retentions = 5s:300s,300s:30d +priority = 10 + +[gitlab] +pattern = ^gitlab\. +retentions = 1s:7d +priority = 100 + +[jira] +pattern = ^server\. 
+retentions = 1s:7d +priority = 10 + `, + []testcase{ + {"gitlab", "^gitlab\\.", "1s:7d"}, + {"collector", "^.*\\.collector\\.", "5s:5m,5m:30d"}, + {"jira", "^server\\.", "1s:7d"}, + {"carbon", "^carbon\\.", "1m:90d"}, + {"db", "^db\\.", "1m:30d,1h:5y"}, + }, + ) + + matched, ok := schemas.Match("db.collector.cpu") + if assert.True(ok) { + assert.Equal("collector", matched.Name) + } + + matched, ok = schemas.Match("db.mysql.rps") + if assert.True(ok) { + assert.Equal("db", matched.Name) + } + + matched, ok = schemas.Match("unknown") + assert.False(ok) +} + +func TestSchemasNotFound(t *testing.T) { + // create and remove file + assert := assert.New(t) + + tmpFile, err := ioutil.TempFile("", "schemas-") + if err != nil { + t.Fatal(err) + } + tmpFile.Close() + + if err := os.Remove(tmpFile.Name()); err != nil { + t.Fatal(err) + } + + schemas, err := ReadWhisperSchemas(tmpFile.Name()) + + assert.Nil(schemas) + assert.Error(err) +} + +func TestParseWrongSchemas(t *testing.T) { + /* + Cases: + 1. no pattern + 2. no retentions + 3. wrong pattern + 4. wrong retentions + 5. wrong priority + 6. empty pattern + 7. empty retentions + 8. empty priority + */ + + // has no pattern + assertSchemas(t, ` +[carbon] +parrent = ^carbon\. +retentions = 60s:90d + +[db] +pattern = ^db\. +retentions = 1m:30d,1h:5y +`, nil, "No pattern") + + // no retentions + assertSchemas(t, ` +[carbon] +pattern = ^carbon\. +ret = 60s:90d +`, nil, "No retentions") + + // wrong pattern + assertSchemas(t, ` +[carbon] +pattern = ^carb(on\. +retentions = 60s:90d +`, nil, "Wrong pattern") + + // wrong retentions + assertSchemas(t, ` +[carbon] +pattern = ^carbon\. +retentions = 60v:90d +`, nil, "Wrong retentions") + + // wrong priority + assertSchemas(t, ` +[carbon] +pattern = ^carbon\. +retentions = 60s:90d +priority = ab +`, nil, "Wrong priority") + + // empty pattern + assertSchemas(t, ` +[carbon] +pattern = +retentions = 60s:90d +`, nil, "Empty pattern") + + // empty retentions + assertSchemas(t, ` +[carbon] +pattern = ^carbon\. +retentions = +`, nil, "Empty retentions") + + // empty priority + assertSchemas(t, ` +[carbon] +pattern = ^carb(on\. 
+retentions = 60s:90d +priority = +`, nil, "Empty priority") +} diff --git a/route/grafananet.go b/route/grafananet.go index 87af6cb5..626f9c94 100644 --- a/route/grafananet.go +++ b/route/grafananet.go @@ -16,12 +16,12 @@ import ( "github.com/golang/snappy" dest "github.com/graphite-ng/carbon-relay-ng/destination" "github.com/graphite-ng/carbon-relay-ng/matcher" + "github.com/graphite-ng/carbon-relay-ng/persister" "github.com/graphite-ng/carbon-relay-ng/stats" "github.com/graphite-ng/carbon-relay-ng/util" "github.com/jpillora/backoff" log "github.com/sirupsen/logrus" - "github.com/lomik/go-carbon/persister" "gopkg.in/raintank/schema.v1" "gopkg.in/raintank/schema.v1/msg" ) diff --git a/route/kafkamdm.go b/route/kafkamdm.go index 6c1b206a..d72650a9 100644 --- a/route/kafkamdm.go +++ b/route/kafkamdm.go @@ -14,7 +14,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/Shopify/sarama" - "github.com/lomik/go-carbon/persister" + "github.com/graphite-ng/carbon-relay-ng/persister" "github.com/raintank/metrictank/cluster/partitioner" "gopkg.in/raintank/schema.v1" ) diff --git a/route/schemas.go b/route/schemas.go index 65763dfc..9e77201c 100644 --- a/route/schemas.go +++ b/route/schemas.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/lomik/go-carbon/persister" + "github.com/graphite-ng/carbon-relay-ng/persister" "gopkg.in/raintank/schema.v1" ) diff --git a/route/schemas_test.go b/route/schemas_test.go index b2f54376..035515d4 100644 --- a/route/schemas_test.go +++ b/route/schemas_test.go @@ -8,7 +8,7 @@ import ( "strings" "testing" - "github.com/lomik/go-carbon/persister" + "github.com/graphite-ng/carbon-relay-ng/persister" "gopkg.in/raintank/schema.v1" ) diff --git a/vendor/github.com/go-graphite/go-whisper/whisper.go b/vendor/github.com/go-graphite/go-whisper/whisper.go deleted file mode 100644 index 4f09ea29..00000000 --- a/vendor/github.com/go-graphite/go-whisper/whisper.go +++ /dev/null @@ -1,1144 +0,0 @@ -/* - Package whisper implements Graphite's Whisper database format -*/ -package whisper - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "os" - "regexp" - "sort" - "strconv" - "strings" - "syscall" - "time" -) - -const ( - IntSize = 4 - FloatSize = 4 - Float64Size = 8 - PointSize = 12 - MetadataSize = 16 - ArchiveInfoSize = 12 -) - -const ( - Seconds = 1 - Minutes = 60 - Hours = 3600 - Days = 86400 - Weeks = 86400 * 7 - Years = 86400 * 365 -) - -type AggregationMethod int - -const ( - Average AggregationMethod = iota + 1 - Sum - Last - Max - Min -) - -type Options struct { - Sparse bool - FLock bool -} - -func unitMultiplier(s string) (int, error) { - switch { - case strings.HasPrefix(s, "s"): - return Seconds, nil - case strings.HasPrefix(s, "m"): - return Minutes, nil - case strings.HasPrefix(s, "h"): - return Hours, nil - case strings.HasPrefix(s, "d"): - return Days, nil - case strings.HasPrefix(s, "w"): - return Weeks, nil - case strings.HasPrefix(s, "y"): - return Years, nil - } - return 0, fmt.Errorf("Invalid unit multiplier [%v]", s) -} - -var retentionRegexp *regexp.Regexp = regexp.MustCompile("^(\\d+)([smhdwy]+)$") - -func parseRetentionPart(retentionPart string) (int, error) { - part, err := strconv.ParseInt(retentionPart, 10, 32) - if err == nil { - return int(part), nil - } - if !retentionRegexp.MatchString(retentionPart) { - return 0, fmt.Errorf("%v", retentionPart) - } - matches := retentionRegexp.FindStringSubmatch(retentionPart) - value, err := strconv.ParseInt(matches[1], 10, 32) - if err != nil { - panic(fmt.Sprintf("Regex on %v is borked, 
%v cannot be parsed as int", retentionPart, matches[1])) - } - multiplier, err := unitMultiplier(matches[2]) - return multiplier * int(value), err -} - -/* - Parse a retention definition as you would find in the storage-schemas.conf of a Carbon install. - Note that this only parses a single retention definition, if you have multiple definitions (separated by a comma) - you will have to split them yourself. - - ParseRetentionDef("10s:14d") Retention{10, 120960} - - See: http://graphite.readthedocs.org/en/1.0/config-carbon.html#storage-schemas-conf -*/ -func ParseRetentionDef(retentionDef string) (*Retention, error) { - parts := strings.Split(retentionDef, ":") - if len(parts) != 2 { - return nil, fmt.Errorf("Not enough parts in retentionDef [%v]", retentionDef) - } - precision, err := parseRetentionPart(parts[0]) - if err != nil { - return nil, fmt.Errorf("Failed to parse precision: %v", err) - } - - points, err := parseRetentionPart(parts[1]) - if err != nil { - return nil, fmt.Errorf("Failed to parse points: %v", err) - } - points /= precision - - return &Retention{precision, points}, err -} - -func ParseRetentionDefs(retentionDefs string) (Retentions, error) { - retentions := make(Retentions, 0) - for _, retentionDef := range strings.Split(retentionDefs, ",") { - retention, err := ParseRetentionDef(retentionDef) - if err != nil { - return nil, err - } - retentions = append(retentions, retention) - } - return retentions, nil -} - -/* - Represents a Whisper database file. -*/ -type Whisper struct { - file *os.File - - // Metadata - aggregationMethod AggregationMethod - maxRetention int - xFilesFactor float32 - archives []*archiveInfo -} - -// Wrappers for whisper.file operations -func (whisper *Whisper) fileWriteAt(b []byte, off int64) error { - _, err := whisper.file.WriteAt(b, off) - return err -} - -// Wrappers for file.ReadAt operations -func (whisper *Whisper) fileReadAt(b []byte, off int64) error { - _, err := whisper.file.ReadAt(b, off) - return err -} - -/* - Create a new Whisper database file and write it's header. 
-*/ -func Create(path string, retentions Retentions, aggregationMethod AggregationMethod, xFilesFactor float32) (whisper *Whisper, err error) { - return CreateWithOptions(path, retentions, aggregationMethod, xFilesFactor, &Options{ - Sparse: false, - FLock: false, - }) -} - -// CreateWithOptions is more customizable create function -func CreateWithOptions(path string, retentions Retentions, aggregationMethod AggregationMethod, xFilesFactor float32, options *Options) (whisper *Whisper, err error) { - if options == nil { - options = &Options{} - } - sort.Sort(retentionsByPrecision{retentions}) - if err = validateRetentions(retentions); err != nil { - return nil, err - } - _, err = os.Stat(path) - if err == nil { - return nil, os.ErrExist - } - file, err := os.Create(path) - if err != nil { - return nil, err - } - - if options.FLock { - if err = syscall.Flock(int(file.Fd()), syscall.LOCK_EX); err != nil { - file.Close() - return nil, err - } - } - - whisper = new(Whisper) - - // Set the metadata - whisper.file = file - whisper.aggregationMethod = aggregationMethod - whisper.xFilesFactor = xFilesFactor - for _, retention := range retentions { - if retention.MaxRetention() > whisper.maxRetention { - whisper.maxRetention = retention.MaxRetention() - } - } - - // Set the archive info - offset := MetadataSize + (ArchiveInfoSize * len(retentions)) - whisper.archives = make([]*archiveInfo, 0, len(retentions)) - for _, retention := range retentions { - whisper.archives = append(whisper.archives, &archiveInfo{*retention, offset}) - offset += retention.Size() - } - - err = whisper.writeHeader() - if err != nil { - return nil, err - } - - // pre-allocate file size, fallocate proved slower - if options.Sparse { - if _, err = whisper.file.Seek(int64(whisper.Size()-1), 0); err != nil { - return nil, err - } - if _, err = whisper.file.Write([]byte{0}); err != nil { - return nil, err - } - } else { - remaining := whisper.Size() - whisper.MetadataSize() - chunkSize := 16384 - zeros := make([]byte, chunkSize) - for remaining > chunkSize { - if _, err = whisper.file.Write(zeros); err != nil { - return nil, err - } - remaining -= chunkSize - } - if _, err = whisper.file.Write(zeros[:remaining]); err != nil { - return nil, err - } - } - // whisper.file.Sync() - - return whisper, nil -} - -func validateRetentions(retentions Retentions) error { - if len(retentions) == 0 { - return fmt.Errorf("No retentions") - } - for i, retention := range retentions { - if i == len(retentions)-1 { - break - } - - nextRetention := retentions[i+1] - if !(retention.secondsPerPoint < nextRetention.secondsPerPoint) { - return fmt.Errorf("A Whisper database may not be configured having two archives with the same precision (archive%v: %v, archive%v: %v)", i, retention, i+1, nextRetention) - } - - if mod(nextRetention.secondsPerPoint, retention.secondsPerPoint) != 0 { - return fmt.Errorf("Higher precision archives' precision must evenly divide all lower precision archives' precision (archive%v: %v, archive%v: %v)", i, retention.secondsPerPoint, i+1, nextRetention.secondsPerPoint) - } - - if retention.MaxRetention() >= nextRetention.MaxRetention() { - return fmt.Errorf("Lower precision archives must cover larger time intervals than higher precision archives (archive%v: %v seconds, archive%v: %v seconds)", i, retention.MaxRetention(), i+1, nextRetention.MaxRetention()) - } - - if retention.numberOfPoints < (nextRetention.secondsPerPoint / retention.secondsPerPoint) { - return fmt.Errorf("Each archive must have at least enough points to 
consolidate to the next archive (archive%v consolidates %v of archive%v's points but it has only %v total points)", i+1, nextRetention.secondsPerPoint/retention.secondsPerPoint, i, retention.numberOfPoints) - } - } - return nil -} - -/* - Open an existing Whisper database and read it's header -*/ -func Open(path string) (whisper *Whisper, err error) { - return OpenWithOptions(path, &Options{ - FLock: false, - }) -} - -func OpenWithOptions(path string, options *Options) (whisper *Whisper, err error) { - file, err := os.OpenFile(path, os.O_RDWR, 0666) - if err != nil { - return - } - - defer func() { - if err != nil { - whisper = nil - file.Close() - } - }() - - if options.FLock { - if err = syscall.Flock(int(file.Fd()), syscall.LOCK_EX); err != nil { - return - } - } - - whisper = new(Whisper) - whisper.file = file - - offset := 0 - - // read the metadata - b := make([]byte, MetadataSize) - readed, err := file.Read(b) - - if err != nil { - err = fmt.Errorf("Unable to read header: %s", err.Error()) - return - } - if readed != MetadataSize { - err = fmt.Errorf("Unable to read header: EOF") - return - } - - a := unpackInt(b[offset : offset+IntSize]) - if a > 1024 { // support very old format. File starts with lastUpdate and has only average aggregation method - whisper.aggregationMethod = Average - } else { - whisper.aggregationMethod = AggregationMethod(a) - } - offset += IntSize - whisper.maxRetention = unpackInt(b[offset : offset+IntSize]) - offset += IntSize - whisper.xFilesFactor = unpackFloat32(b[offset : offset+FloatSize]) - offset += FloatSize - archiveCount := unpackInt(b[offset : offset+IntSize]) - offset += IntSize - - // read the archive info - b = make([]byte, ArchiveInfoSize) - - whisper.archives = make([]*archiveInfo, 0) - for i := 0; i < archiveCount; i++ { - readed, err = file.Read(b) - if err != nil || readed != ArchiveInfoSize { - err = fmt.Errorf("Unable to read archive %d metadata", i) - return - } - whisper.archives = append(whisper.archives, unpackArchiveInfo(b)) - } - - return whisper, nil -} - -func (whisper *Whisper) writeHeader() (err error) { - b := make([]byte, whisper.MetadataSize()) - i := 0 - i += packInt(b, int(whisper.aggregationMethod), i) - i += packInt(b, whisper.maxRetention, i) - i += packFloat32(b, whisper.xFilesFactor, i) - i += packInt(b, len(whisper.archives), i) - for _, archive := range whisper.archives { - i += packInt(b, archive.offset, i) - i += packInt(b, archive.secondsPerPoint, i) - i += packInt(b, archive.numberOfPoints, i) - } - _, err = whisper.file.Write(b) - - return err -} - -/* - Close the whisper file -*/ -func (whisper *Whisper) Close() { - whisper.file.Close() -} - -/* - Calculate the total number of bytes the Whisper file should be according to the metadata. -*/ -func (whisper *Whisper) Size() int { - size := whisper.MetadataSize() - for _, archive := range whisper.archives { - size += archive.Size() - } - return size -} - -/* - Calculate the number of bytes the metadata section will be. 
-*/ -func (whisper *Whisper) MetadataSize() int { - return MetadataSize + (ArchiveInfoSize * len(whisper.archives)) -} - -/* Return aggregation method */ -func (whisper *Whisper) AggregationMethod() string { - aggr := "unknown" - switch whisper.aggregationMethod { - case Average: - aggr = "Average" - case Sum: - aggr = "Sum" - case Last: - aggr = "Last" - case Max: - aggr = "Max" - case Min: - aggr = "Min" - } - return aggr -} - -/* Return max retention in seconds */ -func (whisper *Whisper) MaxRetention() int { - return whisper.maxRetention -} - -/* Return xFilesFactor */ -func (whisper *Whisper) XFilesFactor() float32 { - return whisper.xFilesFactor -} - -/* Return retentions */ -func (whisper *Whisper) Retentions() []Retention { - ret := make([]Retention, 0, 4) - for _, archive := range whisper.archives { - ret = append(ret, archive.Retention) - } - - return ret -} - -/* - Update a value in the database. - - If the timestamp is in the future or outside of the maximum retention it will - fail immediately. -*/ -func (whisper *Whisper) Update(value float64, timestamp int) (err error) { - // recover panics and return as error - defer func() { - if e := recover(); e != nil { - err = errors.New(e.(string)) - } - }() - - diff := int(time.Now().Unix()) - timestamp - if !(diff < whisper.maxRetention && diff >= 0) { - return fmt.Errorf("Timestamp not covered by any archives in this database") - } - var archive *archiveInfo - var lowerArchives []*archiveInfo - var i int - for i, archive = range whisper.archives { - if archive.MaxRetention() < diff { - continue - } - lowerArchives = whisper.archives[i+1:] // TODO: investigate just returning the positions - break - } - - myInterval := timestamp - mod(timestamp, archive.secondsPerPoint) - point := dataPoint{myInterval, value} - - _, err = whisper.file.WriteAt(point.Bytes(), whisper.getPointOffset(myInterval, archive)) - if err != nil { - return err - } - - higher := archive - for _, lower := range lowerArchives { - propagated, err := whisper.propagate(myInterval, higher, lower) - if err != nil { - return err - } else if !propagated { - break - } - higher = lower - } - - return nil -} - -func reversePoints(points []*TimeSeriesPoint) { - size := len(points) - end := size / 2 - - for i := 0; i < end; i++ { - points[i], points[size-i-1] = points[size-i-1], points[i] - } -} - -func (whisper *Whisper) UpdateMany(points []*TimeSeriesPoint) (err error) { - // recover panics and return as error - defer func() { - if e := recover(); e != nil { - err = errors.New(e.(string)) - } - }() - - // sort the points, newest first - reversePoints(points) - sort.Stable(timeSeriesPointsNewestFirst{points}) - - now := int(time.Now().Unix()) // TODO: danger of 2030 something overflow - - var currentPoints []*TimeSeriesPoint - for _, archive := range whisper.archives { - currentPoints, points = extractPoints(points, now, archive.MaxRetention()) - if len(currentPoints) == 0 { - continue - } - // reverse currentPoints - reversePoints(currentPoints) - err = whisper.archiveUpdateMany(archive, currentPoints) - if err != nil { - return - } - - if len(points) == 0 { // nothing left to do - break - } - } - return -} - -func (whisper *Whisper) archiveUpdateMany(archive *archiveInfo, points []*TimeSeriesPoint) error { - alignedPoints := alignPoints(archive, points) - intervals, packedBlocks := packSequences(archive, alignedPoints) - - baseInterval := whisper.getBaseInterval(archive) - if baseInterval == 0 { - baseInterval = intervals[0] - } - - for i := range intervals { - myOffset := 
archive.PointOffset(baseInterval, intervals[i]) - bytesBeyond := int(myOffset-archive.End()) + len(packedBlocks[i]) - if bytesBeyond > 0 { - pos := len(packedBlocks[i]) - bytesBeyond - err := whisper.fileWriteAt(packedBlocks[i][:pos], myOffset) - if err != nil { - return err - } - err = whisper.fileWriteAt(packedBlocks[i][pos:], archive.Offset()) - if err != nil { - return err - } - } else { - err := whisper.fileWriteAt(packedBlocks[i], myOffset) - if err != nil { - return err - } - } - } - - higher := archive - lowerArchives := whisper.lowerArchives(archive) - - for _, lower := range lowerArchives { - seen := make(map[int]bool) - propagateFurther := false - for _, point := range alignedPoints { - interval := point.interval - mod(point.interval, lower.secondsPerPoint) - if !seen[interval] { - if propagated, err := whisper.propagate(interval, higher, lower); err != nil { - panic("Failed to propagate") - } else if propagated { - propagateFurther = true - } - } - } - if !propagateFurther { - break - } - higher = lower - } - return nil -} - -func extractPoints(points []*TimeSeriesPoint, now int, maxRetention int) (currentPoints []*TimeSeriesPoint, remainingPoints []*TimeSeriesPoint) { - maxAge := now - maxRetention - for i, point := range points { - if point.Time < maxAge { - if i > 0 { - return points[:i-1], points[i-1:] - } else { - return []*TimeSeriesPoint{}, points - } - } - } - return points, remainingPoints -} - -func alignPoints(archive *archiveInfo, points []*TimeSeriesPoint) []dataPoint { - alignedPoints := make([]dataPoint, 0, len(points)) - positions := make(map[int]int) - for _, point := range points { - dPoint := dataPoint{point.Time - mod(point.Time, archive.secondsPerPoint), point.Value} - if p, ok := positions[dPoint.interval]; ok { - alignedPoints[p] = dPoint - } else { - alignedPoints = append(alignedPoints, dPoint) - positions[dPoint.interval] = len(alignedPoints) - 1 - } - } - return alignedPoints -} - -func packSequences(archive *archiveInfo, points []dataPoint) (intervals []int, packedBlocks [][]byte) { - intervals = make([]int, 0) - packedBlocks = make([][]byte, 0) - for i, point := range points { - if i == 0 || point.interval != intervals[len(intervals)-1]+archive.secondsPerPoint { - intervals = append(intervals, point.interval) - packedBlocks = append(packedBlocks, point.Bytes()) - } else { - packedBlocks[len(packedBlocks)-1] = append(packedBlocks[len(packedBlocks)-1], point.Bytes()...) 
- } - } - return -} - -/* - Calculate the offset for a given interval in an archive - - This method retrieves the baseInterval and the -*/ -func (whisper *Whisper) getPointOffset(start int, archive *archiveInfo) int64 { - baseInterval := whisper.getBaseInterval(archive) - if baseInterval == 0 { - return archive.Offset() - } - return archive.PointOffset(baseInterval, start) -} - -func (whisper *Whisper) getBaseInterval(archive *archiveInfo) int { - baseInterval, err := whisper.readInt(archive.Offset()) - if err != nil { - panic("Failed to read baseInterval") - } - return baseInterval -} - -func (whisper *Whisper) lowerArchives(archive *archiveInfo) (lowerArchives []*archiveInfo) { - for i, lower := range whisper.archives { - if lower.secondsPerPoint > archive.secondsPerPoint { - return whisper.archives[i:] - } - } - return -} - -func (whisper *Whisper) propagate(timestamp int, higher, lower *archiveInfo) (bool, error) { - lowerIntervalStart := timestamp - mod(timestamp, lower.secondsPerPoint) - - higherFirstOffset := whisper.getPointOffset(lowerIntervalStart, higher) - - // TODO: extract all this series extraction stuff - higherPoints := lower.secondsPerPoint / higher.secondsPerPoint - higherSize := higherPoints * PointSize - relativeFirstOffset := higherFirstOffset - higher.Offset() - relativeLastOffset := int64(mod(int(relativeFirstOffset+int64(higherSize)), higher.Size())) - higherLastOffset := relativeLastOffset + higher.Offset() - - series, err := whisper.readSeries(higherFirstOffset, higherLastOffset, higher) - if err != nil { - return false, err - } - - // and finally we construct a list of values - knownValues := make([]float64, 0, len(series)) - currentInterval := lowerIntervalStart - - for _, dPoint := range series { - if dPoint.interval == currentInterval { - knownValues = append(knownValues, dPoint.value) - } - currentInterval += higher.secondsPerPoint - } - - // propagate aggregateValue to propagate from neighborValues if we have enough known points - if len(knownValues) == 0 { - return false, nil - } - knownPercent := float32(len(knownValues)) / float32(len(series)) - if knownPercent < whisper.xFilesFactor { // check we have enough data points to propagate a value - return false, nil - } else { - aggregateValue := aggregate(whisper.aggregationMethod, knownValues) - point := dataPoint{lowerIntervalStart, aggregateValue} - if _, err := whisper.file.WriteAt(point.Bytes(), whisper.getPointOffset(lowerIntervalStart, lower)); err != nil { - return false, err - } - } - return true, nil -} - -func (whisper *Whisper) readSeries(start, end int64, archive *archiveInfo) ([]dataPoint, error) { - var b []byte - if start < end { - b = make([]byte, end-start) - err := whisper.fileReadAt(b, start) - if err != nil { - return nil, err - } - } else { - b = make([]byte, archive.End()-start) - err := whisper.fileReadAt(b, start) - if err != nil { - return nil, err - } - b2 := make([]byte, end-archive.Offset()) - err = whisper.fileReadAt(b2, archive.Offset()) - if err != nil { - return nil, err - } - b = append(b, b2...) 
- } - return unpackDataPoints(b), nil -} - -func (whisper *Whisper) checkSeriesEmpty(start, end int64, archive *archiveInfo, fromTime, untilTime int) (bool, error) { - if start < end { - len := end - start - return whisper.checkSeriesEmptyAt(start, len, fromTime, untilTime) - } - len := archive.End() - start - empty, err := whisper.checkSeriesEmptyAt(start, len, fromTime, untilTime) - if err != nil || !empty { - return empty, err - } - return whisper.checkSeriesEmptyAt(archive.Offset(), end-archive.Offset(), fromTime, untilTime) - -} - -func (whisper *Whisper) checkSeriesEmptyAt(start, len int64, fromTime, untilTime int) (bool, error) { - b1 := make([]byte, 4) - // Read first point - err := whisper.fileReadAt(b1, start) - if err != nil { - return false, err - } - pointTime := unpackInt(b1) - if pointTime > fromTime && pointTime < untilTime { - return false, nil - } - - b2 := make([]byte, 4) - // Read last point - err = whisper.fileReadAt(b2, len-12) - if err != nil { - return false, err - } - pointTime = unpackInt(b1) - if pointTime > fromTime && pointTime < untilTime { - return false, nil - } - return true, nil -} - -/* - Calculate the starting time for a whisper db. -*/ -func (whisper *Whisper) StartTime() int { - now := int(time.Now().Unix()) // TODO: danger of 2030 something overflow - return now - whisper.maxRetention -} - -/* - Fetch a TimeSeries for a given time span from the file. -*/ -func (whisper *Whisper) Fetch(fromTime, untilTime int) (timeSeries *TimeSeries, err error) { - now := int(time.Now().Unix()) // TODO: danger of 2030 something overflow - if fromTime > untilTime { - return nil, fmt.Errorf("Invalid time interval: from time '%d' is after until time '%d'", fromTime, untilTime) - } - oldestTime := whisper.StartTime() - // range is in the future - if fromTime > now { - return nil, nil - } - // range is beyond retention - if untilTime < oldestTime { - return nil, nil - } - if fromTime < oldestTime { - fromTime = oldestTime - } - if untilTime > now { - untilTime = now - } - - // TODO: improve this algorithm it's ugly - diff := now - fromTime - var archive *archiveInfo - for _, archive = range whisper.archives { - if archive.MaxRetention() >= diff { - break - } - } - - fromInterval := archive.Interval(fromTime) - untilInterval := archive.Interval(untilTime) - baseInterval := whisper.getBaseInterval(archive) - - if baseInterval == 0 { - step := archive.secondsPerPoint - points := (untilInterval - fromInterval) / step - values := make([]float64, points) - for i := range values { - values[i] = math.NaN() - } - return &TimeSeries{fromInterval, untilInterval, step, values}, nil - } - - // Zero-length time range: always include the next point - if fromInterval == untilInterval { - untilInterval += archive.SecondsPerPoint() - } - - fromOffset := archive.PointOffset(baseInterval, fromInterval) - untilOffset := archive.PointOffset(baseInterval, untilInterval) - - series, err := whisper.readSeries(fromOffset, untilOffset, archive) - if err != nil { - return nil, err - } - - values := make([]float64, len(series)) - for i := range values { - values[i] = math.NaN() - } - currentInterval := fromInterval - step := archive.secondsPerPoint - - for i, dPoint := range series { - if dPoint.interval == currentInterval { - values[i] = dPoint.value - } - currentInterval += step - } - - return &TimeSeries{fromInterval, untilInterval, step, values}, nil -} - -/* - Check a TimeSeries has a points for a given time span from the file. 
-*/ -func (whisper *Whisper) CheckEmpty(fromTime, untilTime int) (exist bool, err error) { - now := int(time.Now().Unix()) // TODO: danger of 2030 something overflow - if fromTime > untilTime { - return true, fmt.Errorf("Invalid time interval: from time '%d' is after until time '%d'", fromTime, untilTime) - } - oldestTime := whisper.StartTime() - // range is in the future - if fromTime > now { - return true, nil - } - // range is beyond retention - if untilTime < oldestTime { - return true, nil - } - if fromTime < oldestTime { - fromTime = oldestTime - } - if untilTime > now { - untilTime = now - } - - // TODO: improve this algorithm it's ugly - diff := now - fromTime - var archive *archiveInfo - for _, archive = range whisper.archives { - fromInterval := archive.Interval(fromTime) - untilInterval := archive.Interval(untilTime) - baseInterval := whisper.getBaseInterval(archive) - - if baseInterval == 0 { - return true, nil - } - - // Zero-length time range: always include the next point - if fromInterval == untilInterval { - untilInterval += archive.SecondsPerPoint() - } - - fromOffset := archive.PointOffset(baseInterval, fromInterval) - untilOffset := archive.PointOffset(baseInterval, untilInterval) - - empty, err := whisper.checkSeriesEmpty(fromOffset, untilOffset, archive, fromTime, untilTime) - if err != nil || !empty { - return empty, err - } - if archive.MaxRetention() >= diff { - break - } - } - return true, nil -} - -func (whisper *Whisper) readInt(offset int64) (int, error) { - // TODO: make errors better - b := make([]byte, IntSize) - _, err := whisper.file.ReadAt(b, offset) - if err != nil { - return 0, err - } - - return unpackInt(b), nil -} - -/* - A retention level. - - Retention levels describe a given archive in the database. How detailed it is and how far back - it records. -*/ -type Retention struct { - secondsPerPoint int - numberOfPoints int -} - -func (retention *Retention) MaxRetention() int { - return retention.secondsPerPoint * retention.numberOfPoints -} - -func (retention *Retention) Size() int { - return retention.numberOfPoints * PointSize -} - -func (retention *Retention) SecondsPerPoint() int { - return retention.secondsPerPoint -} - -func (retention *Retention) NumberOfPoints() int { - return retention.numberOfPoints -} - -func NewRetention(secondsPerPoint, numberOfPoints int) Retention { - return Retention{ - secondsPerPoint, - numberOfPoints, - } -} - -type Retentions []*Retention - -func (r Retentions) Len() int { - return len(r) -} - -func (r Retentions) Swap(i, j int) { - r[i], r[j] = r[j], r[i] -} - -type retentionsByPrecision struct{ Retentions } - -func (r retentionsByPrecision) Less(i, j int) bool { - return r.Retentions[i].secondsPerPoint < r.Retentions[j].secondsPerPoint -} - -/* - Describes a time series in a file. - - The only addition this type has over a Retention is the offset at which it exists within the - whisper file. 
-*/ -type archiveInfo struct { - Retention - offset int -} - -func (archive *archiveInfo) Offset() int64 { - return int64(archive.offset) -} - -func (archive *archiveInfo) PointOffset(baseInterval, interval int) int64 { - timeDistance := interval - baseInterval - pointDistance := timeDistance / archive.secondsPerPoint - byteDistance := pointDistance * PointSize - myOffset := archive.Offset() + int64(mod(byteDistance, archive.Size())) - - return myOffset -} - -func (archive *archiveInfo) End() int64 { - return archive.Offset() + int64(archive.Size()) -} - -func (archive *archiveInfo) Interval(time int) int { - return time - mod(time, archive.secondsPerPoint) + archive.secondsPerPoint -} - -type TimeSeries struct { - fromTime int - untilTime int - step int - values []float64 -} - -func (ts *TimeSeries) FromTime() int { - return ts.fromTime -} - -func (ts *TimeSeries) UntilTime() int { - return ts.untilTime -} - -func (ts *TimeSeries) Step() int { - return ts.step -} - -func (ts *TimeSeries) Values() []float64 { - return ts.values -} - -func (ts *TimeSeries) Points() []TimeSeriesPoint { - points := make([]TimeSeriesPoint, len(ts.values)) - for i, value := range ts.values { - points[i] = TimeSeriesPoint{Time: ts.fromTime + ts.step*i, Value: value} - } - return points -} - -func (ts *TimeSeries) String() string { - return fmt.Sprintf("TimeSeries{'%v' '%-v' %v %v}", time.Unix(int64(ts.fromTime), 0), time.Unix(int64(ts.untilTime), 0), ts.step, ts.values) -} - -type TimeSeriesPoint struct { - Time int - Value float64 -} - -type timeSeriesPoints []*TimeSeriesPoint - -func (p timeSeriesPoints) Len() int { - return len(p) -} - -func (p timeSeriesPoints) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -type timeSeriesPointsNewestFirst struct { - timeSeriesPoints -} - -func (p timeSeriesPointsNewestFirst) Less(i, j int) bool { - return p.timeSeriesPoints[i].Time > p.timeSeriesPoints[j].Time -} - -type dataPoint struct { - interval int - value float64 -} - -func (point *dataPoint) Bytes() []byte { - b := make([]byte, PointSize) - packInt(b, point.interval, 0) - packFloat64(b, point.value, IntSize) - return b -} - -func sum(values []float64) float64 { - result := 0.0 - for _, value := range values { - result += value - } - return result -} - -func aggregate(method AggregationMethod, knownValues []float64) float64 { - switch method { - case Average: - return sum(knownValues) / float64(len(knownValues)) - case Sum: - return sum(knownValues) - case Last: - return knownValues[len(knownValues)-1] - case Max: - max := knownValues[0] - for _, value := range knownValues { - if value > max { - max = value - } - } - return max - case Min: - min := knownValues[0] - for _, value := range knownValues { - if value < min { - min = value - } - } - return min - } - panic("Invalid aggregation method") -} - -func packInt(b []byte, v, i int) int { - binary.BigEndian.PutUint32(b[i:i+IntSize], uint32(v)) - return IntSize -} - -func packFloat32(b []byte, v float32, i int) int { - binary.BigEndian.PutUint32(b[i:i+FloatSize], math.Float32bits(v)) - return FloatSize -} - -func packFloat64(b []byte, v float64, i int) int { - binary.BigEndian.PutUint64(b[i:i+Float64Size], math.Float64bits(v)) - return Float64Size -} - -func unpackInt(b []byte) int { - return int(binary.BigEndian.Uint32(b)) -} - -func unpackFloat32(b []byte) float32 { - return math.Float32frombits(binary.BigEndian.Uint32(b)) -} - -func unpackFloat64(b []byte) float64 { - return math.Float64frombits(binary.BigEndian.Uint64(b)) -} - -func unpackArchiveInfo(b []byte) 
*archiveInfo { - return &archiveInfo{Retention{unpackInt(b[IntSize : IntSize*2]), unpackInt(b[IntSize*2 : IntSize*3])}, unpackInt(b[:IntSize])} -} - -func unpackDataPoint(b []byte) dataPoint { - return dataPoint{unpackInt(b[0:IntSize]), unpackFloat64(b[IntSize:PointSize])} -} - -func unpackDataPoints(b []byte) (series []dataPoint) { - series = make([]dataPoint, 0, len(b)/PointSize) - for i := 0; i < len(b); i += PointSize { - series = append(series, unpackDataPoint(b[i:i+PointSize])) - } - return -} - -/* - Implementation of modulo that works like Python - Thanks @timmow for this -*/ -func mod(a, b int) int { - return a - (b * int(math.Floor(float64(a)/float64(b)))) -} diff --git a/vendor/github.com/lomik/go-carbon/_vendor/src/github.com/lomik/go-carbon b/vendor/github.com/lomik/go-carbon/_vendor/src/github.com/lomik/go-carbon deleted file mode 120000 index c866b868..00000000 --- a/vendor/github.com/lomik/go-carbon/_vendor/src/github.com/lomik/go-carbon +++ /dev/null @@ -1 +0,0 @@ -../../../.. \ No newline at end of file diff --git a/vendor/github.com/lomik/go-carbon/carbonserver/LICENSE.md b/vendor/github.com/lomik/go-carbon/carbonserver/LICENSE.md deleted file mode 100644 index f433b1a5..00000000 --- a/vendor/github.com/lomik/go-carbon/carbonserver/LICENSE.md +++ /dev/null @@ -1,177 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
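One detail worth calling out in the whisper.go deletion above: Go's % operator keeps the sign of the dividend, while the ring-buffer offset math in PointOffset needs Python's floored modulo (sign of the divisor), which is exactly what the mod helper provides. A small self-contained comparison:

```go
package main

import (
	"fmt"
	"math"
)

// mod mirrors the deleted helper: the result takes the sign of b, like Python's %.
func mod(a, b int) int {
	return a - (b * int(math.Floor(float64(a)/float64(b))))
}

func main() {
	fmt.Println(-5 % 12)     // -5: Go's remainder keeps the dividend's sign
	fmt.Println(mod(-5, 12)) // 7: always a valid wrap-around buffer offset
}
```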
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/lomik/go-carbon/helper/atomic_utils.go b/vendor/github.com/lomik/go-carbon/helper/atomic_utils.go deleted file mode 100644 index 7d67d47f..00000000 --- a/vendor/github.com/lomik/go-carbon/helper/atomic_utils.go +++ /dev/null @@ -1,33 +0,0 @@ -package helper - -import "sync/atomic" - -type StatCallback func(metric string, value float64) - -func SendAndSubstractUint64(metric string, v *uint64, send StatCallback) { - res := atomic.LoadUint64(v) - atomic.AddUint64(v, ^uint64(res-1)) - send(metric, float64(res)) -} - -func SendUint64(metric string, v *uint64, send StatCallback) { - res := atomic.LoadUint64(v) - send(metric, float64(res)) -} - -func SendUint32(metric string, v *uint32, send StatCallback) { - res := atomic.LoadUint32(v) - send(metric, float64(res)) -} - -func SendAndSubstractUint32(metric string, v *uint32, send StatCallback) { - res := atomic.LoadUint32(v) - atomic.AddUint32(v, ^uint32(res-1)) - send(metric, float64(res)) -} - -func SendAndZeroIfNotUpdatedUint32(metric string, v *uint32, send StatCallback) { - res := atomic.LoadUint32(v) - atomic.CompareAndSwapUint32(v, res, 0) - send(metric, float64(res)) -} diff --git a/vendor/github.com/lomik/go-carbon/helper/stoppable.go b/vendor/github.com/lomik/go-carbon/helper/stoppable.go deleted file mode 100644 index 2d4a7d3c..00000000 --- a/vendor/github.com/lomik/go-carbon/helper/stoppable.go +++ /dev/null @@ -1,87 +0,0 @@ -package helper - -import "sync" - -type StoppableInterface interface { -} - -// Stoppable is abstract class with Start/Stop methods -type Stoppable struct { - sync.RWMutex - exit chan bool - wg sync.WaitGroup - Go func(callable func(exit chan bool)) - WithExit func(callable func(exit chan bool)) -} - -// Start ... -func (s *Stoppable) Start() { - s.StartFunc(func() error { return nil }) -} - -// Stop ... -func (s *Stoppable) Stop() { - s.StopFunc(func() {}) -} - -// StartFunc ... 
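The atomic_utils.go helpers removed above reset counters with a two's-complement trick: adding ^uint64(res-1) to a uint64 subtracts res, so the snapshot-then-subtract sequence never loses increments that land between the two calls. An isolated illustration (the example value is mine, not from the source):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var counter uint64 = 42

	// Snapshot, then subtract the snapshot; increments racing in
	// between the two calls remain in the counter.
	res := atomic.LoadUint64(&counter)
	atomic.AddUint64(&counter, ^uint64(res-1)) // equivalent to counter -= res

	fmt.Println(res, atomic.LoadUint64(&counter)) // 42 0
}
```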
-func (s *Stoppable) StartFunc(startProcedure func() error) error { - s.Lock() - defer s.Unlock() - - // already started - if s.exit != nil { - return nil - } - - exit := make(chan bool) - s.exit = exit - s.Go = func(callable func(exit chan bool)) { - s.wg.Add(1) - go func() { - callable(exit) - s.wg.Done() - }() - } - s.WithExit = func(callable func(exit chan bool)) { - callable(exit) - } - - err := startProcedure() - - // stop all if start failed - if err != nil { - s.doStop(func() {}) - } - - return err -} - -func (s *Stoppable) doStop(callable func()) { - // already stopped - if s.exit == nil { - return - } - - close(s.exit) - callable() - s.wg.Wait() - s.exit = nil - s.Go = func(callable func(exit chan bool)) {} - s.WithExit = func(callable func(exit chan bool)) { - callable(nil) - } -} - -// StopFunc ... -func (s *Stoppable) StopFunc(callable func()) { - s.Lock() - defer s.Unlock() - - // already stopped - if s.exit == nil { - return - } - - s.doStop(callable) -} diff --git a/vendor/github.com/lomik/go-carbon/persister/throttle.go b/vendor/github.com/lomik/go-carbon/persister/throttle.go deleted file mode 100644 index 65383641..00000000 --- a/vendor/github.com/lomik/go-carbon/persister/throttle.go +++ /dev/null @@ -1,69 +0,0 @@ -package persister - -import ( - "time" - - "github.com/lomik/go-carbon/helper" -) - -type ThrottleTicker struct { - helper.Stoppable - C chan bool -} - -func NewThrottleTicker(ratePerSec int) *ThrottleTicker { - t := &ThrottleTicker{ - C: make(chan bool, ratePerSec), - } - - t.Start() - - if ratePerSec <= 0 { - close(t.C) - return t - } - - t.Go(func(exit chan bool) { - defer close(t.C) - - delimeter := ratePerSec - chunk := 1 - - if ratePerSec > 1000 { - minRemainder := ratePerSec - - for i := 100; i < 1000; i++ { - if ratePerSec%i < minRemainder { - delimeter = i - minRemainder = ratePerSec % delimeter - } - } - - chunk = ratePerSec / delimeter - } - - step := time.Duration(1e9/delimeter) * time.Nanosecond - - ticker := time.NewTicker(step) - defer ticker.Stop() - - LOOP: - for { - select { - case <-ticker.C: - for i := 0; i < chunk; i++ { - select { - case t.C <- true: - //pass - case <-exit: - break LOOP - } - } - case <-exit: - break LOOP - } - } - }) - - return t -} diff --git a/vendor/github.com/lomik/go-carbon/persister/whisper.go b/vendor/github.com/lomik/go-carbon/persister/whisper.go deleted file mode 100644 index 99019eee..00000000 --- a/vendor/github.com/lomik/go-carbon/persister/whisper.go +++ /dev/null @@ -1,352 +0,0 @@ -package persister - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "sync/atomic" - - whisper "github.com/go-graphite/go-whisper" - "go.uber.org/zap" - - "github.com/lomik/go-carbon/helper" - "github.com/lomik/go-carbon/points" - "github.com/lomik/go-carbon/tags" - "github.com/lomik/zapwriter" -) - -const storeMutexCount = 32768 - -type StoreFunc func(metric string) - -// Whisper write data to *.wsp files -type Whisper struct { - helper.Stoppable - recv func(chan bool) string - pop func(string) (*points.Points, bool) - confirm func(*points.Points) - onCreateTagged func(string) - tagsEnabled bool - schemas WhisperSchemas - aggregation *WhisperAggregation - workersCount int - rootPath string - created uint32 // counter - throttledCreates uint32 // counter - updateOperations uint32 // counter - committedPoints uint32 // counter - sparse bool - flock bool - hashFilenames bool - maxUpdatesPerSecond int - maxCreatesPerSecond int - throttleTicker *ThrottleTicker - maxCreatesTicker *ThrottleTicker - storeMutex 
[storeMutexCount]sync.Mutex - mockStore func() (StoreFunc, func()) - logger *zap.Logger - createLogger *zap.Logger - // blockThrottleNs uint64 // sum ns counter - // blockQueueGetNs uint64 // sum ns counter - // blockAvoidConcurrentNs uint64 // sum ns counter - // blockUpdateManyNs uint64 // sum ns counter -} - -// NewWhisper create instance of Whisper -func NewWhisper( - rootPath string, - schemas WhisperSchemas, - aggregation *WhisperAggregation, - recv func(chan bool) string, - pop func(string) (*points.Points, bool), - confirm func(*points.Points)) *Whisper { - - return &Whisper{ - recv: recv, - pop: pop, - confirm: confirm, - schemas: schemas, - aggregation: aggregation, - workersCount: 1, - rootPath: rootPath, - maxUpdatesPerSecond: 0, - logger: zapwriter.Logger("persister"), - createLogger: zapwriter.Logger("whisper:new"), - } -} - -// SetMaxUpdatesPerSecond enable throttling -func (p *Whisper) SetMaxUpdatesPerSecond(maxUpdatesPerSecond int) { - p.maxUpdatesPerSecond = maxUpdatesPerSecond -} - -// SetMaxCreatesPerSecond enable throttling -func (p *Whisper) SetMaxCreatesPerSecond(maxCreatesPerSecond int) { - p.maxCreatesPerSecond = maxCreatesPerSecond -} - -// GetMaxUpdatesPerSecond returns current throttling speed -func (p *Whisper) GetMaxUpdatesPerSecond() int { - return p.maxUpdatesPerSecond -} - -// SetWorkers count -func (p *Whisper) SetWorkers(count int) { - if count >= 1 { - p.workersCount = count - } else { - p.workersCount = 1 - } -} - -// SetSparse creation -func (p *Whisper) SetSparse(sparse bool) { - p.sparse = sparse -} - -// SetFLock on create and open -func (p *Whisper) SetFLock(flock bool) { - p.flock = flock -} - -func (p *Whisper) SetHashFilenames(v bool) { - p.hashFilenames = v -} - -func (p *Whisper) SetMockStore(fn func() (StoreFunc, func())) { - p.mockStore = fn -} - -func (p *Whisper) SetTagsEnabled(v bool) { - p.tagsEnabled = v -} - -func (p *Whisper) SetOnCreateTagged(fn func(string)) { - p.onCreateTagged = fn -} - -func fnv32(key string) uint32 { - hash := uint32(2166136261) - const prime32 = uint32(16777619) - for i := 0; i < len(key); i++ { - hash *= prime32 - hash ^= uint32(key[i]) - } - return hash -} - -func (p *Whisper) updateMany(w *whisper.Whisper, path string, points []*whisper.TimeSeriesPoint) { - defer func() { - if r := recover(); r != nil { - p.logger.Error("UpdateMany panic recovered", - zap.String("path", path), - zap.String("traceback", fmt.Sprint(r)), - ) - } - }() - - // start = time.Now() - w.UpdateMany(points) -} - -func (p *Whisper) store(metric string) { - // avoid concurrent store same metric - // @TODO: may be flock? 
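The store path above serializes writers per metric by striping a fixed array of 32768 mutexes over an FNV-32 hash of the metric name: the same metric always maps to the same stripe, so two goroutines never update one .wsp file concurrently, while unrelated metrics rarely contend. A reduced sketch of the pattern, with a hypothetical withLock helper:

```go
package main

import "sync"

const stripes = 32768

var locks [stripes]sync.Mutex

// fnv32 is the FNV-1 string hash used by the deleted persister code.
func fnv32(key string) uint32 {
	hash := uint32(2166136261)
	const prime32 = 16777619
	for i := 0; i < len(key); i++ {
		hash *= prime32
		hash ^= uint32(key[i])
	}
	return hash
}

// withLock serializes callers sharing a key; distinct keys usually land
// on distinct stripes and proceed in parallel.
func withLock(key string, fn func()) {
	m := &locks[fnv32(key)%stripes]
	m.Lock()
	defer m.Unlock()
	fn()
}

func main() {
	withLock("carbon.agents.host.metricsReceived", func() {})
}
```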
- // start := time.Now() - mutexIndex := fnv32(metric) % storeMutexCount - p.storeMutex[mutexIndex].Lock() - // atomic.AddUint64(&p.blockAvoidConcurrentNs, uint64(time.Since(start).Nanoseconds())) - defer p.storeMutex[mutexIndex].Unlock() - - var path string - if p.tagsEnabled && strings.IndexByte(metric, ';') >= 0 { - path = tags.FilePath(p.rootPath, metric, p.hashFilenames) + ".wsp" - } else { - path = filepath.Join(p.rootPath, strings.Replace(metric, ".", "/", -1)+".wsp") - } - - w, err := whisper.OpenWithOptions(path, &whisper.Options{ - FLock: p.flock, - }) - if err != nil { - // create new whisper if file not exists - if !os.IsNotExist(err) { - p.logger.Error("failed to open whisper file", zap.String("path", path), zap.Error(err)) - return - } - - // max creates throttling - select { - case <-p.maxCreatesTicker.C: - // pass - default: - atomic.AddUint32(&p.throttledCreates, 1) - return - } - - schema, ok := p.schemas.Match(metric) - if !ok { - p.logger.Error("no storage schema defined for metric", zap.String("metric", metric)) - return - } - - aggr := p.aggregation.match(metric) - if aggr == nil { - p.logger.Error("no storage aggregation defined for metric", zap.String("metric", metric)) - return - } - - if err = os.MkdirAll(filepath.Dir(path), os.ModeDir|os.ModePerm); err != nil { - p.logger.Error("mkdir failed", - zap.String("dir", filepath.Dir(path)), - zap.Error(err), - zap.String("path", path), - ) - return - } - - w, err = whisper.CreateWithOptions(path, schema.Retentions, aggr.aggregationMethod, float32(aggr.xFilesFactor), &whisper.Options{ - Sparse: p.sparse, - FLock: p.flock, - }) - if err != nil { - p.logger.Error("create new whisper file failed", - zap.String("path", path), - zap.Error(err), - zap.String("retention", schema.RetentionStr), - zap.String("schema", schema.Name), - zap.String("aggregation", aggr.name), - zap.Float64("xFilesFactor", aggr.xFilesFactor), - zap.String("method", aggr.aggregationMethodStr), - ) - return - } - - if p.tagsEnabled && p.onCreateTagged != nil && strings.IndexByte(metric, ';') >= 0 { - p.onCreateTagged(metric) - } - - p.createLogger.Debug("created", - zap.String("path", path), - zap.String("retention", schema.RetentionStr), - zap.String("schema", schema.Name), - zap.String("aggregation", aggr.name), - zap.Float64("xFilesFactor", aggr.xFilesFactor), - zap.String("method", aggr.aggregationMethodStr), - ) - - atomic.AddUint32(&p.created, 1) - } - - values, exists := p.pop(metric) - if !exists { - return - } - if p.confirm != nil { - defer p.confirm(values) - } - - points := make([]*whisper.TimeSeriesPoint, len(values.Data)) - for i, r := range values.Data { - points[i] = &whisper.TimeSeriesPoint{Time: int(r.Timestamp), Value: r.Value} - } - - atomic.AddUint32(&p.committedPoints, uint32(len(values.Data))) - atomic.AddUint32(&p.updateOperations, 1) - - // start = time.Now() - p.updateMany(w, path, points) - - w.Close() - // atomic.AddUint64(&p.blockUpdateManyNs, uint64(time.Since(start).Nanoseconds())) -} - -func (p *Whisper) worker(exit chan bool) { - storeFunc := p.store - var doneCb func() - if p.mockStore != nil { - storeFunc, doneCb = p.mockStore() - } - -LOOP: - for { - // start := time.Now() - select { - case <-p.throttleTicker.C: - // atomic.AddUint64(&p.blockThrottleNs, uint64(time.Since(start).Nanoseconds())) - // pass - case <-exit: - return - } - - // start = time.Now() - metric := p.recv(exit) - // atomic.AddUint64(&p.blockQueueGetNs, uint64(time.Since(start).Nanoseconds())) - if metric == "" { - // exit closed - break LOOP - } - 
storeFunc(metric) - if doneCb != nil { - doneCb() - } - } -} - -// Stat callback -func (p *Whisper) Stat(send helper.StatCallback) { - updateOperations := atomic.LoadUint32(&p.updateOperations) - committedPoints := atomic.LoadUint32(&p.committedPoints) - atomic.AddUint32(&p.updateOperations, -updateOperations) - atomic.AddUint32(&p.committedPoints, -committedPoints) - - created := atomic.LoadUint32(&p.created) - atomic.AddUint32(&p.created, -created) - - throttledCreates := atomic.LoadUint32(&p.throttledCreates) - atomic.AddUint32(&p.throttledCreates, -throttledCreates) - - send("updateOperations", float64(updateOperations)) - send("committedPoints", float64(committedPoints)) - if updateOperations > 0 { - send("pointsPerUpdate", float64(committedPoints)/float64(updateOperations)) - } else { - send("pointsPerUpdate", 0.0) - } - - send("created", float64(created)) - send("throttledCreates", float64(throttledCreates)) - - send("maxUpdatesPerSecond", float64(p.maxUpdatesPerSecond)) - send("workers", float64(p.workersCount)) - - // helper.SendAndSubstractUint64("blockThrottleNs", &p.blockThrottleNs, send) - // helper.SendAndSubstractUint64("blockQueueGetNs", &p.blockQueueGetNs, send) - // helper.SendAndSubstractUint64("blockAvoidConcurrentNs", &p.blockAvoidConcurrentNs, send) - // helper.SendAndSubstractUint64("blockUpdateManyNs", &p.blockUpdateManyNs, send) - -} - -// Start worker -func (p *Whisper) Start() error { - return p.StartFunc(func() error { - - p.throttleTicker = NewThrottleTicker(p.maxUpdatesPerSecond) - p.maxCreatesTicker = NewThrottleTicker(p.maxCreatesPerSecond) - - for i := 0; i < p.workersCount; i++ { - p.Go(p.worker) - } - - return nil - }) -} - -func (p *Whisper) Stop() { - p.StopFunc(func() { - p.throttleTicker.Stop() - p.maxCreatesTicker.Stop() - }) -} diff --git a/vendor/github.com/lomik/go-carbon/persister/whisper_aggregation.go b/vendor/github.com/lomik/go-carbon/persister/whisper_aggregation.go deleted file mode 100644 index d5557828..00000000 --- a/vendor/github.com/lomik/go-carbon/persister/whisper_aggregation.go +++ /dev/null @@ -1,101 +0,0 @@ -package persister - -/* -Schemas read code from https://github.com/grobian/carbonwriter/ -*/ - -import ( - "fmt" - "regexp" - "strconv" - - whisper "github.com/go-graphite/go-whisper" -) - -type whisperAggregationItem struct { - name string - pattern *regexp.Regexp - xFilesFactor float64 - aggregationMethodStr string - aggregationMethod whisper.AggregationMethod -} - -// WhisperAggregation ... -type WhisperAggregation struct { - Data []*whisperAggregationItem - Default *whisperAggregationItem -} - -// NewWhisperAggregation create instance of WhisperAggregation -func NewWhisperAggregation() *WhisperAggregation { - return &WhisperAggregation{ - Data: make([]*whisperAggregationItem, 0), - Default: &whisperAggregationItem{ - name: "default", - pattern: nil, - xFilesFactor: 0.5, - aggregationMethodStr: "average", - aggregationMethod: whisper.Average, - }, - } -} - -// ReadWhisperAggregation ... 
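Aggregation lookup in the deleted whisper_aggregation.go is first-match-wins over the ordered regex rules, falling back to the built-in default (average, xFilesFactor 0.5) when nothing matches. A minimal sketch of that precedence, with made-up rules:

```go
package main

import (
	"fmt"
	"regexp"
)

type rule struct {
	name    string
	pattern *regexp.Regexp // nil on the default rule
	method  string
}

var defaultRule = rule{name: "default", method: "average"}

// match returns the first rule whose pattern matches, else the default —
// the same precedence the deleted WhisperAggregation.match implements.
func match(rules []rule, metric string) rule {
	for _, r := range rules {
		if r.pattern.MatchString(metric) {
			return r
		}
	}
	return defaultRule
}

func main() {
	rules := []rule{
		{"counters", regexp.MustCompile(`\.count$`), "sum"},
		{"minimums", regexp.MustCompile(`\.lower$`), "min"},
	}
	fmt.Println(match(rules, "stats.requests.count").method) // sum
	fmt.Println(match(rules, "stats.cpu.idle").method)       // average
}
```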
-func ReadWhisperAggregation(filename string) (*WhisperAggregation, error) { - config, err := parseIniFile(filename) - if err != nil { - return nil, err - } - - result := NewWhisperAggregation() - - for _, section := range config { - item := &whisperAggregationItem{} - // this is mildly stupid, but I don't feel like forking - // configparser just for this - item.name = section["name"] - - item.pattern, err = regexp.Compile(section["pattern"]) - if err != nil { - return nil, fmt.Errorf("failed to parse pattern %#v for [%s]: %s", - section["pattern"], item.name, err.Error()) - } - - item.xFilesFactor, err = strconv.ParseFloat(section["xfilesfactor"], 64) - if err != nil { - return nil, fmt.Errorf("failed to parse xFilesFactor %#v in %s: %s", - section["xfilesfactor"], item.name, err.Error()) - } - - item.aggregationMethodStr = section["aggregationmethod"] - switch item.aggregationMethodStr { - case "average", "avg": - item.aggregationMethod = whisper.Average - case "sum": - item.aggregationMethod = whisper.Sum - case "last": - item.aggregationMethod = whisper.Last - case "max": - item.aggregationMethod = whisper.Max - case "min": - item.aggregationMethod = whisper.Min - default: - return nil, fmt.Errorf("unknown aggregation method '%s'", - section["aggregationmethod"]) - } - - result.Data = append(result.Data, item) - } - - return result, nil -} - -// Match find schema for metric -func (a *WhisperAggregation) match(metric string) *whisperAggregationItem { - for _, s := range a.Data { - if s.pattern.MatchString(metric) { - return s - } - } - return a.Default -} diff --git a/vendor/github.com/lomik/go-carbon/points/glue.go b/vendor/github.com/lomik/go-carbon/points/glue.go deleted file mode 100644 index b928895d..00000000 --- a/vendor/github.com/lomik/go-carbon/points/glue.go +++ /dev/null @@ -1,55 +0,0 @@ -package points - -import ( - "bytes" - "fmt" - "time" -) - -func Glue(exit chan bool, in chan *Points, chunkSize int, chunkTimeout time.Duration, callback func([]byte)) { - var p *Points - var ok bool - - buf := bytes.NewBuffer(nil) - - flush := func() { - if buf.Len() == 0 { - return - } - callback(buf.Bytes()) - buf = bytes.NewBuffer(nil) - } - - ticker := time.NewTicker(chunkTimeout) - defer ticker.Stop() - - for { - p = nil - select { - case p, ok = <-in: - if !ok { // in chan closed - flush() - return - } - // pass - case <-ticker.C: - flush() - case <-exit: - return - } - - if p == nil { - continue - } - - for _, d := range p.Data { - s := fmt.Sprintf("%s %v %v\n", p.Metric, d.Value, d.Timestamp) - - if buf.Len()+len(s) > chunkSize { - flush() - } - buf.Write([]byte(s)) - } - } - -} diff --git a/vendor/github.com/lomik/go-carbon/points/points.go b/vendor/github.com/lomik/go-carbon/points/points.go deleted file mode 100644 index 7c164ae4..00000000 --- a/vendor/github.com/lomik/go-carbon/points/points.go +++ /dev/null @@ -1,202 +0,0 @@ -package points - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "strconv" - "strings" - "time" -) - -// Point value/time pair -type Point struct { - Value float64 - Timestamp int64 -} - -// Points from carbon clients -type Points struct { - Metric string - Data []Point -} - -// New creates new instance of Points -func New() *Points { - return &Points{} -} - -// OnePoint create Points instance with single point -func OnePoint(metric string, value float64, timestamp int64) *Points { - return &Points{ - Metric: metric, - Data: []Point{ - Point{ - Value: value, - Timestamp: timestamp, - }, - }, - } -} - -// NowPoint create OnePoint with now 
timestamp -func NowPoint(metric string, value float64) *Points { - return OnePoint(metric, value, time.Now().Unix()) -} - -// Copy returns copy of object -func (p *Points) Copy() *Points { - return &Points{ - Metric: p.Metric, - Data: p.Data, - } -} - -func (p *Points) WriteTo(w io.Writer) (n int64, err error) { - var c int - for _, d := range p.Data { // every metric point - c, err = w.Write([]byte(fmt.Sprintf("%s %v %v\n", p.Metric, d.Value, d.Timestamp))) - n += int64(c) - if err != nil { - return - } - } - return -} - -func encodeVarint(value int64) []byte { - var buf [10]byte - l := binary.PutVarint(buf[:], value) - return buf[:l] -} - -func (p *Points) WriteBinaryTo(w io.Writer) (n int, err error) { - var c int - - writeVarint := func(value int64) bool { - c, err = w.Write(encodeVarint(value)) - n += c - if err != nil { - return false - } - return true - } - - if !writeVarint(int64(len(p.Metric))) { - return - } - - c, err = io.WriteString(w, p.Metric) - n += c - if err != nil { - return - } - - if !writeVarint(int64(len(p.Data))) { - return - } - - if len(p.Data) > 0 { - if !writeVarint(int64(math.Float64bits(p.Data[0].Value))) { - return - } - if !writeVarint(p.Data[0].Timestamp) { - return - } - } - - for i := 1; i < len(p.Data); i++ { - if !writeVarint(int64(math.Float64bits(p.Data[i].Value)) - int64(math.Float64bits(p.Data[i-1].Value))) { - return - } - if !writeVarint(p.Data[i].Timestamp - p.Data[i-1].Timestamp) { - return - } - } - - return -} - -// ParseText parse text protocol Point -// host.Point.value 42 1422641531\n -func ParseText(line string) (*Points, error) { - - row := strings.Split(strings.Trim(line, "\n \t\r"), " ") - if len(row) != 3 { - return nil, fmt.Errorf("bad message: %#v", line) - } - - // 0x2e == ".". Or use split? 
@TODO: benchmark - // if strings.Contains(row[0], "..") || row[0][0] == 0x2e || row[0][len(row)-1] == 0x2e { - // return nil, fmt.Errorf("bad message: %#v", line) - // } - - value, err := strconv.ParseFloat(row[1], 64) - - if err != nil || math.IsNaN(value) { - return nil, fmt.Errorf("bad message: %#v", line) - } - - tsf, err := strconv.ParseFloat(row[2], 64) - - if err != nil || math.IsNaN(tsf) { - return nil, fmt.Errorf("bad message: %#v", line) - } - - // 315522000 == "1980-01-01 00:00:00" - // if tsf < 315532800 { - // return nil, fmt.Errorf("bad message: %#v", line) - // } - - // 4102444800 = "2100-01-01 00:00:00" - // Hello people from the future - // if tsf > 4102444800 { - // return nil, fmt.Errorf("bad message: %#v", line) - // } - - return OnePoint(row[0], value, int64(tsf)), nil -} - -// Append point -func (p *Points) Append(onePoint Point) *Points { - p.Data = append(p.Data, onePoint) - return p -} - -// Add value/timestamp pair to points -func (p *Points) Add(value float64, timestamp int64) *Points { - p.Data = append(p.Data, Point{ - Value: value, - Timestamp: timestamp, - }) - return p -} - -// Eq points check -func (p *Points) Eq(other *Points) bool { - if other == nil { - return false - } - if p.Metric != other.Metric { - return false - } - if p.Data == nil && other.Data == nil { - return true - } - if (p.Data == nil || other.Data == nil) && (p.Data != nil || other.Data != nil) { - return false - } - if len(p.Data) != len(other.Data) { - return false - } - for i := 0; i < len(p.Data); i++ { - if p.Data[i].Value != other.Data[i].Value { - return false - } - if p.Data[i].Timestamp != other.Data[i].Timestamp { - return false - } - } - return true -} diff --git a/vendor/github.com/lomik/go-carbon/points/reader.go b/vendor/github.com/lomik/go-carbon/points/reader.go deleted file mode 100644 index a7d4d81f..00000000 --- a/vendor/github.com/lomik/go-carbon/points/reader.go +++ /dev/null @@ -1,116 +0,0 @@ -package points - -import ( - "bufio" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "os" - "strings" -) - -const MB = 1048576 - -func ReadPlain(r io.Reader, callback func(*Points)) error { - reader := bufio.NewReaderSize(r, MB) - - for { - line, err := reader.ReadBytes('\n') - - if err != nil && err != io.EOF { - return err - } - - if len(line) == 0 { - break - } - - if line[len(line)-1] != '\n' { - return errors.New("unfinished line in file") - } - - p, err := ParseText(string(line)) - - if err == nil { - callback(p) - } - } - - return nil -} - -func ReadBinary(r io.Reader, callback func(*Points)) error { - reader := bufio.NewReaderSize(r, MB) - var p *Points - buf := make([]byte, MB) - - flush := func() { - if p != nil { - callback(p) - p = nil - } - } - - defer flush() - - for { - flush() - - l, err := binary.ReadVarint(reader) - if l > MB { - return fmt.Errorf("metric name too long: %d", l) - } - - if err == io.EOF { - break - } - - if err != nil { - return err - } - - io.ReadAtLeast(reader, buf[:l], int(l)) - - cnt, err := binary.ReadVarint(reader) - - var v, t, v0, t0 int64 - - for i := int64(0); i < cnt; i++ { - v0, err = binary.ReadVarint(reader) - if err != nil { - return err - } - v += v0 - - t0, err = binary.ReadVarint(reader) - if err != nil { - return err - } - t += t0 - - if i == int64(0) { - p = OnePoint(string(buf[:l]), math.Float64frombits(uint64(v)), t) - } else { - p.Add(math.Float64frombits(uint64(v)), t) - } - } - } - - return nil -} - -func ReadFromFile(filename string, callback func(*Points)) error { - file, err := os.Open(filename) - if err != 
nil { - return err - } - defer file.Close() - - if strings.HasSuffix(strings.ToLower(filename), ".bin") { - return ReadBinary(file, callback) - } - - return ReadPlain(file, callback) -} diff --git a/vendor/github.com/lomik/go-carbon/tags/normalize.go b/vendor/github.com/lomik/go-carbon/tags/normalize.go deleted file mode 100644 index c021fb12..00000000 --- a/vendor/github.com/lomik/go-carbon/tags/normalize.go +++ /dev/null @@ -1,109 +0,0 @@ -package tags - -import ( - "crypto/sha256" - "fmt" - "path/filepath" - "sort" - "strings" -) - -// as in https://github.com/graphite-project/carbon/blob/master/lib/carbon/util.py -func normalizeOriginal(s string) (string, error) { - arr := strings.Split(s, ";") - - if len(arr[0]) == 0 { - return "", fmt.Errorf("cannot parse path %#v, no metric found", s) - } - - tags := make(map[string]string) - - for i := 1; i < len(arr); i++ { - kv := strings.SplitN(arr[i], "=", 2) - - if len(kv) != 2 || len(kv[0]) == 0 { - return "", fmt.Errorf("cannot parse path %#v, invalid segment %#v", s, arr[i]) - } - - tags[kv[0]] = kv[1] - } - - tmp := make([]string, len(tags)) - i := 0 - for k, v := range tags { - tmp[i] = fmt.Sprintf(";%s=%s", k, v) - i++ - } - - sort.Strings(tmp) - - result := arr[0] + strings.Join(tmp, "") - - return result, nil -} - -type byKey []string - -func (a byKey) Len() int { return len(a) } -func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byKey) Less(i, j int) bool { - p1 := strings.Index(a[i], "=") - if p1 < 0 { - p1 = len(a[i]) - } - p2 := strings.Index(a[j], "=") - if p2 < 0 { - p2 = len(a[j]) - } - - return strings.Compare(a[i][:p1+1], a[j][:p2+1]) < 0 -} - -func Normalize(s string) (string, error) { - if strings.IndexByte(s, ';') < 0 { - return s, nil - } - - arr := strings.Split(s, ";") - - if len(arr[0]) == 0 { - return "", fmt.Errorf("cannot parse path %#v, no metric found", s) - } - - // check tags - for i := 1; i < len(arr); i++ { - if strings.Index(arr[i], "=") < 1 { - return "", fmt.Errorf("cannot parse path %#v, invalid segment %#v", s, arr[i]) - } - } - - sort.Stable(byKey(arr[1:])) - - // uniq - toDel := 0 - prevKey := "" - for i := 1; i < len(arr); i++ { - p := strings.Index(arr[i], "=") - key := arr[i][:p] - if key == prevKey { - toDel++ - } else { - prevKey = key - } - if toDel > 0 { - arr[i-toDel] = arr[i] - } - } - - return strings.Join(arr[:len(arr)-toDel], ";"), nil -} - -func FilePath(root string, s string, hashOnly bool) string { - sum := sha256.Sum256([]byte(s)) - hash := fmt.Sprintf("%x", sum) - if hashOnly == true { - return filepath.Join(root, "_tagged", hash[:3], hash[3:6], hash) - } else { - return filepath.Join(root, "_tagged", hash[:3], hash[3:6], strings.Replace(s, ".", "_DOT_", -1)) - } -} diff --git a/vendor/github.com/lomik/go-carbon/tags/queue.go b/vendor/github.com/lomik/go-carbon/tags/queue.go deleted file mode 100644 index 09f1ce0e..00000000 --- a/vendor/github.com/lomik/go-carbon/tags/queue.go +++ /dev/null @@ -1,256 +0,0 @@ -package tags - -import ( - "encoding/binary" - "fmt" - "os" - "path/filepath" - "strings" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/opt" - "go.uber.org/zap" - - "github.com/lomik/go-carbon/helper" - "github.com/lomik/zapwriter" -) - -type Queue struct { - helper.Stoppable - db *leveldb.DB - logger *zap.Logger - changed chan struct{} - send func([]string) error - sendChunk int - - stat struct { - putErrors uint32 - putCount uint32 - deleteErrors uint32 - deleteCount uint32 - sendFail uint32 - sendSuccess 
uint32 - } -} - -// exists returns whether the given file or directory exists or not -func exists(path string) bool { - _, err := os.Stat(path) - if err == nil { - return true - } - if os.IsNotExist(err) { - return false - } - return true -} - -func NewQueue(rootPath string, send func([]string) error, sendChunk int) (*Queue, error) { - if send == nil { - return nil, fmt.Errorf("send callback not set") - } - - o := &opt.Options{} - - queueDir := filepath.Join(rootPath, "queue") - - db, err := leveldb.OpenFile(queueDir, o) - - logger := zapwriter.Logger("tags").With(zap.String("path", queueDir)) - - if err != nil { - logger.Error("can't open queue database", - zap.Error(err), - ) - - if !exists(queueDir) { - return nil, err - } - - // try to recover - logger.Info("queue directory exists, try to recover") - moveTo := filepath.Join(rootPath, fmt.Sprintf("queue_corrupted_%d", time.Now().UnixNano())) - err = os.Rename(queueDir, moveTo) - if err != nil { - logger.Error("move corrupted queue failed", - zap.String("dst", moveTo), - zap.Error(err), - ) - - return nil, err - } - - // corrupted database moved, open new - db, err = leveldb.OpenFile(queueDir, o) - if err != nil { - logger.Error("can't create new queue database", - zap.Error(err), - ) - return nil, err - } - } - - if sendChunk < 1 { - sendChunk = 1 - } - - q := &Queue{ - db: db, - logger: logger, - changed: make(chan struct{}, 1), - send: send, - sendChunk: sendChunk, - } - - q.Start() - - q.Go(q.sendWorker) - - return q, nil -} - -func (q *Queue) Stop() { - q.StopFunc(func() {}) - q.db.Close() -} - -func (q *Queue) Add(metric string) { - // skip not tagged data - if strings.IndexByte(metric, ';') < 0 { - return - } - - key := make([]byte, len(metric)+8) - - binary.BigEndian.PutUint64(key[:8], uint64(time.Now().UnixNano())) - copy(key[8:], metric) - - err := q.db.Put(key, []byte("{}"), nil) - atomic.AddUint32(&q.stat.putCount, 1) - - if err != nil { - atomic.AddUint32(&q.stat.putErrors, 1) - q.logger.Error("write to queue database failed", zap.Error(err)) - } - - select { - case q.changed <- struct{}{}: - // pass - default: - // pass - } -} - -func (q *Queue) Lag() time.Duration { - iter := q.db.NewIterator(nil, nil) - - var res time.Duration - - if iter.Next() { - key := iter.Key() - if len(key) >= 8 { - t := int64(binary.BigEndian.Uint64(key[:8])) - tm := time.Unix(t/1000000000, t%1000000000) - res = time.Since(tm) - } - } - - iter.Release() - - return res -} - -func (q *Queue) sendAll(exit chan bool) { - iter := q.db.NewIterator(nil, nil) - defer iter.Release() - - keys := make([][]byte, q.sendChunk) - series := make([]string, q.sendChunk) - used := 0 - - flush := func() error { - if used <= 0 { - return nil - } - - for i := 0; i < used; i++ { - series[i] = string(keys[i][8:]) - } - - err := q.send(series[:used]) - - if err != nil { - atomic.AddUint32(&q.stat.sendFail, uint32(used)) - used = 0 - return err - } - - atomic.AddUint32(&q.stat.sendSuccess, uint32(used)) - - for i := 0; i < used; i++ { - q.delete(keys[i]) - } - used = 0 - return nil - } - - for iter.Next() { - select { - case <-exit: - return - default: - } - - // Remember that the contents of the returned slice should not be modified, and - // only valid until the next call to Next. 
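Queue keys in the deleted tags/queue.go are an 8-byte big-endian UnixNano prefix followed by the metric, so leveldb's lexicographic iteration doubles as FIFO order, and the first key alone yields the queue lag. A self-contained sketch of that key layout, with no leveldb dependency:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

// makeKey prefixes the metric with its enqueue time so lexicographic
// key order equals insertion order.
func makeKey(metric string, t time.Time) []byte {
	key := make([]byte, len(metric)+8)
	binary.BigEndian.PutUint64(key[:8], uint64(t.UnixNano()))
	copy(key[8:], metric)
	return key
}

// lag recovers "how old is the oldest entry" from the first key alone.
func lag(firstKey []byte, now time.Time) time.Duration {
	ns := int64(binary.BigEndian.Uint64(firstKey[:8]))
	return now.Sub(time.Unix(0, ns))
}

func main() {
	enqueued := time.Now().Add(-3 * time.Second)
	key := makeKey("cpu.load;host=a", enqueued)
	fmt.Println(lag(key, time.Now()).Round(time.Second)) // ~3s
}
```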
- key := iter.Key() - // value := iter.Value() - if len(key) < 9 { - q.delete(key) - continue - } - - keys[used] = make([]byte, len(key)) - copy(keys[used], key) - used++ - - if used >= q.sendChunk { - err := flush() - if err != nil { - time.Sleep(100 * time.Millisecond) - continue - } - } - } - - flush() -} - -func (q *Queue) sendWorker(exit chan bool) { - t := time.NewTicker(time.Second) - defer t.Stop() - - for { - select { - case <-exit: - return - case <-q.changed: - // pass - case <-t.C: - //pass - } - q.sendAll(exit) - } -} - -// Remove key from queue -func (q *Queue) delete(key []byte) { - err := q.db.Delete([]byte(key), nil) - atomic.AddUint32(&q.stat.deleteCount, 1) - if err != nil { - atomic.AddUint32(&q.stat.deleteErrors, 1) - q.logger.Error("delete from queue database failed", zap.Error(err)) - } -} diff --git a/vendor/github.com/lomik/go-carbon/tags/tags.go b/vendor/github.com/lomik/go-carbon/tags/tags.go deleted file mode 100644 index d7add982..00000000 --- a/vendor/github.com/lomik/go-carbon/tags/tags.go +++ /dev/null @@ -1,90 +0,0 @@ -package tags - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "time" - - "go.uber.org/zap" - - "github.com/lomik/go-carbon/helper" - "github.com/lomik/zapwriter" -) - -type Options struct { - LocalPath string - TagDB string - TagDBTimeout time.Duration - TagDBChunkSize int -} - -type Tags struct { - q *Queue - qErr error // queue initialization error - logger *zap.Logger -} - -func New(options *Options) *Tags { - var send func([]string) error - - t := &Tags{ - logger: zapwriter.Logger("tags"), - } - - u, urlErr := url.Parse(options.TagDB) - if urlErr != nil { - send = func([]string) error { - time.Sleep(time.Second) - t.logger.Error("bad tag url", zap.String("url", options.TagDB), zap.Error(urlErr)) - return urlErr - } - } else { - u.Path = "/tags/tagMultiSeries" - s := u.String() - - send = func(paths []string) error { - client := &http.Client{Timeout: options.TagDBTimeout} - - resp, err := client.PostForm(s, url.Values{"path": paths}) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("bad status code: %d", resp.StatusCode) - } - - ioutil.ReadAll(resp.Body) - return nil - } - } - - t.q, t.qErr = NewQueue(options.LocalPath, send, options.TagDBChunkSize) - - return t -} - -func (t *Tags) Stop() { - t.q.Stop() -} - -func (t *Tags) Add(value string) { - if t.q != nil { - t.q.Add(value) - } else { - t.logger.Error("queue database not initialized", zap.Error(t.qErr)) - } -} - -// Collect metrics -func (t *Tags) Stat(send helper.StatCallback) { - helper.SendAndSubstractUint32("queuePutErrors", &t.q.stat.putErrors, send) - helper.SendAndSubstractUint32("queuePutCount", &t.q.stat.putCount, send) - helper.SendAndSubstractUint32("queueDeleteErrors", &t.q.stat.deleteErrors, send) - helper.SendAndSubstractUint32("queueDeleteCount", &t.q.stat.deleteCount, send) - helper.SendAndSubstractUint32("tagdbSendFail", &t.q.stat.sendFail, send) - helper.SendAndSubstractUint32("tagdbSendSuccess", &t.q.stat.sendSuccess, send) - - send("queueLag", t.q.Lag().Seconds()) -} diff --git a/vendor/github.com/lomik/zapwriter/LICENSE b/vendor/github.com/lomik/zapwriter/LICENSE deleted file mode 100644 index 0db9ff52..00000000 --- a/vendor/github.com/lomik/zapwriter/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 Roman Lomonosov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the 
Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/lomik/zapwriter/config.go b/vendor/github.com/lomik/zapwriter/config.go deleted file mode 100644 index 9d306b95..00000000 --- a/vendor/github.com/lomik/zapwriter/config.go +++ /dev/null @@ -1,191 +0,0 @@ -package zapwriter - -import ( - "fmt" - "net/url" - "strings" - "time" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// Config configures a zapwriter. -// -// If SampleTick is defined, the Sample* parameters are passed to -// zapcore.NewSampler. See their documentation for details. -type Config struct { - Logger string `toml:"logger" json:"logger"` // handler name, default empty - File string `toml:"file" json:"file"` // filename, "stderr", "stdout", "empty" (=="stderr"), "none" - Level string `toml:"level" json:"level"` // "debug", "info", "warn", "error", "dpanic", "panic", and "fatal" - Encoding string `toml:"encoding" json:"encoding"` // "json", "console" - EncodingTime string `toml:"encoding-time" json:"encoding-time"` // "millis", "nanos", "epoch", "iso8601" - EncodingDuration string `toml:"encoding-duration" json:"encoding-duration"` // "seconds", "nanos", "string" - SampleTick string `toml:"sample-tick" json:"sample-tick"` // passed to time.ParseDuration - SampleInitial int `toml:"sample-initial" json:"sample-initial"` // first n messages logged per tick - SampleThereafter int `toml:"sample-thereafter" json:"sample-thereafter"` // every m-th message logged thereafter per tick -} - -func NewConfig() Config { - return Config{ - File: "stderr", - Level: "info", - Encoding: "mixed", - EncodingTime: "iso8601", - EncodingDuration: "seconds", - } -} - -func (c *Config) Clone() *Config { - clone := *c - return &clone -} - -func (c *Config) Check() error { - _, err := c.build(true) - return err -} - -func (c *Config) BuildLogger() (*zap.Logger, error) { - return c.build(false) -} - -func (c *Config) encoder() (zapcore.Encoder, zap.AtomicLevel, error) { - u, err := url.Parse(c.File) - atomicLevel := zap.NewAtomicLevel() - - if err != nil { - return nil, atomicLevel, err - } - - level := u.Query().Get("level") - if level == "" { - level = c.Level - } - - var zapLevel zapcore.Level - if err := zapLevel.UnmarshalText([]byte(level)); err != nil { - return nil, atomicLevel, err - } - - atomicLevel.SetLevel(zapLevel) - - encoding := u.Query().Get("encoding") - if encoding == "" { - encoding = c.Encoding - } - - encodingTime := u.Query().Get("encoding-time") - if encodingTime == "" { - encodingTime = c.EncodingTime - } - - encodingDuration := u.Query().Get("encoding-duration") - if encodingDuration == "" { - encodingDuration = c.EncodingDuration - } - - var encoderTime zapcore.TimeEncoder - - switch 
strings.ToLower(encodingTime) { - case "millis": - encoderTime = zapcore.EpochMillisTimeEncoder - case "nanos": - encoderTime = zapcore.EpochNanosTimeEncoder - case "epoch": - encoderTime = zapcore.EpochTimeEncoder - case "iso8601", "": - encoderTime = zapcore.ISO8601TimeEncoder - default: - return nil, atomicLevel, fmt.Errorf("unknown time encoding %#v", encodingTime) - } - - var encoderDuration zapcore.DurationEncoder - switch strings.ToLower(encodingDuration) { - case "seconds", "": - encoderDuration = zapcore.SecondsDurationEncoder - case "nanos": - encoderDuration = zapcore.NanosDurationEncoder - case "string": - encoderDuration = zapcore.StringDurationEncoder - default: - return nil, atomicLevel, fmt.Errorf("unknown duration encoding %#v", encodingDuration) - } - - encoderConfig := zapcore.EncoderConfig{ - MessageKey: "message", - LevelKey: "level", - TimeKey: "timestamp", - NameKey: "logger", - CallerKey: "caller", - StacktraceKey: "stacktrace", - EncodeLevel: zapcore.CapitalLevelEncoder, - EncodeTime: encoderTime, - EncodeDuration: encoderDuration, - } - - var encoder zapcore.Encoder - switch strings.ToLower(encoding) { - case "mixed", "": - encoder = NewMixedEncoder(encoderConfig) - case "json": - encoder = zapcore.NewJSONEncoder(encoderConfig) - case "console": - encoder = zapcore.NewConsoleEncoder(encoderConfig) - default: - return nil, atomicLevel, fmt.Errorf("unknown encoding %#v", encoding) - } - - return encoder, atomicLevel, nil -} - -func (c *Config) build(checkOnly bool) (*zap.Logger, error) { - u, err := url.Parse(c.File) - if err != nil { - return nil, err - } - - encoder, atomicLevel, err := c.encoder() - if err != nil { - return nil, err - } - - if checkOnly { - return nil, nil - } - - if strings.ToLower(u.Path) == "none" { - return zap.NewNop(), nil - } - - ws, err := New(c.File) - if err != nil { - return nil, err - } - - core, err := c.core(encoder, ws, atomicLevel) - if err != nil { - return nil, err - } - - return zap.New(core), nil -} - -func (c *Config) core(encoder zapcore.Encoder, ws zapcore.WriteSyncer, atomicLevel zap.AtomicLevel) (zapcore.Core, error) { - core := zapcore.NewCore(encoder, ws, atomicLevel) - - if c.SampleTick != "" { - if c.SampleThereafter == 0 { - return nil, fmt.Errorf("a sample-thereafter value of 0 will cause a runtime divide-by-zero error in zap") - } - - d, err := time.ParseDuration(c.SampleTick) - if err != nil { - return nil, err - } - - core = zapcore.NewSampler(core, d, c.SampleInitial, c.SampleThereafter) - } - - return core, nil -} diff --git a/vendor/github.com/lomik/zapwriter/direct_write_encoder.go b/vendor/github.com/lomik/zapwriter/direct_write_encoder.go deleted file mode 100644 index 68f78255..00000000 --- a/vendor/github.com/lomik/zapwriter/direct_write_encoder.go +++ /dev/null @@ -1,71 +0,0 @@ -package zapwriter - -import ( - "fmt" - "sync" - "time" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/zapcore" -) - -var _directWriteEncoderPool = sync.Pool{ - New: func() interface{} { - return &directWriteEncoder{buf: nil} - }, -} - -func getDirectWriteEncoder() *directWriteEncoder { - return _directWriteEncoderPool.Get().(*directWriteEncoder) -} - -func putDirectWriteEncoder(e *directWriteEncoder) { - e.buf = nil - _directWriteEncoderPool.Put(e) -} - -type directWriteEncoder struct { - buf *buffer.Buffer -} - -func (s *directWriteEncoder) AppendArray(v zapcore.ArrayMarshaler) error { - enc := &directWriteEncoder{buf: s.buf} - err := v.MarshalLogArray(enc) - return err -} - -func (s *directWriteEncoder) AppendObject(v 
zapcore.ObjectMarshaler) error { - m := zapcore.NewMapObjectEncoder() - err := v.MarshalLogObject(m) - if err != nil { - return err - } - _, err = fmt.Fprint(s.buf, m.Fields) - return err -} - -func (s *directWriteEncoder) AppendReflected(v interface{}) error { - _, err := fmt.Fprint(s.buf, v) - return err -} - -func (s *directWriteEncoder) AppendBool(v bool) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendByteString(v []byte) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendComplex128(v complex128) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendComplex64(v complex64) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendDuration(v time.Duration) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendFloat64(v float64) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendFloat32(v float32) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendInt(v int) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendInt64(v int64) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendInt32(v int32) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendInt16(v int16) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendInt8(v int8) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendString(v string) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendTime(v time.Time) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendUint(v uint) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendUint64(v uint64) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendUint32(v uint32) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendUint16(v uint16) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendUint8(v uint8) { fmt.Fprint(s.buf, v) } -func (s *directWriteEncoder) AppendUintptr(v uintptr) { fmt.Fprint(s.buf, v) } diff --git a/vendor/github.com/lomik/zapwriter/dsn.go b/vendor/github.com/lomik/zapwriter/dsn.go deleted file mode 100644 index bb892a73..00000000 --- a/vendor/github.com/lomik/zapwriter/dsn.go +++ /dev/null @@ -1,88 +0,0 @@ -package zapwriter - -import ( - "fmt" - "net/url" - "strconv" - "strings" - "time" -) - -type DsnObj struct { - url.Values -} - -func DSN(values url.Values) *DsnObj { - return &DsnObj{values} -} - -func (dsn *DsnObj) Duration(key string, initial string) (time.Duration, error) { - s := dsn.Get(key) - if s == "" { - return time.ParseDuration(initial) - } - return time.ParseDuration(s) -} - -func (dsn *DsnObj) Int(key string, initial int) (int, error) { - s := dsn.Get(key) - if s == "" { - return initial, nil - } - return strconv.Atoi(s) -} - -func (dsn *DsnObj) Bool(key string, initial bool) (bool, error) { - s := dsn.Get(key) - if s == "" { - return initial, nil - } - - return map[string]bool{ - "true": true, - "1": true, - }[strings.ToLower(s)], nil -} - -func (dsn *DsnObj) String(key string, initial string) (string, error) { - s := dsn.Get(key) - if s == "" { - return initial, nil - } - return s, nil -} - -func (dsn *DsnObj) StringRequired(key string) (string, error) { - s := dsn.Get(key) - if s == "" { - return "", fmt.Errorf("%#v is required", key) - } - return s, nil -} - -func (dsn *DsnObj) SetInt(p *int, key string) error { - var err error - *p, err = dsn.Int(key, *p) - return err -} - -func (dsn *DsnObj) SetDuration(p *time.Duration, key string) error { - var err error - *p, err = dsn.Duration(key, p.String()) - return err -} - -func (dsn *DsnObj) SetString(p *string, key string) error { - var err error - *p, err = 
dsn.String(key, *p) - return err -} - -func AnyError(e ...error) error { - for i := 0; i < len(e); i++ { - if e[i] != nil { - return e[i] - } - } - return nil -} diff --git a/vendor/github.com/lomik/zapwriter/file.go b/vendor/github.com/lomik/zapwriter/file.go deleted file mode 100644 index 873e3eb0..00000000 --- a/vendor/github.com/lomik/zapwriter/file.go +++ /dev/null @@ -1,166 +0,0 @@ -package zapwriter - -import ( - "fmt" - "net/url" - "os" - "os/user" - "path/filepath" - "strconv" - "sync" - "time" -) - -// with external rotate support -type FileOutput struct { - sync.Mutex - timeout time.Duration - interval time.Duration - checkNext time.Time - f *os.File - path string // filename - - exit chan interface{} - exitOnce sync.Once - exitWg sync.WaitGroup -} - -func newFileOutput(path string) (*FileOutput, error) { - u, err := url.Parse(path) - - if err != nil { - return nil, err - } - - var timeout time.Duration - var interval time.Duration - - params := DSN(u.Query()) - if timeout, err = params.Duration("timeout", "1s"); err != nil { - return nil, err - } - - if interval, err = params.Duration("interval", "1s"); err != nil { - return nil, err - } - - f, err := os.OpenFile(u.Path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - - r := &FileOutput{ - checkNext: time.Now().Add(timeout), - timeout: timeout, - interval: interval, - f: f, - path: u.Path, - exit: make(chan interface{}), - } - - r.exitWg.Add(1) - go func() { - r.reopenChecker(r.exit) - r.exitWg.Done() - }() - - return r, nil -} - -func File(path string) (*FileOutput, error) { - return newFileOutput(path) -} - -func (r *FileOutput) reopenChecker(exit chan interface{}) { - ticker := time.NewTicker(r.interval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - r.doWithCheck(func() {}) - case <-exit: - return - } - } -} - -func (r *FileOutput) reopen() *os.File { - prev := r.f - next, err := os.OpenFile(r.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - fmt.Println(err.Error()) - return r.f - } - - r.f = next - prev.Close() - return r.f -} - -func (r *FileOutput) Write(p []byte) (n int, err error) { - r.doWithCheck(func() { n, err = r.f.Write(p) }) - return -} - -func (r *FileOutput) Sync() (err error) { - r.doWithCheck(func() { err = r.f.Sync() }) - return -} - -func (r *FileOutput) Close() (err error) { - r.exitOnce.Do(func() { - close(r.exit) - }) - r.exitWg.Wait() - err = r.f.Close() - return -} - -func PrepareFileForUser(filename string, owner *user.User) error { - u, err := url.Parse(filename) - if err != nil { - return err - } - - if u.Path == "" || u.Path == "stderr" || u.Path == "stdout" || u.Path == "none" { - return nil - } - - if u.Scheme != "" && u.Scheme != "file" { - return nil - } - - if err := os.MkdirAll(filepath.Dir(u.Path), 0755); err != nil { - return err - } - - fd, err := os.OpenFile(u.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644) - if fd != nil { - fd.Close() - } - if err != nil { - return err - } - if err := os.Chmod(u.Path, 0644); err != nil { - return err - } - if owner != nil { - - uid, err := strconv.ParseInt(owner.Uid, 10, 0) - if err != nil { - return err - } - - gid, err := strconv.ParseInt(owner.Gid, 10, 0) - if err != nil { - return err - } - - if err := os.Chown(u.Path, int(uid), int(gid)); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/lomik/zapwriter/file_check_nix.go b/vendor/github.com/lomik/zapwriter/file_check_nix.go deleted file mode 100644 index cf34d45e..00000000 --- 
a/vendor/github.com/lomik/zapwriter/file_check_nix.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build !windows - -package zapwriter - -import ( - "time" - "fmt" - "syscall" - "os" -) - -func (r *FileOutput) doWithCheck(f func()) { - r.Lock() - defer r.Unlock() - r.check() - f() -} - -func (r *FileOutput) check() { - now := time.Now() - - if now.Before(r.checkNext) { - return - } - - r.checkNext = time.Now().Add(r.timeout) - - fInfo, err := r.f.Stat() - if err != nil { - fmt.Println(err.Error()) - return - } - - fStat, ok := fInfo.Sys().(*syscall.Stat_t) - if !ok { - fmt.Println("Not a syscall.Stat_t") - return - } - - pInfo, err := os.Stat(r.path) - if err != nil { - // file deleted (?) - r.reopen() - return - } - - pStat, ok := pInfo.Sys().(*syscall.Stat_t) - if !ok { - fmt.Println("Not a syscall.Stat_t") - return - } - - if fStat.Ino != pStat.Ino { - // file on disk changed - r.reopen() - return - } -} diff --git a/vendor/github.com/lomik/zapwriter/file_check_windows.go b/vendor/github.com/lomik/zapwriter/file_check_windows.go deleted file mode 100644 index 52a8172c..00000000 --- a/vendor/github.com/lomik/zapwriter/file_check_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows - -package zapwriter - -func (r *FileOutput) doWithCheck(f func()) { - f() -} diff --git a/vendor/github.com/lomik/zapwriter/json_encoder.go b/vendor/github.com/lomik/zapwriter/json_encoder.go deleted file mode 100644 index 6f7a549e..00000000 --- a/vendor/github.com/lomik/zapwriter/json_encoder.go +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapwriter - -import ( - "encoding/base64" - "encoding/json" - "math" - "sync" - "time" - "unicode/utf8" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/zapcore" -) - -// For JSON-escaping; see jsonEncoder.safeAddString below. -const _hex = "0123456789abcdef" - -var _jsonPool = sync.Pool{New: func() interface{} { - return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} - -func putJSONEncoder(enc *jsonEncoder) { - enc.EncoderConfig = nil - enc.buf = nil - enc.spaced = false - enc.openNamespaces = 0 - _jsonPool.Put(enc) -} - -type jsonEncoder struct { - *zapcore.EncoderConfig - buf *buffer.Buffer - spaced bool // include spaces after colons and commas - openNamespaces int -} - -// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder -// appropriately escapes all field keys and values. 
-// -// Note that the encoder doesn't deduplicate keys, so it's possible to produce -// a message like -// {"foo":"bar","foo":"baz"} -// This is permitted by the JSON specification, but not encouraged. Many -// libraries will ignore duplicate key-value pairs (typically keeping the last -// pair) when unmarshaling, but users should attempt to avoid adding duplicate -// keys. -func NewJSONEncoder(cfg zapcore.EncoderConfig) zapcore.Encoder { - return newJSONEncoder(cfg, false) -} - -func newJSONEncoder(cfg zapcore.EncoderConfig, spaced bool) *jsonEncoder { - return &jsonEncoder{ - EncoderConfig: &cfg, - buf: bufferpool.Get(), - spaced: spaced, - } -} - -func (enc *jsonEncoder) AddArray(key string, arr zapcore.ArrayMarshaler) error { - enc.addKey(key) - return enc.AppendArray(arr) -} - -func (enc *jsonEncoder) AddObject(key string, obj zapcore.ObjectMarshaler) error { - enc.addKey(key) - return enc.AppendObject(obj) -} - -func (enc *jsonEncoder) AddBinary(key string, val []byte) { - enc.AddString(key, base64.StdEncoding.EncodeToString(val)) -} - -func (enc *jsonEncoder) AddByteString(key string, val []byte) { - enc.addKey(key) - enc.AppendByteString(val) -} - -func (enc *jsonEncoder) AddBool(key string, val bool) { - enc.addKey(key) - enc.AppendBool(val) -} - -func (enc *jsonEncoder) AddComplex128(key string, val complex128) { - enc.addKey(key) - enc.AppendComplex128(val) -} - -func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { - enc.addKey(key) - enc.AppendDuration(val) -} - -func (enc *jsonEncoder) AddFloat64(key string, val float64) { - enc.addKey(key) - enc.AppendFloat64(val) -} - -func (enc *jsonEncoder) AddInt64(key string, val int64) { - enc.addKey(key) - enc.AppendInt64(val) -} - -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { - marshaled, err := json.Marshal(obj) - if err != nil { - return err - } - enc.addKey(key) - _, err = enc.buf.Write(marshaled) - return err -} - -func (enc *jsonEncoder) OpenNamespace(key string) { - enc.addKey(key) - enc.buf.AppendByte('{') - enc.openNamespaces++ -} - -func (enc *jsonEncoder) AddString(key, val string) { - enc.addKey(key) - enc.AppendString(val) -} - -func (enc *jsonEncoder) AddTime(key string, val time.Time) { - enc.addKey(key) - enc.AppendTime(val) -} - -func (enc *jsonEncoder) AddUint64(key string, val uint64) { - enc.addKey(key) - enc.AppendUint64(val) -} - -func (enc *jsonEncoder) AppendArray(arr zapcore.ArrayMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('[') - err := arr.MarshalLogArray(enc) - enc.buf.AppendByte(']') - return err -} - -func (enc *jsonEncoder) AppendObject(obj zapcore.ObjectMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('{') - err := obj.MarshalLogObject(enc) - enc.buf.AppendByte('}') - return err -} - -func (enc *jsonEncoder) AppendBool(val bool) { - enc.addElementSeparator() - enc.buf.AppendBool(val) -} - -func (enc *jsonEncoder) AppendByteString(val []byte) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddByteString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendComplex128(val complex128) { - enc.addElementSeparator() - // Cast to a platform-independent, fixed-size type. - r, i := float64(real(val)), float64(imag(val)) - enc.buf.AppendByte('"') - // Because we're always in a quoted string, we can use strconv without - // special-casing NaN and +/-Inf. 
- enc.buf.AppendFloat(r, 64) - enc.buf.AppendByte('+') - enc.buf.AppendFloat(i, 64) - enc.buf.AppendByte('i') - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendDuration(val time.Duration) { - cur := enc.buf.Len() - enc.EncodeDuration(val, enc) - if cur == enc.buf.Len() { - // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep - // JSON valid. - enc.AppendInt64(int64(val)) - } -} - -func (enc *jsonEncoder) AppendInt64(val int64) { - enc.addElementSeparator() - enc.buf.AppendInt(val) -} - -func (enc *jsonEncoder) AppendReflected(val interface{}) error { - marshaled, err := json.Marshal(val) - if err != nil { - return err - } - enc.addElementSeparator() - _, err = enc.buf.Write(marshaled) - return err -} - -func (enc *jsonEncoder) AppendString(val string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendTime(val time.Time) { - cur := enc.buf.Len() - enc.EncodeTime(val, enc) - if cur == enc.buf.Len() { - // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep - // output JSON valid. - enc.AppendInt64(val.UnixNano()) - } -} - -func (enc *jsonEncoder) AppendUint64(val uint64) { - enc.addElementSeparator() - enc.buf.AppendUint(val) -} - -func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } -func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } -func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } -func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } -func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } - -func (enc *jsonEncoder) Clone() zapcore.Encoder { - clone := enc.clone() - clone.buf.Write(enc.buf.Bytes()) - return clone -} - -func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() - clone.EncoderConfig = enc.EncoderConfig - clone.spaced = enc.spaced - clone.openNamespaces = enc.openNamespaces - clone.buf = bufferpool.Get() - 
return clone -} - -func (enc *jsonEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { - final := enc.clone() - final.buf.AppendByte('{') - - if final.LevelKey != "" { - final.addKey(final.LevelKey) - cur := final.buf.Len() - final.EncodeLevel(ent.Level, final) - if cur == final.buf.Len() { - // User-supplied EncodeLevel was a no-op. Fall back to strings to keep - // output JSON valid. - final.AppendString(ent.Level.String()) - } - } - if final.TimeKey != "" { - final.AddTime(final.TimeKey, ent.Time) - } - if ent.LoggerName != "" && final.NameKey != "" { - final.addKey(final.NameKey) - final.AppendString(ent.LoggerName) - } - if ent.Caller.Defined && final.CallerKey != "" { - final.addKey(final.CallerKey) - cur := final.buf.Len() - final.EncodeCaller(ent.Caller, final) - if cur == final.buf.Len() { - // User-supplied EncodeCaller was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.Caller.String()) - } - } - if final.MessageKey != "" { - final.addKey(enc.MessageKey) - final.AppendString(ent.Message) - } - if enc.buf.Len() > 0 { - final.addElementSeparator() - final.buf.Write(enc.buf.Bytes()) - } - addFields(final, fields) - final.closeOpenNamespaces() - if ent.Stack != "" && final.StacktraceKey != "" { - final.AddString(final.StacktraceKey, ent.Stack) - } - final.buf.AppendByte('}') - final.buf.AppendByte('\n') - - ret := final.buf - putJSONEncoder(final) - return ret, nil -} - -func (enc *jsonEncoder) truncate() { - enc.buf.Reset() -} - -func (enc *jsonEncoder) closeOpenNamespaces() { - for i := 0; i < enc.openNamespaces; i++ { - enc.buf.AppendByte('}') - } -} - -func (enc *jsonEncoder) addKey(key string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(key) - enc.buf.AppendByte('"') - enc.buf.AppendByte(':') - if enc.spaced { - enc.buf.AppendByte(' ') - } -} - -func (enc *jsonEncoder) addElementSeparator() { - last := enc.buf.Len() - 1 - if last < 0 { - return - } - switch enc.buf.Bytes()[last] { - case '{', '[', ':', ',', ' ': - return - default: - enc.buf.AppendByte(',') - if enc.spaced { - enc.buf.AppendByte(' ') - } - } -} - -func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { - enc.addElementSeparator() - switch { - case math.IsNaN(val): - enc.buf.AppendString(`"NaN"`) - case math.IsInf(val, 1): - enc.buf.AppendString(`"+Inf"`) - case math.IsInf(val, -1): - enc.buf.AppendString(`"-Inf"`) - default: - enc.buf.AppendFloat(val, bitSize) - } -} - -// safeAddString JSON-escapes a string and appends it to the internal buffer. -// Unlike the standard library's encoder, it doesn't attempt to protect the -// user from browser vulnerabilities or JSONP-related problems. -func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } -} - -// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. -func (enc *jsonEncoder) safeAddByteString(s []byte) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.Write(s[i : i+size]) - i += size - } -} - -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. 
-func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false - } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. - enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) - } - return true -} - -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false -} diff --git a/vendor/github.com/lomik/zapwriter/manager.go b/vendor/github.com/lomik/zapwriter/manager.go deleted file mode 100644 index b91e76fe..00000000 --- a/vendor/github.com/lomik/zapwriter/manager.go +++ /dev/null @@ -1,171 +0,0 @@ -package zapwriter - -import ( - "fmt" - "net/url" - "strings" - "sync" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -var _mutex sync.RWMutex -var _manager Manager - -func init() { - replaceGlobalManager(&manager{ - writers: make(map[string]WriteSyncer), - cores: make(map[string][]zapcore.Core), - loggers: make(map[string]*zap.Logger), - }) -} - -func replaceGlobalManager(m Manager) Manager { - _mutex.Lock() - prev := _manager - _manager = m - zap.ReplaceGlobals(_manager.Default()) - _mutex.Unlock() - return prev -} - -func CheckConfig(conf []Config, allowNames []string) error { - _, err := makeManager(conf, true, allowNames) - return err -} - -func ApplyConfig(conf []Config) error { - m, err := NewManager(conf) - if err != nil { - return err - } - - replaceGlobalManager(m) - - return nil -} - -func Default() *zap.Logger { - _mutex.RLock() - m := _manager - _mutex.RUnlock() - return m.Default() -} - -func Logger(logger string) *zap.Logger { - _mutex.RLock() - m := _manager - _mutex.RUnlock() - return m.Logger(logger) -} - -type Manager interface { - Default() *zap.Logger - Logger(logger string) *zap.Logger -} - -type manager struct { - writers map[string]WriteSyncer // path -> writer - cores map[string][]zapcore.Core // logger name -> cores - loggers map[string]*zap.Logger // logger name -> logger -} - -func NewManager(conf []Config) (Manager, error) { - return makeManager(conf, false, nil) -} - -func (m *manager) Default() *zap.Logger { - if logger, ok := m.loggers[""]; ok { - return logger - } - return zap.NewNop() -} - -func (m *manager) Logger(name string) *zap.Logger { - if logger, ok := m.loggers[name]; ok { - return logger.Named(name) - } - return m.Default().Named(name) -} - -func makeManager(conf []Config, checkOnly bool, allowNames []string) (Manager, error) { - // check names - if allowNames != nil { - namesMap := make(map[string]bool) - namesMap[""] = true - for _, s := range allowNames { - namesMap[s] = true - } - - for _, cfg := range conf { - if !namesMap[cfg.Logger] { - return nil, fmt.Errorf("unknown logger name %#v", cfg.Logger) - } - } - } - - // check config params - for _, cfg := range conf { - _, _, err := cfg.encoder() - if err != nil { - return nil, err - } - } - - // check complete - if checkOnly { - return nil, nil - } - - m := &manager{ - writers: make(map[string]WriteSyncer), - cores: make(map[string][]zapcore.Core), - loggers: make(map[string]*zap.Logger), - } - - // create writers and cores - for 
_, cfg := range conf { - u, err := url.Parse(cfg.File) - if err != nil { - return nil, err - } - - if _, ok := m.cores[cfg.Logger]; !ok { - m.cores[cfg.Logger] = make([]zapcore.Core, 0) - } - - if strings.ToLower(u.Path) == "none" { - m.cores[cfg.Logger] = append(m.cores[cfg.Logger], zapcore.NewNopCore()) - continue - } - - encoder, atomicLevel, err := cfg.encoder() - if err != nil { - return nil, err - } - - ws, ok := m.writers[u.Path] - if !ok { - ws, err = New(cfg.File) - if err != nil { - return nil, err - } - m.writers[u.Path] = ws - } - - core, err := cfg.core(encoder, ws, atomicLevel) - if err != nil { - return nil, err - } - - m.cores[cfg.Logger] = append(m.cores[cfg.Logger], core) - } - - // make loggers - for k, cores := range m.cores { - m.loggers[k] = zap.New(zapcore.NewTee(cores...)) - } - - return m, nil -} diff --git a/vendor/github.com/lomik/zapwriter/mixed_encoder.go b/vendor/github.com/lomik/zapwriter/mixed_encoder.go deleted file mode 100644 index 092ba3d8..00000000 --- a/vendor/github.com/lomik/zapwriter/mixed_encoder.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package zapwriter - -import ( - "go.uber.org/zap/buffer" - "go.uber.org/zap/zapcore" -) - -var bufferpool = buffer.NewPool() - -type mixedEncoder struct { - *jsonEncoder -} - -func NewMixedEncoder(cfg zapcore.EncoderConfig) zapcore.Encoder { - return mixedEncoder{newJSONEncoder(cfg, true)} -} - -func (enc mixedEncoder) Clone() zapcore.Encoder { - return mixedEncoder{enc.jsonEncoder.Clone().(*jsonEncoder)} -} - -func (enc mixedEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { - final := enc.clone() - - wr := getDirectWriteEncoder() - wr.buf = final.buf - - if enc.TimeKey != "" && enc.EncodeTime != nil { - final.buf.AppendByte('[') - enc.EncodeTime(ent.Time, wr) - final.buf.AppendByte(']') - } - - if enc.LevelKey != "" && enc.EncodeLevel != nil { - if final.buf.Len() > 0 { - final.buf.AppendByte(' ') - } - enc.EncodeLevel(ent.Level, wr) - } - - if ent.LoggerName != "" && enc.NameKey != "" { - if final.buf.Len() > 0 { - final.buf.AppendByte(' ') - } - final.buf.AppendByte('[') - wr.AppendString(ent.LoggerName) - final.buf.AppendByte(']') - } - - if ent.Caller.Defined && enc.CallerKey != "" && enc.EncodeCaller != nil { - if final.buf.Len() > 0 { - final.buf.AppendByte(' ') - } - enc.EncodeCaller(ent.Caller, wr) - final.buf.AppendByte(' ') - } - - putDirectWriteEncoder(wr) - - // Add the message itself. - if final.MessageKey != "" { - if final.buf.Len() > 0 { - final.buf.AppendByte(' ') - } - final.buf.AppendString(ent.Message) - } - - if final.buf.Len() > 0 { - final.buf.AppendByte(' ') - } - - final.buf.AppendByte('{') - - if enc.buf.Len() > 0 { - final.addElementSeparator() - final.buf.Write(enc.buf.Bytes()) - } - addFields(final, fields) - final.closeOpenNamespaces() - if ent.Stack != "" && final.StacktraceKey != "" { - final.AddString(final.StacktraceKey, ent.Stack) - } - final.buf.AppendByte('}') - final.buf.AppendByte('\n') - - ret := final.buf - return ret, nil -} diff --git a/vendor/github.com/lomik/zapwriter/output.go b/vendor/github.com/lomik/zapwriter/output.go deleted file mode 100644 index 08defa85..00000000 --- a/vendor/github.com/lomik/zapwriter/output.go +++ /dev/null @@ -1,128 +0,0 @@ -package zapwriter - -import ( - "fmt" - "io" - "log" - "net/url" - "os" - "sync" -) - -type WriteSyncer interface { - io.Writer - Sync() error -} - -type closeable interface { - Close() (err error) -} - -type Output interface { - io.Writer - Sync() error -} - -var knownSchemes = make(map[string](func(string) (Output, error))) -var knownSchemesMutex sync.RWMutex - -func RegisterScheme(scheme string, constructor func(path string) (Output, error)) { - knownSchemesMutex.Lock() - if _, exists := knownSchemes[scheme]; exists { - log.Fatalf("scheme %#v already registered", scheme) - } - knownSchemes[scheme] = constructor - knownSchemesMutex.Unlock() -} - -type output struct { - sync.RWMutex - out WriteSyncer - closeable bool - dsn string -} - -func New(dsn string) (Output, error) { - o := &output{} - - err := o.apply(dsn) - if err != nil { - return nil, err - } - - return o, err -} - -func (o *output) apply(dsn string) error { - if dsn == o.dsn && o.out != nil { // nothing changed - return nil - } - - var newOut WriteSyncer - var newCloseable bool - - u, err := url.Parse(dsn) - if err != nil { - return err - } - - if u.Path == "" || u.Path == "stderr" { - newOut = os.Stderr - } else if u.Path == "stdout" { - newOut = os.Stdout - } else { - if u.Scheme == "" || u.Scheme == "file" { - newOut, err = File(u.Path) - if err != nil { - return err - } - newCloseable = 
true - } else { - knownSchemesMutex.RLock() - newFunc, exists := knownSchemes[u.Scheme] - knownSchemesMutex.RUnlock() - - if !exists { - return fmt.Errorf("unknown scheme %#v", u.Scheme) - } - - newOut, err = newFunc(u.String()) - if err != nil { - return err - } - if _, ok := newOut.(closeable); ok { - newCloseable = true - } - } - } - - if o.out != nil && o.closeable { - if c, ok := o.out.(closeable); ok { - c.Close() - } - o.out = nil - } - - o.out = newOut - o.closeable = newCloseable - - return nil -} - -func (o *output) Sync() (err error) { - o.RLock() - if o.out != nil { - err = o.out.Sync() - } - o.RUnlock() - return -} - -func (o *output) Write(p []byte) (n int, err error) { - o.RLock() - if o.out != nil { - n, err = o.out.Write(p) - } - o.RUnlock() - return -} diff --git a/vendor/github.com/lomik/zapwriter/testing.go b/vendor/github.com/lomik/zapwriter/testing.go deleted file mode 100644 index 639386a5..00000000 --- a/vendor/github.com/lomik/zapwriter/testing.go +++ /dev/null @@ -1,87 +0,0 @@ -package zapwriter - -import ( - "bytes" - "sync" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -var _testBuffer testBuffer - -type testBuffer struct { - bytes.Buffer - mu sync.Mutex -} - -func (b *testBuffer) Write(p []byte) (n int, err error) { - b.mu.Lock() - defer b.mu.Unlock() - return b.Buffer.Write(p) -} - -func (b *testBuffer) Sync() error { - return nil -} - -func (b *testBuffer) String() string { - b.mu.Lock() - defer b.mu.Unlock() - return b.Buffer.String() -} - -func (b *testBuffer) Reset() { - b.mu.Lock() - defer b.mu.Unlock() - b.Buffer.Reset() -} - -// Capture = String + Reset -func (b *testBuffer) Capture() string { - b.mu.Lock() - defer b.mu.Unlock() - out := b.Buffer.String() - b.Buffer.Reset() - return out -} - -func Test() func() { - cfg := NewConfig() - return testWithConfig(cfg) -} - -func testWithConfig(cfg Config) func() { - encoder, _, _ := cfg.encoder() - - logger := zap.New( - zapcore.NewCore( - encoder, - &_testBuffer, - zapcore.DebugLevel, - ), - ) - - m := &manager{ - writers: make(map[string]WriteSyncer), - cores: make(map[string][]zapcore.Core), - loggers: make(map[string]*zap.Logger), - } - - m.loggers[""] = logger - - _testBuffer.Reset() - prev := replaceGlobalManager(m) - - return func() { - replaceGlobalManager(prev) - } -} - -func TestCapture() string { - return _testBuffer.Capture() -} - -func TestString() string { - return _testBuffer.String() -} diff --git a/vendor/github.com/lomik/zapwriter/zap_copy_paste.go b/vendor/github.com/lomik/zapwriter/zap_copy_paste.go deleted file mode 100644 index f7ffe65e..00000000 --- a/vendor/github.com/lomik/zapwriter/zap_copy_paste.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapwriter - -import ( - "go.uber.org/zap/zapcore" -) - -func addFields(enc zapcore.ObjectEncoder, fields []zapcore.Field) { - for i := range fields { - fields[i].AddTo(enc) - } -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 00000000..c67dad61 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 00000000..003e99fa --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). 
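Getting unified diffs is the stated main goal of this port; as a minimal illustrative sketch (not part of the vendored file) of how the SplitLines, UnifiedDiff and GetUnifiedDiffString declarations added below are typically called:

    package main

    import (
        "fmt"

        "github.com/pmezard/go-difflib/difflib"
    )

    func main() {
        // Compare two multi-line strings and print a unified diff
        // with three lines of context.
        diff := difflib.UnifiedDiff{
            A:        difflib.SplitLines("one\ntwo\nthree\n"),
            B:        difflib.SplitLines("one\n2\nthree\n"),
            FromFile: "a.txt",
            ToFile:   "b.txt",
            Context:  3,
        }
        text, err := difflib.GetUnifiedDiffString(diff)
        if err != nil {
            panic(err)
        }
        fmt.Print(text)
    }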
+package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
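To make the tag semantics above concrete, a small illustrative fragment (same imports as the earlier sketch) comparing two three-line sequences:

    m := difflib.NewMatcher(
        []string{"a\n", "b\n", "c\n"},
        []string{"a\n", "x\n", "c\n"},
    )
    for _, op := range m.GetOpCodes() {
        // Prints: e a[0:1] b[0:1], r a[1:2] b[1:2], e a[2:3] b[2:3] --
        // "b" is replaced by "x", the surrounding lines are equal.
        fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
    }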
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. 
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + wf := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + } + + first, last := g[0], g[len(g)-1] + ws("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + wf("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + wf("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/stretchr/testify/LICENCE.txt b/vendor/github.com/stretchr/testify/LICENCE.txt new file mode 100644 index 00000000..473b670a --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENCE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE new file mode 100644 index 00000000..473b670a --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 00000000..e6a79604 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,387 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package assert + +import ( + + http "net/http" + url "net/url" + time "time" +) + + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Equal(a.t, expected, actual, msgAndArgs...) +} + + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + return EqualError(a.t, theError, errString, msgAndArgs...) +} + + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + return Error(a.t, err, msgAndArgs...) +} + + +// Exactly asserts that two objects are equal is value and type. +// +// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Exactly(a.t, expected, actual, msgAndArgs...) +} + + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + return Fail(a.t, failureMessage, msgAndArgs...) +} + + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + return FailNow(a.t, failureMessage, msgAndArgs...) +} + + +// False asserts that the specified value is false. +// +// a.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + return False(a.t, value, msgAndArgs...) +} + + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { + return HTTPBodyContains(a.t, handler, method, url, values, str) +} + + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { + return HTTPBodyNotContains(a.t, handler, method, url, values, str) +} + + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPError(a.t, handler, method, url, values) +} + + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPRedirect(a.t, handler, method, url, values) +} + + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPSuccess(a.t, handler, method, url, values) +} + + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return IsType(a.t, expectedType, object, msgAndArgs...) +} + + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + + +// Nil asserts that the specified object is nil. +// +// a.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) +} + + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + return NoError(a.t, err, msgAndArgs...) +} + + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + return NotEmpty(a.t, object, msgAndArgs...) +} + + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + return NotNil(a.t, object, msgAndArgs...) +} + + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return NotPanics(a.t, f, msgAndArgs...) +} + + +// NotRegexp asserts that a specified regexp does not match a string. 
+// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + return NotZero(a.t, i, msgAndArgs...) +} + + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return Panics(a.t, f, msgAndArgs...) +} + + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return Regexp(a.t, rx, str, msgAndArgs...) +} + + +// True asserts that the specified value is true. +// +// a.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + + +// Zero asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + return Zero(a.t, i, msgAndArgs...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 00000000..b3f4e170 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1052 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "math" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +func init() { + spew.Config.SortKeys = true +} + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + + if expected == nil || actual == nil { + return expected == actual + } + + return reflect.DeepEqual(expected, actual) + +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. 
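Because TestingT only requires Errorf, anything that can record a formatted failure satisfies it, not just *testing.T. A speculative sketch (recorder is an invented name) of driving the package-level assertions with a custom implementation:

package example

import (
	"fmt"

	"github.com/stretchr/testify/assert"
)

// recorder implements assert.TestingT by collecting failure messages.
type recorder struct{ failures []string }

func (r *recorder) Errorf(format string, args ...interface{}) {
	r.failures = append(r.failures, fmt.Sprintf(format, args...))
}

func demo() {
	r := &recorder{}
	assert.Equal(r, 1, 2)        // fails; the message lands in r.failures
	fmt.Println(len(r.failures)) // 1
}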
+func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occurred in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed. +func CallerInfo() []string { + + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" + + callers := []string{} + for i := 0; ; i++ { + pc, file, line, ok = runtime.Caller(i) + if !ok { + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. + break + } + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } + + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + dir := parts[len(parts)-2] + file = parts[len(parts)-1] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + + // Drop the package + segments := strings.Split(name, ".") + name = segments[len(segments)-1] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + } + + return callers +} + +// Stolen from the `go test` tool. +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +// getWhitespaceString returns a string that is long enough to overwrite the default +// output from the go testing framework. +func getWhitespaceString() string { + + _, file, line, ok := runtime.Caller(1) + if !ok { + return "" + } + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + + return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) + +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) 
+ } + return "" +} + +// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's +// test printing (see inner comment for specifics) +func indentMessageLines(message string, tabs int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + if i != 0 { + outBuf.WriteRune('\n') + } + for ii := 0; ii < tabs; ii++ { + outBuf.WriteRune('\t') + // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter + // by 1 prematurely. + if ii == 0 && i > 0 { + ii++ + } + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. + // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + + errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t") + if len(message) > 0 { + t.Errorf("\r%s\r\tError Trace:\t%s\n"+ + "\r\tError:%s\n"+ + "\r\tMessages:\t%s\n\r", + getWhitespaceString(), + errorTrace, + indentMessageLines(failureMessage, 2), + message) + } else { + t.Errorf("\r%s\r\tError Trace:\t%s\n"+ + "\r\tError:%s\n\r", + getWhitespaceString(), + errorTrace, + indentMessageLines(failureMessage, 2)) + } + + return false +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) + } + + return true + +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: %s (expected)\n"+ + " != %s (actual)%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// formatUnequalValues takes two values of arbitrary types and returns string +// representations appropriate to be presented to the user. 
+// +// If the values are not of like type, the returned strings will be prefixed +// with the type name, and the value will be enclosed in parenthesis similar +// to a type conversion in the Go grammar. +func formatUnequalValues(expected, actual interface{}) (e string, a string) { + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType && isNumericType(aType) && isNumericType(bType) { + return fmt.Sprintf("%v(%#v)", aType, expected), + fmt.Sprintf("%v(%#v)", bType, actual) + } + + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) +} + +func isNumericType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + } + + return false +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqualValues(expected, actual) { + return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ + " != %#v (actual)", expected, actual), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if !isNil(object) { + return true + } + return Fail(t, "Expected value not to be nil.", msgAndArgs...) +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +var numericZeros = []interface{}{ + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + float32(0), + float64(0), +} + +// isEmpty gets whether the specified object is considered empty or not. 
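The emptiness rules encoded below are easiest to see side by side; a small illustrative test (values chosen only for demonstration):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEmptiness(t *testing.T) {
	assert.Empty(t, nil)        // nil is empty
	assert.Empty(t, "")         // zero string
	assert.Empty(t, 0)          // any numeric zero
	assert.Empty(t, false)      // boolean false
	assert.Empty(t, []string{}) // zero-length slice
	assert.NotEmpty(t, []string{"x"})
}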
+func isEmpty(object interface{}) bool { + + if object == nil { + return true + } else if object == "" { + return true + } else if object == false { + return true + } + + for _, v := range numericZeros { + if object == v { + return true + } + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + case reflect.Map: + fallthrough + case reflect.Slice, reflect.Chan: + { + return (objValue.Len() == 0) + } + case reflect.Struct: + switch object.(type) { + case time.Time: + return object.(time.Time).IsZero() + } + case reflect.Ptr: + { + if objValue.IsNil() { + return true + } + switch object.(type) { + case *time.Time: + return object.(*time.Time).IsZero() + default: + return false + } + } + } + return false +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +// +// Returns whether the assertion was successful (true) or not (false). +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. +func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + } + + if l != length { + return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + } + return true +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != true { + return Fail(t, "Should be true", msgAndArgs...) + } + + return true + +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != false { + return Fail(t, "Should be false", msgAndArgs...) + } + + return true + +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). 
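A short sketch of the collection helpers above (slice and map values are illustrative): Len fails for types that the builtin len() cannot take, and Contains matches substrings, slice elements, or map keys.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCollections(t *testing.T) {
	s := []string{"a", "b", "c"}
	assert.Len(t, s, 3)
	assert.Contains(t, s, "b")                      // slice element
	assert.Contains(t, "Hello World", "World")      // substring
	assert.Contains(t, map[string]int{"k": 1}, "k") // map key, not value
	assert.NotEqual(t, s, []string{"a"})
}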
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) + } + + return true + +} + +// containsElement try loop over the list check if the list includes the element. +// return (false, false) if impossible. +// return (true, false) if element was not found. +// return (true, true) if element was found. +func includeElement(list interface{}, element interface{}) (ok, found bool) { + + listValue := reflect.ValueOf(list) + elementValue := reflect.ValueOf(element) + defer func() { + if e := recover(); e != nil { + ok = false + found = false + } + }() + + if reflect.TypeOf(list).Kind() == reflect.String { + return true, strings.Contains(listValue.String(), elementValue.String()) + } + + if reflect.TypeOf(list).Kind() == reflect.Map { + mapKeys := listValue.MapKeys() + for i := 0; i < len(mapKeys); i++ { + if ObjectsAreEqual(mapKeys[i].Interface(), element) { + return true, true + } + } + return true, false + } + + for i := 0; i < listValue.Len(); i++ { + if ObjectsAreEqual(listValue.Index(i).Interface(), element) { + return true, true + } + } + return true, false + +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. 
Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. 
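Tying the pieces above together, a brief hedged example (the constants are arbitrary): InDelta takes an absolute tolerance, WithinDuration a time tolerance, and Panics/NotPanics run the function under test through didPanic.

package example_test

import (
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestTolerances(t *testing.T) {
	assert.InDelta(t, math.Pi, 22/7.0, 0.01)                         // |expected-actual| <= 0.01
	assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) // time tolerance
	assert.Panics(t, func() { panic("boom") })
	assert.NotPanics(t, func() {})
}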
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + if !result { + return result + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error %+v", err), msgAndArgs...) + } + + return true +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + + return true +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
+// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString, "An error was expected") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + if !NotNil(t, theError, "An error is expected but got nil. %s", message) { + return false + } + s := "An error with value \"%s\" is expected but got \"%s\". %s" + return Equal(t, errString, theError.Error(), + s, errString, theError.Error(), message) +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type and returns the truth. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) 
+ } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spew.Sdump(expected) + a := spew.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 00000000..c9dccc4d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. +// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 00000000..ac9dc9d1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. 
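A small sketch of the two helpers defined just above and below (the wrapped error is illustrative): JSONEq compares parsed JSON, so key order is irrelevant, and AnError is a stand-in error for tests that only care that some error occurred.

package example_test

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestJSONAndErrors(t *testing.T) {
	assert.JSONEq(t,
		`{"hello": "world", "foo": "bar"}`,
		`{"foo": "bar", "hello": "world"}`) // equivalent despite key ordering

	err := fmt.Errorf("read config: %v", assert.AnError)
	assert.Error(t, err)
}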
+var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 00000000..b867e95e --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 00000000..fa7ab89b --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,106 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 +// if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return -1 + } + handler(w, req) + return w.Code +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusOK && code <= http.StatusPartialContent +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusBadRequest +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. 
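A hedged sketch of the HTTP helpers above (the handler and path are invented): each assertion replays the handler against an httptest recorder internally, so no real server or listener is needed.

package example_test

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestHealthHandler(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprint(w, "ok")
	}
	assert.HTTPSuccess(t, handler, "GET", "/health", nil) // expects a 2xx status
	assert.HTTPBodyContains(t, handler, "GET", "/health", nil, "ok")
}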
+// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return !contains +} diff --git a/vendor/github.com/syndtr/goleveldb/LICENSE b/vendor/github.com/syndtr/goleveldb/LICENSE deleted file mode 100644 index 4a772d1a..00000000 --- a/vendor/github.com/syndtr/goleveldb/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2012 Suryandaru Triandana -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go deleted file mode 100644 index 22592000..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - "io" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// ErrBatchCorrupted records reason of batch corruption. 
This error will be -// wrapped with errors.ErrCorrupted. -type ErrBatchCorrupted struct { - Reason string -} - -func (e *ErrBatchCorrupted) Error() string { - return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) -} - -func newErrBatchCorrupted(reason string) error { - return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason}) -} - -const ( - batchHeaderLen = 8 + 4 - batchGrowRec = 3000 - batchBufioSize = 16 -) - -// BatchReplay wraps basic batch operations. -type BatchReplay interface { - Put(key, value []byte) - Delete(key []byte) -} - -type batchIndex struct { - keyType keyType - keyPos, keyLen int - valuePos, valueLen int -} - -func (index batchIndex) k(data []byte) []byte { - return data[index.keyPos : index.keyPos+index.keyLen] -} - -func (index batchIndex) v(data []byte) []byte { - if index.valueLen != 0 { - return data[index.valuePos : index.valuePos+index.valueLen] - } - return nil -} - -func (index batchIndex) kv(data []byte) (key, value []byte) { - return index.k(data), index.v(data) -} - -// Batch is a write batch. -type Batch struct { - data []byte - index []batchIndex - - // internalLen is sums of key/value pair length plus 8-bytes internal key. - internalLen int -} - -func (b *Batch) grow(n int) { - o := len(b.data) - if cap(b.data)-o < n { - div := 1 - if len(b.index) > batchGrowRec { - div = len(b.index) / batchGrowRec - } - ndata := make([]byte, o, o+n+o/div) - copy(ndata, b.data) - b.data = ndata - } -} - -func (b *Batch) appendRec(kt keyType, key, value []byte) { - n := 1 + binary.MaxVarintLen32 + len(key) - if kt == keyTypeVal { - n += binary.MaxVarintLen32 + len(value) - } - b.grow(n) - index := batchIndex{keyType: kt} - o := len(b.data) - data := b.data[:o+n] - data[o] = byte(kt) - o++ - o += binary.PutUvarint(data[o:], uint64(len(key))) - index.keyPos = o - index.keyLen = len(key) - o += copy(data[o:], key) - if kt == keyTypeVal { - o += binary.PutUvarint(data[o:], uint64(len(value))) - index.valuePos = o - index.valueLen = len(value) - o += copy(data[o:], value) - } - b.data = data[:o] - b.index = append(b.index, index) - b.internalLen += index.keyLen + index.valueLen + 8 -} - -// Put appends 'put operation' of the given key/value pair to the batch. -// It is safe to modify the contents of the argument after Put returns but not -// before. -func (b *Batch) Put(key, value []byte) { - b.appendRec(keyTypeVal, key, value) -} - -// Delete appends 'delete operation' of the given key to the batch. -// It is safe to modify the contents of the argument after Delete returns but -// not before. -func (b *Batch) Delete(key []byte) { - b.appendRec(keyTypeDel, key, nil) -} - -// Dump dumps batch contents. The returned slice can be loaded into the -// batch using Load method. -// The returned slice is not its own copy, so the contents should not be -// modified. -func (b *Batch) Dump() []byte { - return b.data -} - -// Load loads given slice into the batch. Previous contents of the batch -// will be discarded. -// The given slice will not be copied and will be used as batch buffer, so -// it is not safe to modify the contents of the slice. -func (b *Batch) Load(data []byte) error { - return b.decode(data, -1) -} - -// Replay replays batch contents. -func (b *Batch) Replay(r BatchReplay) error { - for _, index := range b.index { - switch index.keyType { - case keyTypeVal: - r.Put(index.k(b.data), index.v(b.data)) - case keyTypeDel: - r.Delete(index.k(b.data)) - } - } - return nil -} - -// Len returns number of records in the batch. 
-func (b *Batch) Len() int { - return len(b.index) -} - -// Reset resets the batch. -func (b *Batch) Reset() { - b.data = b.data[:0] - b.index = b.index[:0] - b.internalLen = 0 -} - -func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error { - for i, index := range b.index { - if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil { - return err - } - } - return nil -} - -func (b *Batch) append(p *Batch) { - ob := len(b.data) - oi := len(b.index) - b.data = append(b.data, p.data...) - b.index = append(b.index, p.index...) - b.internalLen += p.internalLen - - // Updating index offset. - if ob != 0 { - for ; oi < len(b.index); oi++ { - index := &b.index[oi] - index.keyPos += ob - if index.valueLen != 0 { - index.valuePos += ob - } - } - } -} - -func (b *Batch) decode(data []byte, expectedLen int) error { - b.data = data - b.index = b.index[:0] - b.internalLen = 0 - err := decodeBatch(data, func(i int, index batchIndex) error { - b.index = append(b.index, index) - b.internalLen += index.keyLen + index.valueLen + 8 - return nil - }) - if err != nil { - return err - } - if expectedLen >= 0 && len(b.index) != expectedLen { - return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index))) - } - return nil -} - -func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error { - var ik []byte - for i, index := range b.index { - ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) - if err := mdb.Put(ik, index.v(b.data)); err != nil { - return err - } - } - return nil -} - -func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error { - var ik []byte - for i, index := range b.index { - ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) - if err := mdb.Delete(ik); err != nil { - return err - } - } - return nil -} - -func newBatch() interface{} { - return &Batch{} -} - -func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error { - var index batchIndex - for i, o := 0, 0; o < len(data); i++ { - // Key type. - index.keyType = keyType(data[o]) - if index.keyType > keyTypeVal { - return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType))) - } - o++ - - // Key. - x, n := binary.Uvarint(data[o:]) - o += n - if n <= 0 || o+int(x) > len(data) { - return newErrBatchCorrupted("bad record: invalid key length") - } - index.keyPos = o - index.keyLen = int(x) - o += index.keyLen - - // Value. 
- if index.keyType == keyTypeVal { - x, n = binary.Uvarint(data[o:]) - o += n - if n <= 0 || o+int(x) > len(data) { - return newErrBatchCorrupted("bad record: invalid value length") - } - index.valuePos = o - index.valueLen = int(x) - o += index.valueLen - } else { - index.valuePos = 0 - index.valueLen = 0 - } - - if err := fn(i, index); err != nil { - return err - } - } - return nil -} - -func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) { - seq, batchLen, err = decodeBatchHeader(data) - if err != nil { - return 0, 0, err - } - if seq < expectSeq { - return 0, 0, newErrBatchCorrupted("invalid sequence number") - } - data = data[batchHeaderLen:] - var ik []byte - var decodedLen int - err = decodeBatch(data, func(i int, index batchIndex) error { - if i >= batchLen { - return newErrBatchCorrupted("invalid records length") - } - ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType) - if err := mdb.Put(ik, index.v(data)); err != nil { - return err - } - decodedLen++ - return nil - }) - if err == nil && decodedLen != batchLen { - err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen)) - } - return -} - -func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte { - dst = ensureBuffer(dst, batchHeaderLen) - binary.LittleEndian.PutUint64(dst, seq) - binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen)) - return dst -} - -func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) { - if len(data) < batchHeaderLen { - return 0, 0, newErrBatchCorrupted("too short") - } - - seq = binary.LittleEndian.Uint64(data) - batchLen = int(binary.LittleEndian.Uint32(data[8:])) - if batchLen < 0 { - return 0, 0, newErrBatchCorrupted("invalid records length") - } - return -} - -func batchesLen(batches []*Batch) int { - batchLen := 0 - for _, batch := range batches { - batchLen += batch.Len() - } - return batchLen -} - -func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error { - if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil { - return err - } - for _, batch := range batches { - if _, err := wr.Write(batch.data); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go deleted file mode 100644 index c5940b23..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ /dev/null @@ -1,705 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package cache provides interface and implementation of a cache algorithms. -package cache - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Cacher provides interface to implements a caching functionality. -// An implementation must be safe for concurrent use. -type Cacher interface { - // Capacity returns cache capacity. - Capacity() int - - // SetCapacity sets cache capacity. - SetCapacity(capacity int) - - // Promote promotes the 'cache node'. - Promote(n *Node) - - // Ban evicts the 'cache node' and prevent subsequent 'promote'. - Ban(n *Node) - - // Evict evicts the 'cache node'. - Evict(n *Node) - - // EvictNS evicts 'cache node' with the given namespace. - EvictNS(ns uint64) - - // EvictAll evicts all 'cache node'. 
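The header helpers above fix the journal batch header at batchHeaderLen = 12 bytes: an 8-byte little-endian sequence number followed by a 4-byte little-endian record count. A standalone sketch of the same layout, using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

const batchHeaderLen = 8 + 4 // uint64 seq + uint32 record count

func main() {
	// Encode seq=7 with 2 records, mirroring encodeBatchHeader.
	hdr := make([]byte, batchHeaderLen)
	binary.LittleEndian.PutUint64(hdr, 7)
	binary.LittleEndian.PutUint32(hdr[8:], 2)

	// Decode it back, mirroring decodeBatchHeader.
	seq := binary.LittleEndian.Uint64(hdr)
	n := int(binary.LittleEndian.Uint32(hdr[8:]))
	fmt.Println(seq, n) // 7 2
}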
- EvictAll() - - // Close closes the 'cache tree' - Close() error -} - -// Value is a 'cacheable object'. It may implements util.Releaser, if -// so the the Release method will be called once object is released. -type Value interface{} - -// NamespaceGetter provides convenient wrapper for namespace. -type NamespaceGetter struct { - Cache *Cache - NS uint64 -} - -// Get simply calls Cache.Get() method. -func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { - return g.Cache.Get(g.NS, key, setFunc) -} - -// The hash tables implementation is based on: -// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, -// Kunlong Zhang, and Michael Spear. -// ACM Symposium on Principles of Distributed Computing, Jul 2014. - -const ( - mInitialSize = 1 << 4 - mOverflowThreshold = 1 << 5 - mOverflowGrowThreshold = 1 << 7 -) - -type mBucket struct { - mu sync.Mutex - node []*Node - frozen bool -} - -func (b *mBucket) freeze() []*Node { - b.mu.Lock() - defer b.mu.Unlock() - if !b.frozen { - b.frozen = true - } - return b.node -} - -func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. - for _, n := range b.node { - if n.hash == hash && n.ns == ns && n.key == key { - atomic.AddInt32(&n.ref, 1) - b.mu.Unlock() - return true, false, n - } - } - - // Get only. - if noset { - b.mu.Unlock() - return true, false, nil - } - - // Create node. - n = &Node{ - r: r, - hash: hash, - ns: ns, - key: key, - ref: 1, - } - // Add node to bucket. - b.node = append(b.node, n) - bLen := len(b.node) - b.mu.Unlock() - - // Update counter. - grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold - if bLen > mOverflowThreshold { - grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold - } - - // Grow. - if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) << 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - - return true, true, n -} - -func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. - var ( - n *Node - bLen int - ) - for i := range b.node { - n = b.node[i] - if n.ns == ns && n.key == key { - if atomic.LoadInt32(&n.ref) == 0 { - deleted = true - - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Remove node from bucket. - b.node = append(b.node[:i], b.node[i+1:]...) - bLen = len(b.node) - } - break - } - } - b.mu.Unlock() - - if deleted { - // Call OnDel. - for _, f := range n.onDel { - f() - } - - // Update counter. - atomic.AddInt32(&r.size, int32(n.size)*-1) - shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold - if bLen >= mOverflowThreshold { - atomic.AddInt32(&h.overflow, -1) - } - - // Shrink. 
- if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) >> 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - } - - return true, deleted -} - -type mNode struct { - buckets []unsafe.Pointer // []*mBucket - mask uint32 - pred unsafe.Pointer // *mNode - resizeInProgess int32 - - overflow int32 - growThreshold int32 - shrinkThreshold int32 -} - -func (n *mNode) initBucket(i uint32) *mBucket { - if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { - return b - } - - p := (*mNode)(atomic.LoadPointer(&n.pred)) - if p != nil { - var node []*Node - if n.mask > p.mask { - // Grow. - pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) - if pb == nil { - pb = p.initBucket(i & p.mask) - } - m := pb.freeze() - // Split nodes. - for _, x := range m { - if x.hash&n.mask == i { - node = append(node, x) - } - } - } else { - // Shrink. - pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) - if pb0 == nil { - pb0 = p.initBucket(i) - } - pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) - if pb1 == nil { - pb1 = p.initBucket(i + uint32(len(n.buckets))) - } - m0 := pb0.freeze() - m1 := pb1.freeze() - // Merge nodes. - node = make([]*Node, 0, len(m0)+len(m1)) - node = append(node, m0...) - node = append(node, m1...) - } - b := &mBucket{node: node} - if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { - if len(node) > mOverflowThreshold { - atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) - } - return b - } - } - - return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) -} - -func (n *mNode) initBuckets() { - for i := range n.buckets { - n.initBucket(uint32(i)) - } - atomic.StorePointer(&n.pred, nil) -} - -// Cache is a 'cache map'. -type Cache struct { - mu sync.RWMutex - mHead unsafe.Pointer // *mNode - nodes int32 - size int32 - cacher Cacher - closed bool -} - -// NewCache creates a new 'cache map'. The cacher is optional and -// may be nil. -func NewCache(cacher Cacher) *Cache { - h := &mNode{ - buckets: make([]unsafe.Pointer, mInitialSize), - mask: mInitialSize - 1, - growThreshold: int32(mInitialSize * mOverflowThreshold), - shrinkThreshold: 0, - } - for i := range h.buckets { - h.buckets[i] = unsafe.Pointer(&mBucket{}) - } - r := &Cache{ - mHead: unsafe.Pointer(h), - cacher: cacher, - } - return r -} - -func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { - h := (*mNode)(atomic.LoadPointer(&r.mHead)) - i := hash & h.mask - b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) - if b == nil { - b = h.initBucket(i) - } - return h, b -} - -func (r *Cache) delete(n *Node) bool { - for { - h, b := r.getBucket(n.hash) - done, deleted := b.delete(r, h, n.hash, n.ns, n.key) - if done { - return deleted - } - } - return false -} - -// Nodes returns number of 'cache node' in the map. -func (r *Cache) Nodes() int { - return int(atomic.LoadInt32(&r.nodes)) -} - -// Size returns sums of 'cache node' size in the map. -func (r *Cache) Size() int { - return int(atomic.LoadInt32(&r.size)) -} - -// Capacity returns cache capacity. 
-func (r *Cache) Capacity() int { - if r.cacher == nil { - return 0 - } - return r.cacher.Capacity() -} - -// SetCapacity sets cache capacity. -func (r *Cache) SetCapacity(capacity int) { - if r.cacher != nil { - r.cacher.SetCapacity(capacity) - } -} - -// Get gets 'cache node' with the given namespace and key. -// If cache node is not found and setFunc is not nil, Get will atomically creates -// the 'cache node' by calling setFunc. Otherwise Get will returns nil. -// -// The returned 'cache handle' should be released after use by calling Release -// method. -func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return nil - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) - if done { - if n != nil { - n.mu.Lock() - if n.value == nil { - if setFunc == nil { - n.mu.Unlock() - n.unref() - return nil - } - - n.size, n.value = setFunc() - if n.value == nil { - n.size = 0 - n.mu.Unlock() - n.unref() - return nil - } - atomic.AddInt32(&r.size, int32(n.size)) - } - n.mu.Unlock() - if r.cacher != nil { - r.cacher.Promote(n) - } - return &Handle{unsafe.Pointer(n)} - } - - break - } - } - return nil -} - -// Delete removes and ban 'cache node' with the given namespace and key. -// A banned 'cache node' will never inserted into the 'cache tree'. Ban -// only attributed to the particular 'cache node', so when a 'cache node' -// is recreated it will not be banned. -// -// If onDel is not nil, then it will be executed if such 'cache node' -// doesn't exist or once the 'cache node' is released. -// -// Delete return true is such 'cache node' exist. -func (r *Cache) Delete(ns, key uint64, onDel func()) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if onDel != nil { - n.mu.Lock() - n.onDel = append(n.onDel, onDel) - n.mu.Unlock() - } - if r.cacher != nil { - r.cacher.Ban(n) - } - n.unref() - return true - } - - break - } - } - - if onDel != nil { - onDel() - } - - return false -} - -// Evict evicts 'cache node' with the given namespace and key. This will -// simply call Cacher.Evict. -// -// Evict return true is such 'cache node' exist. -func (r *Cache) Evict(ns, key uint64) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if r.cacher != nil { - r.cacher.Evict(n) - } - n.unref() - return true - } - - break - } - } - - return false -} - -// EvictNS evicts 'cache node' with the given namespace. This will -// simply call Cacher.EvictNS. -func (r *Cache) EvictNS(ns uint64) { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictNS(ns) - } -} - -// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll. -func (r *Cache) EvictAll() { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictAll() - } -} - -// Close closes the 'cache map' and forcefully releases all 'cache node'. 
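Get above either bumps the ref of an existing node or runs setFunc exactly once under the node lock; each successful Get hands the caller one handle that must be released. A usage sketch with a nil Cacher, assuming the upstream import path:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(nil) // nil Cacher: no LRU, nothing is promoted or evicted
	defer c.Close()

	// Miss: setFunc runs once and pins "foo" with size 3.
	h := c.Get(1, 42, func() (int, cache.Value) { return 3, "foo" })
	fmt.Println(h.Value()) // foo
	h.Release()            // safe to call more than once

	// Lookup-only (nil setFunc) on an absent key returns nil.
	if c.Get(1, 99, nil) == nil {
		fmt.Println("no such node")
	}
}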
-func (r *Cache) Close() error { - r.mu.Lock() - if !r.closed { - r.closed = true - - h := (*mNode)(r.mHead) - h.initBuckets() - - for i := range h.buckets { - b := (*mBucket)(h.buckets[i]) - for _, n := range b.node { - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Call OnDel. - for _, f := range n.onDel { - f() - } - n.onDel = nil - } - } - } - r.mu.Unlock() - - // Avoid deadlock. - if r.cacher != nil { - if err := r.cacher.Close(); err != nil { - return err - } - } - return nil -} - -// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but -// unlike Close it doesn't forcefully releases 'cache node'. -func (r *Cache) CloseWeak() error { - r.mu.Lock() - if !r.closed { - r.closed = true - } - r.mu.Unlock() - - // Avoid deadlock. - if r.cacher != nil { - r.cacher.EvictAll() - if err := r.cacher.Close(); err != nil { - return err - } - } - return nil -} - -// Node is a 'cache node'. -type Node struct { - r *Cache - - hash uint32 - ns, key uint64 - - mu sync.Mutex - size int - value Value - - ref int32 - onDel []func() - - CacheData unsafe.Pointer -} - -// NS returns this 'cache node' namespace. -func (n *Node) NS() uint64 { - return n.ns -} - -// Key returns this 'cache node' key. -func (n *Node) Key() uint64 { - return n.key -} - -// Size returns this 'cache node' size. -func (n *Node) Size() int { - return n.size -} - -// Value returns this 'cache node' value. -func (n *Node) Value() Value { - return n.value -} - -// Ref returns this 'cache node' ref counter. -func (n *Node) Ref() int32 { - return atomic.LoadInt32(&n.ref) -} - -// GetHandle returns an handle for this 'cache node'. -func (n *Node) GetHandle() *Handle { - if atomic.AddInt32(&n.ref, 1) <= 1 { - panic("BUG: Node.GetHandle on zero ref") - } - return &Handle{unsafe.Pointer(n)} -} - -func (n *Node) unref() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.delete(n) - } -} - -func (n *Node) unrefLocked() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.mu.RLock() - if !n.r.closed { - n.r.delete(n) - } - n.r.mu.RUnlock() - } -} - -// Handle is a 'cache handle' of a 'cache node'. -type Handle struct { - n unsafe.Pointer // *Node -} - -// Value returns the value of the 'cache node'. -func (h *Handle) Value() Value { - n := (*Node)(atomic.LoadPointer(&h.n)) - if n != nil { - return n.value - } - return nil -} - -// Release releases this 'cache handle'. -// It is safe to call release multiple times. -func (h *Handle) Release() { - nPtr := atomic.LoadPointer(&h.n) - if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { - n := (*Node)(nPtr) - n.unrefLocked() - } -} - -func murmur32(ns, key uint64, seed uint32) uint32 { - const ( - m = uint32(0x5bd1e995) - r = 24 - ) - - k1 := uint32(ns >> 32) - k2 := uint32(ns) - k3 := uint32(key >> 32) - k4 := uint32(key) - - k1 *= m - k1 ^= k1 >> r - k1 *= m - - k2 *= m - k2 ^= k2 >> r - k2 *= m - - k3 *= m - k3 ^= k3 >> r - k3 *= m - - k4 *= m - k4 ^= k4 >> r - k4 *= m - - h := seed - - h *= m - h ^= k1 - h *= m - h ^= k2 - h *= m - h ^= k3 - h *= m - h ^= k4 - - h ^= h >> 13 - h *= m - h ^= h >> 15 - - return h -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go deleted file mode 100644 index d9a84cde..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package cache - -import ( - "sync" - "unsafe" -) - -type lruNode struct { - n *Node - h *Handle - ban bool - - next, prev *lruNode -} - -func (n *lruNode) insert(at *lruNode) { - x := at.next - at.next = n - n.prev = at - n.next = x - x.prev = n -} - -func (n *lruNode) remove() { - if n.prev != nil { - n.prev.next = n.next - n.next.prev = n.prev - n.prev = nil - n.next = nil - } else { - panic("BUG: removing removed node") - } -} - -type lru struct { - mu sync.Mutex - capacity int - used int - recent lruNode -} - -func (r *lru) reset() { - r.recent.next = &r.recent - r.recent.prev = &r.recent - r.used = 0 -} - -func (r *lru) Capacity() int { - r.mu.Lock() - defer r.mu.Unlock() - return r.capacity -} - -func (r *lru) SetCapacity(capacity int) { - var evicted []*lruNode - - r.mu.Lock() - r.capacity = capacity - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Promote(n *Node) { - var evicted []*lruNode - - r.mu.Lock() - if n.CacheData == nil { - if n.Size() <= r.capacity { - rn := &lruNode{n: n, h: n.GetHandle()} - rn.insert(&r.recent) - n.CacheData = unsafe.Pointer(rn) - r.used += n.Size() - - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.insert(&r.recent) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Ban(n *Node) { - r.mu.Lock() - if n.CacheData == nil { - n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.ban = true - r.used -= rn.n.Size() - r.mu.Unlock() - - rn.h.Release() - rn.h = nil - return - } - } - r.mu.Unlock() -} - -func (r *lru) Evict(n *Node) { - r.mu.Lock() - rn := (*lruNode)(n.CacheData) - if rn == nil || rn.ban { - r.mu.Unlock() - return - } - n.CacheData = nil - r.mu.Unlock() - - rn.h.Release() -} - -func (r *lru) EvictNS(ns uint64) { - var evicted []*lruNode - - r.mu.Lock() - for e := r.recent.prev; e != &r.recent; { - rn := e - e = e.prev - if rn.n.NS() == ns { - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) EvictAll() { - r.mu.Lock() - back := r.recent.prev - for rn := back; rn != &r.recent; rn = rn.prev { - rn.n.CacheData = nil - } - r.reset() - r.mu.Unlock() - - for rn := back; rn != &r.recent; rn = rn.prev { - rn.h.Release() - } -} - -func (r *lru) Close() error { - return nil -} - -// NewLRU create a new LRU-cache. -func NewLRU(capacity int) Cacher { - r := &lru{capacity: capacity} - r.reset() - return r -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go deleted file mode 100644 index 448402b8..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
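Since NewLRU returns a Cacher, the LRU policy above plugs directly into the cache map from cache.go. A sketch; the eviction outcome follows from the used/capacity accounting shown in Promote:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// Capacity 8: adding a second 5-byte value pushes used to 10,
	// so Promote evicts the least-recently-used entry.
	c := cache.NewCache(cache.NewLRU(8))
	defer c.Close()

	h1 := c.Get(0, 1, func() (int, cache.Value) { return 5, "first" })
	h1.Release()
	h2 := c.Get(0, 2, func() (int, cache.Value) { return 5, "second" })
	h2.Release()

	fmt.Println(c.Nodes(), c.Size()) // expected: 1 5 after eviction
}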
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -type iComparer struct { - ucmp comparer.Comparer -} - -func (icmp *iComparer) uName() string { - return icmp.ucmp.Name() -} - -func (icmp *iComparer) uCompare(a, b []byte) int { - return icmp.ucmp.Compare(a, b) -} - -func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { - return icmp.ucmp.Separator(dst, a, b) -} - -func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { - return icmp.ucmp.Successor(dst, b) -} - -func (icmp *iComparer) Name() string { - return icmp.uName() -} - -func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey()) - if x == 0 { - if m, n := internalKey(a).num(), internalKey(b).num(); m > n { - return -1 - } else if m < n { - return 1 - } - } - return x -} - -func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := internalKey(a).ukey(), internalKey(b).ukey() - dst = icmp.uSeparator(dst, ua, ub) - if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - // Append earliest possible number. - return append(dst, keyMaxNumBytes...) - } - return nil -} - -func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := internalKey(b).ukey() - dst = icmp.uSuccessor(dst, ub) - if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - // Append earliest possible number. - return append(dst, keyMaxNumBytes...) - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go deleted file mode 100644 index 14dddf88..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package comparer - -import "bytes" - -type bytesComparer struct{} - -func (bytesComparer) Compare(a, b []byte) int { - return bytes.Compare(a, b) -} - -func (bytesComparer) Name() string { - return "leveldb.BytewiseComparator" -} - -func (bytesComparer) Separator(dst, a, b []byte) []byte { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && a[i] == b[i]; i++ { - } - if i >= n { - // Do not shorten if one string is a prefix of the other - } else if c := a[i]; c < 0xff && c+1 < b[i] { - dst = append(dst, a[:i+1]...) - dst[i]++ - return dst - } - return nil -} - -func (bytesComparer) Successor(dst, b []byte) []byte { - for i, c := range b { - if c != 0xff { - dst = append(dst, b[:i+1]...) - dst[i]++ - return dst - } - } - return nil -} - -// DefaultComparer are default implementation of the Comparer interface. -// It uses the natural ordering, consistent with bytes.Compare. -var DefaultComparer = bytesComparer{} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go deleted file mode 100644 index 14a28f16..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
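The shortening rules in bytes_comparer.go are easy to check by hand: Separator only shortens when neither key is a prefix of the other, and Successor bumps the first byte below 0xff. Worked examples:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	c := comparer.DefaultComparer

	// 'c'+1 = 'd' < 'f', so "abc" is shortened to "abd" (still < "abf").
	fmt.Printf("%q\n", c.Separator(nil, []byte("abc"), []byte("abf"))) // "abd"

	// "ab" is a prefix of "abc": no shortening, nil comes back.
	fmt.Printf("%q\n", c.Separator(nil, []byte("ab"), []byte("abc"))) // ""

	// First byte below 0xff is incremented and the rest is dropped.
	fmt.Printf("%q\n", c.Successor(nil, []byte("abc"))) // "b"
}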
- -// Package comparer provides interface and implementation for ordering -// sets of data. -package comparer - -// BasicComparer is the interface that wraps the basic Compare method. -type BasicComparer interface { - // Compare returns -1, 0, or +1 depending on whether a is 'less than', - // 'equal to' or 'greater than' b. The two arguments can only be 'equal' - // if their contents are exactly equal. Furthermore, the empty slice - // must be 'less than' any non-empty slice. - Compare(a, b []byte) int -} - -// Comparer defines a total ordering over the space of []byte keys: a 'less -// than' relationship. -type Comparer interface { - BasicComparer - - // Name returns name of the comparer. - // - // The Level-DB on-disk format stores the comparer name, and opening a - // database with a different comparer from the one it was created with - // will result in an error. - // - // An implementation to a new name whenever the comparer implementation - // changes in a way that will cause the relative ordering of any two keys - // to change. - // - // Names starting with "leveldb." are reserved and should not be used - // by any users of this package. - Name() string - - // Bellow are advanced functions used used to reduce the space requirements - // for internal data structures such as index blocks. - - // Separator appends a sequence of bytes x to dst such that a <= x && x < b, - // where 'less than' is consistent with Compare. An implementation should - // return nil if x equal to a. - // - // Either contents of a or b should not by any means modified. Doing so - // may cause corruption on the internal state. - Separator(dst, a, b []byte) []byte - - // Successor appends a sequence of bytes x to dst such that x >= b, where - // 'less than' is consistent with Compare. An implementation should return - // nil if x equal to b. - // - // Contents of b should not by any means modified. Doing so may cause - // corruption on the internal state. - Successor(dst, b []byte) []byte -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go deleted file mode 100644 index e7ac0654..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db.go +++ /dev/null @@ -1,1175 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "io" - "os" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// DB is a LevelDB database. -type DB struct { - // Need 64-bit alignment. - seq uint64 - - // Stats. Need 64-bit alignment. - cWriteDelay int64 // The cumulative duration of write delays - cWriteDelayN int32 // The cumulative number of write delays - inWritePaused int32 // The indicator whether write operation is paused by compaction - aliveSnaps, aliveIters int32 - - // Session. - s *session - - // MemDB. 
- memMu sync.RWMutex - memPool chan *memdb.DB - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFd storage.FileDesc - frozenJournalFd storage.FileDesc - frozenSeq uint64 - - // Snapshot. - snapsMu sync.Mutex - snapsList *list.List - - // Write. - batchPool sync.Pool - writeMergeC chan writeMerge - writeMergedC chan bool - writeLockC chan struct{} - writeAckC chan error - writeDelay time.Duration - writeDelayN int - tr *Transaction - - // Compaction. - compCommitLk sync.Mutex - tcompCmdC chan cCmd - tcompPauseC chan chan<- struct{} - mcompCmdC chan cCmd - compErrC chan error - compPerErrC chan error - compErrSetC chan error - compWriteLocking bool - compStats cStats - memdbMaxLevel int // For testing. - - // Close. - closeW sync.WaitGroup - closeC chan struct{} - closed uint32 - closer io.Closer -} - -func openDB(s *session) (*DB, error) { - s.log("db@open opening") - start := time.Now() - db := &DB{ - s: s, - // Initial sequence - seq: s.stSeqNum, - // MemDB - memPool: make(chan *memdb.DB, 1), - // Snapshot - snapsList: list.New(), - // Write - batchPool: sync.Pool{New: newBatch}, - writeMergeC: make(chan writeMerge), - writeMergedC: make(chan bool), - writeLockC: make(chan struct{}, 1), - writeAckC: make(chan error), - // Compaction - tcompCmdC: make(chan cCmd), - tcompPauseC: make(chan chan<- struct{}), - mcompCmdC: make(chan cCmd), - compErrC: make(chan error), - compPerErrC: make(chan error), - compErrSetC: make(chan error), - // Close - closeC: make(chan struct{}), - } - - // Read-only mode. - readOnly := s.o.GetReadOnly() - - if readOnly { - // Recover journals (read-only mode). - if err := db.recoverJournalRO(); err != nil { - return nil, err - } - } else { - // Recover journals. - if err := db.recoverJournal(); err != nil { - return nil, err - } - - // Remove any obsolete files. - if err := db.checkAndCleanFiles(); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return nil, err - } - - } - - // Doesn't need to be included in the wait group. - go db.compactionError() - go db.mpoolDrain() - - if readOnly { - db.SetReadOnly() - } else { - db.closeW.Add(2) - go db.tCompaction() - go db.mCompaction() - // go db.jWriter() - } - - s.logf("db@open done T·%v", time.Since(start)) - - runtime.SetFinalizer(db, (*DB).Close) - return db, nil -} - -// Open opens or creates a DB for the given storage. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist Open will returns -// os.ErrExist error. -// -// Open will return an error with type of ErrCorrupted if corruption -// detected in the DB. Use errors.IsCorrupted to test whether an error is -// due to corruption. Corrupted DB can be recovered with Recover function. -// -// The returned DB instance is safe for concurrent use. -// The DB must be closed after use, by calling Close method. -func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = s.recover() - if err != nil { - if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { - return - } - err = s.create() - if err != nil { - return - } - } else if s.o.GetErrorIfExist() { - err = os.ErrExist - return - } - - return openDB(s) -} - -// OpenFile opens or creates a DB for the given path. -// The DB will be created if not exist, unless ErrorIfMissing is true. 
-// Also, if ErrorIfExist is true and the DB exist OpenFile will returns -// os.ErrExist error. -// -// OpenFile uses standard file-system backed storage implementation as -// described in the leveldb/storage package. -// -// OpenFile will return an error with type of ErrCorrupted if corruption -// detected in the DB. Use errors.IsCorrupted to test whether an error is -// due to corruption. Corrupted DB can be recovered with Recover function. -// -// The returned DB instance is safe for concurrent use. -// The DB must be closed after use, by calling Close method. -func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path, o.GetReadOnly()) - if err != nil { - return - } - db, err = Open(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -// Recover recovers and opens a DB with missing or corrupted manifest files -// for the given storage. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// The returned DB instance is safe for concurrent use. -// The DB must be closed after use, by calling Close method. -func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = recoverTable(s, o) - if err != nil { - return - } - return openDB(s) -} - -// RecoverFile recovers and opens a DB with missing or corrupted manifest files -// for the given path. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// RecoverFile uses standard file-system backed storage implementation as described -// in the leveldb/storage package. -// -// The returned DB instance is safe for concurrent use. -// The DB must be closed after use, by calling Close method. -func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path, false) - if err != nil { - return - } - db, err = Recover(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -func recoverTable(s *session, o *opt.Options) error { - o = dupOptions(o) - // Mask StrictReader, lets StrictRecovery doing its job. - o.Strict &= ^opt.StrictReader - - // Get all tables and sort it by file number. - fds, err := s.stor.List(storage.TypeTable) - if err != nil { - return err - } - sortFds(fds) - - var ( - maxSeq uint64 - recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int - - // We will drop corrupted table. - strict = o.GetStrict(opt.StrictRecovery) - noSync = o.GetNoSync() - - rec = &sessionRecord{} - bpool = util.NewBufferPool(o.GetBlockSize() + 5) - ) - buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) { - tmpFd = s.newTemp() - writer, err := s.stor.Create(tmpFd) - if err != nil { - return - } - defer func() { - writer.Close() - if err != nil { - s.stor.Remove(tmpFd) - tmpFd = storage.FileDesc{} - } - }() - - // Copy entries. 
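OpenFile above is the usual entry point. A minimal open/put/get sketch; the /tmp path is illustrative, and Put comes from the package's write path rather than the code shown here:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// Created on first use, since ErrorIfMissing is left unset.
	db, err := leveldb.OpenFile("/tmp/goleveldb-demo", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
		log.Fatal(err)
	}
	v, err := db.Get([]byte("k"), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", v) // v
}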
- tw := table.NewWriter(writer, o) - for iter.Next() { - key := iter.Key() - if validInternalKey(key) { - err = tw.Append(key, iter.Value()) - if err != nil { - return - } - } - } - err = iter.Error() - if err != nil && !errors.IsCorrupted(err) { - return - } - err = tw.Close() - if err != nil { - return - } - if !noSync { - err = writer.Sync() - if err != nil { - return - } - } - size = int64(tw.BytesLen()) - return - } - recoverTable := func(fd storage.FileDesc) error { - s.logf("table@recovery recovering @%d", fd.Num) - reader, err := s.stor.Open(fd) - if err != nil { - return err - } - var closed bool - defer func() { - if !closed { - reader.Close() - } - }() - - // Get file size. - size, err := reader.Seek(0, 2) - if err != nil { - return err - } - - var ( - tSeq uint64 - tgoodKey, tcorruptedKey, tcorruptedBlock int - imin, imax []byte - ) - tr, err := table.NewReader(reader, size, fd, nil, bpool, o) - if err != nil { - return err - } - iter := tr.NewIterator(nil, nil) - if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { - itererr.SetErrorCallback(func(err error) { - if errors.IsCorrupted(err) { - s.logf("table@recovery block corruption @%d %q", fd.Num, err) - tcorruptedBlock++ - } - }) - } - - // Scan the table. - for iter.Next() { - key := iter.Key() - _, seq, _, kerr := parseInternalKey(key) - if kerr != nil { - tcorruptedKey++ - continue - } - tgoodKey++ - if seq > tSeq { - tSeq = seq - } - if imin == nil { - imin = append([]byte{}, key...) - } - imax = append(imax[:0], key...) - } - if err := iter.Error(); err != nil && !errors.IsCorrupted(err) { - iter.Release() - return err - } - iter.Release() - - goodKey += tgoodKey - corruptedKey += tcorruptedKey - corruptedBlock += tcorruptedBlock - - if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { - droppedTable++ - s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - return nil - } - - if tgoodKey > 0 { - if tcorruptedKey > 0 || tcorruptedBlock > 0 { - // Rebuild the table. - s.logf("table@recovery rebuilding @%d", fd.Num) - iter := tr.NewIterator(nil, nil) - tmpFd, newSize, err := buildTable(iter) - iter.Release() - if err != nil { - return err - } - closed = true - reader.Close() - if err := s.stor.Rename(tmpFd, fd); err != nil { - return err - } - size = newSize - } - if tSeq > maxSeq { - maxSeq = tSeq - } - recoveredKey += tgoodKey - // Add table to level 0. - rec.addTable(0, fd.Num, size, imin, imax) - s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - } else { - droppedTable++ - s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size) - } - - return nil - } - - // Recover all tables. - if len(fds) > 0 { - s.logf("table@recovery F·%d", len(fds)) - - // Mark file number as used. - s.markFileNum(fds[len(fds)-1].Num) - - for _, fd := range fds { - if err := recoverTable(fd); err != nil { - return err - } - } - - s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq) - } - - // Set sequence number. - rec.setSeqNum(maxSeq) - - // Create new manifest. - if err := s.create(); err != nil { - return err - } - - // Commit. - return s.commit(rec) -} - -func (db *DB) recoverJournal() error { - // Get all journals and sort it by file number. 
- rawFds, err := db.s.stor.List(storage.TypeJournal) - if err != nil { - return err - } - sortFds(rawFds) - - // Journals that will be recovered. - var fds []storage.FileDesc - for _, fd := range rawFds { - if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { - fds = append(fds, fd) - } - } - - var ( - ofd storage.FileDesc // Obsolete file. - rec = &sessionRecord{} - ) - - // Recover journals. - if len(fds) > 0 { - db.logf("journal@recovery F·%d", len(fds)) - - // Mark file number as used. - db.s.markFileNum(fds[len(fds)-1].Num) - - var ( - // Options. - strict = db.s.o.GetStrict(opt.StrictJournal) - checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer = db.s.o.GetWriteBuffer() - - jr *journal.Reader - mdb = memdb.New(db.s.icmp, writeBuffer) - buf = &util.Buffer{} - batchSeq uint64 - batchLen int - ) - - for _, fd := range fds { - db.logf("journal@recovery recovering @%d", fd.Num) - - fr, err := db.s.stor.Open(fd) - if err != nil { - return err - } - - // Create or reset journal reader instance. - if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) - } else { - jr.Reset(fr, dropper{db.s, fd}, strict, checksum) - } - - // Flush memdb and remove obsolete journal file. - if !ofd.Zero() { - if mdb.Len() > 0 { - if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { - fr.Close() - return err - } - } - - rec.setJournalNum(fd.Num) - rec.setSeqNum(db.seq) - if err := db.s.commit(rec); err != nil { - fr.Close() - return err - } - rec.resetAddedTables() - - db.s.stor.Remove(ofd) - ofd = storage.FileDesc{} - } - - // Replay journal to memdb. - mdb.Reset() - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - - fr.Close() - return errors.SetFd(err, fd) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. - continue - } - - fr.Close() - return errors.SetFd(err, fd) - } - batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) - if err != nil { - if !strict && errors.IsCorrupted(err) { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - - fr.Close() - return errors.SetFd(err, fd) - } - - // Save sequence number. - db.seq = batchSeq + uint64(batchLen) - - // Flush it if large enough. - if mdb.Size() >= writeBuffer { - if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { - fr.Close() - return err - } - - mdb.Reset() - } - } - - fr.Close() - ofd = fd - } - - // Flush the last memdb. - if mdb.Len() > 0 { - if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { - return err - } - } - } - - // Create a new journal. - if _, err := db.newMem(0); err != nil { - return err - } - - // Commit. - rec.setJournalNum(db.journalFd.Num) - rec.setSeqNum(db.seq) - if err := db.s.commit(rec); err != nil { - // Close journal on error. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return err - } - - // Remove the last obsolete journal file. - if !ofd.Zero() { - db.s.stor.Remove(ofd) - } - - return nil -} - -func (db *DB) recoverJournalRO() error { - // Get all journals and sort it by file number. - rawFds, err := db.s.stor.List(storage.TypeJournal) - if err != nil { - return err - } - sortFds(rawFds) - - // Journals that will be recovered. 
- var fds []storage.FileDesc - for _, fd := range rawFds { - if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { - fds = append(fds, fd) - } - } - - var ( - // Options. - strict = db.s.o.GetStrict(opt.StrictJournal) - checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer = db.s.o.GetWriteBuffer() - - mdb = memdb.New(db.s.icmp, writeBuffer) - ) - - // Recover journals. - if len(fds) > 0 { - db.logf("journal@recovery RO·Mode F·%d", len(fds)) - - var ( - jr *journal.Reader - buf = &util.Buffer{} - batchSeq uint64 - batchLen int - ) - - for _, fd := range fds { - db.logf("journal@recovery recovering @%d", fd.Num) - - fr, err := db.s.stor.Open(fd) - if err != nil { - return err - } - - // Create or reset journal reader instance. - if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) - } else { - jr.Reset(fr, dropper{db.s, fd}, strict, checksum) - } - - // Replay journal to memdb. - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - - fr.Close() - return errors.SetFd(err, fd) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. - continue - } - - fr.Close() - return errors.SetFd(err, fd) - } - batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) - if err != nil { - if !strict && errors.IsCorrupted(err) { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - - fr.Close() - return errors.SetFd(err, fd) - } - - // Save sequence number. - db.seq = batchSeq + uint64(batchLen) - } - - fr.Close() - } - } - - // Set memDB. - db.mem = &memDB{db: db, DB: mdb, ref: 1} - - return nil -} - -func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) { - mk, mv, err := mdb.Find(ikey) - if err == nil { - ukey, _, kt, kerr := parseInternalKey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if icmp.uCompare(ukey, ikey.ukey()) == 0 { - if kt == keyTypeDel { - return true, nil, ErrNotFound - } - return true, mv, nil - - } - } else if err != ErrNotFound { - return true, nil, err - } - return -} - -func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { - ikey := makeInternalKey(nil, key, seq, keyTypeSeek) - - if auxm != nil { - if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok { - return append([]byte{}, mv...), me - } - } - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok { - return append([]byte{}, mv...), me - } - } - - v := db.s.version() - value, cSched, err := v.get(auxt, ikey, ro, false) - v.release() - if cSched { - // Trigger table compaction. 
- db.compTrigger(db.tcompCmdC) - } - return -} - -func nilIfNotFound(err error) error { - if err == ErrNotFound { - return nil - } - return err -} - -func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { - ikey := makeInternalKey(nil, key, seq, keyTypeSeek) - - if auxm != nil { - if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok { - return me == nil, nilIfNotFound(me) - } - } - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok { - return me == nil, nilIfNotFound(me) - } - } - - v := db.s.version() - _, cSched, err := v.get(auxt, ikey, ro, true) - v.release() - if cSched { - // Trigger table compaction. - db.compTrigger(db.tcompCmdC) - } - if err == nil { - ret = true - } else if err == ErrNotFound { - err = nil - } - return -} - -// Get gets the value for the given key. It returns ErrNotFound if the -// DB does not contains the key. -// -// The returned slice is its own copy, it is safe to modify the contents -// of the returned slice. -// It is safe to modify the contents of the argument after Get returns. -func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = db.ok() - if err != nil { - return - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - return db.get(nil, nil, key, se.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Has returns. -func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { - err = db.ok() - if err != nil { - return - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - return db.has(nil, nil, key, se.seq, ro) -} - -// NewIterator returns an iterator for the latest snapshot of the -// underlying DB. -// The returned iterator is not safe for concurrent use, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - // Iterator holds 'version' lock, 'version' is immutable so snapshot - // can be released after iterator created. - return db.newIterator(nil, nil, se.seq, slice, ro) -} - -// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot -// is a frozen snapshot of a DB state at a particular point in time. The -// content of snapshot are guaranteed to be consistent. -// -// The snapshot must be released after use, by calling Release method. -func (db *DB) GetSnapshot() (*Snapshot, error) { - if err := db.ok(); err != nil { - return nil, err - } - - return db.newSnapshot(), nil -} - -// GetProperty returns value of the given property name. 
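NewIterator above pins an immutable version rather than holding the snapshot, so iteration stays consistent while the DB is written to. A typical full-range walk, assuming db is an open *leveldb.DB:

package example

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// iterateAll walks every key/value pair; nil range and nil read
// options select the whole keyspace with default settings.
func iterateAll(db *leveldb.DB) error {
	iter := db.NewIterator(nil, nil)
	defer iter.Release()
	for iter.Next() {
		// Key/Value buffers are reused; copy them if they must
		// outlive the next call to Next.
		fmt.Printf("%s=%s\n", iter.Key(), iter.Value())
	}
	return iter.Error()
}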
-// -// Property names: -// leveldb.num-files-at-level{n} -// Returns the number of files at level 'n'. -// leveldb.stats -// Returns statistics of the underlying DB. -// leveldb.iostats -// Returns statistics of effective disk read and write. -// leveldb.writedelay -// Returns cumulative write delay caused by compaction. -// leveldb.sstables -// Returns sstables list for each level. -// leveldb.blockpool -// Returns block pool stats. -// leveldb.cachedblock -// Returns size of cached block. -// leveldb.openedtables -// Returns number of opened tables. -// leveldb.alivesnaps -// Returns number of alive snapshots. -// leveldb.aliveiters -// Returns number of alive iterators. -func (db *DB) GetProperty(name string) (value string, err error) { - err = db.ok() - if err != nil { - return - } - - const prefix = "leveldb." - if !strings.HasPrefix(name, prefix) { - return "", ErrNotFound - } - p := name[len(prefix):] - - v := db.s.version() - defer v.release() - - numFilesPrefix := "num-files-at-level" - switch { - case strings.HasPrefix(p, numFilesPrefix): - var level uint - var rest string - n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) - if n != 1 { - err = ErrNotFound - } else { - value = fmt.Sprint(v.tLen(int(level))) - } - case p == "stats": - value = "Compactions\n" + - " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + - "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.levels { - duration, read, write := db.compStats.getStat(level) - if len(tables) == 0 && duration == 0 { - continue - } - value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", - level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), - float64(read)/1048576.0, float64(write)/1048576.0) - } - case p == "iostats": - value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f", - float64(db.s.stor.reads())/1048576.0, - float64(db.s.stor.writes())/1048576.0) - case p == "writedelay": - writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay)) - paused := atomic.LoadInt32(&db.inWritePaused) == 1 - value = fmt.Sprintf("DelayN:%d Delay:%s Paused:%t", writeDelayN, writeDelay, paused) - case p == "sstables": - for level, tables := range v.levels { - value += fmt.Sprintf("--- level %d ---\n", level) - for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax) - } - } - case p == "blockpool": - value = fmt.Sprintf("%v", db.s.tops.bpool) - case p == "cachedblock": - if db.s.tops.bcache != nil { - value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) - } else { - value = "" - } - case p == "openedtables": - value = fmt.Sprintf("%d", db.s.tops.cache.Size()) - case p == "alivesnaps": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) - case p == "aliveiters": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) - default: - err = ErrNotFound - } - - return -} - -// DBStats is database statistics. -type DBStats struct { - WriteDelayCount int32 - WriteDelayDuration time.Duration - WritePaused bool - - AliveSnapshots int32 - AliveIterators int32 - - IOWrite uint64 - IORead uint64 - - BlockCacheSize int - OpenedTablesCount int - - LevelSizes []int64 - LevelTablesCounts []int - LevelRead []int64 - LevelWrite []int64 - LevelDurations []time.Duration -} - -// Stats populates s with database statistics. 
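The property names above are matched literally after the "leveldb." prefix, with num-files-at-level{n} parsed via Sscanf. A sketch, again assuming db is an open *leveldb.DB:

package example

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// dumpProps prints the level-0 table count and the compaction table.
func dumpProps(db *leveldb.DB) error {
	for _, name := range []string{"leveldb.num-files-at-level0", "leveldb.stats"} {
		v, err := db.GetProperty(name)
		if err != nil {
			return err
		}
		fmt.Printf("%s:\n%s\n", name, v)
	}
	return nil
}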
-func (db *DB) Stats(s *DBStats) error { - err := db.ok() - if err != nil { - return err - } - - s.IORead = db.s.stor.reads() - s.IOWrite = db.s.stor.writes() - s.WriteDelayCount = atomic.LoadInt32(&db.cWriteDelayN) - s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay)) - s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1 - - s.OpenedTablesCount = db.s.tops.cache.Size() - if db.s.tops.bcache != nil { - s.BlockCacheSize = db.s.tops.bcache.Size() - } else { - s.BlockCacheSize = 0 - } - - s.AliveIterators = atomic.LoadInt32(&db.aliveIters) - s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps) - - s.LevelDurations = s.LevelDurations[:0] - s.LevelRead = s.LevelRead[:0] - s.LevelWrite = s.LevelWrite[:0] - s.LevelSizes = s.LevelSizes[:0] - s.LevelTablesCounts = s.LevelTablesCounts[:0] - - v := db.s.version() - defer v.release() - - for level, tables := range v.levels { - duration, read, write := db.compStats.getStat(level) - if len(tables) == 0 && duration == 0 { - continue - } - s.LevelDurations = append(s.LevelDurations, duration) - s.LevelRead = append(s.LevelRead, read) - s.LevelWrite = append(s.LevelWrite, write) - s.LevelSizes = append(s.LevelSizes, tables.size()) - s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables)) - } - - return nil -} - -// SizeOf calculates approximate sizes of the given key ranges. -// The length of the returned sizes are equal with the length of the given -// ranges. The returned sizes measure storage space usage, so if the user -// data compresses by a factor of ten, the returned sizes will be one-tenth -// the size of the corresponding user data size. -// The results may not include the sizes of recently written data. -func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { - if err := db.ok(); err != nil { - return nil, err - } - - v := db.s.version() - defer v.release() - - sizes := make(Sizes, 0, len(ranges)) - for _, r := range ranges { - imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek) - imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek) - start, err := v.offsetOf(imin) - if err != nil { - return nil, err - } - limit, err := v.offsetOf(imax) - if err != nil { - return nil, err - } - var size int64 - if limit >= start { - size = limit - start - } - sizes = append(sizes, size) - } - - return sizes, nil -} - -// Close closes the DB. This will also releases any outstanding snapshot, -// abort any in-flight compaction and discard open transaction. -// -// It is not safe to close a DB until all outstanding iterators are released. -// It is valid to call Close multiple times. Other methods should not be -// called after the DB has been closed. -func (db *DB) Close() error { - if !db.setClosed() { - return ErrClosed - } - - start := time.Now() - db.log("db@close closing") - - // Clear the finalizer. - runtime.SetFinalizer(db, nil) - - // Get compaction error. - var err error - select { - case err = <-db.compErrC: - if err == ErrReadOnly { - err = nil - } - default: - } - - // Signal all goroutines. - close(db.closeC) - - // Discard open transaction. - if db.tr != nil { - db.tr.Discard() - } - - // Acquire writer lock. - db.writeLockC <- struct{}{} - - // Wait for all gorotines to exit. - db.closeW.Wait() - - // Closes journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - db.journal = nil - db.journalWriter = nil - } - - if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - } - - // Close session. 
- db.s.close() - db.logf("db@close done T·%v", time.Since(start)) - db.s.release() - - if db.closer != nil { - if err1 := db.closer.Close(); err == nil { - err = err1 - } - db.closer = nil - } - - // Clear memdbs. - db.clearMems() - - return err -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go deleted file mode 100644 index 28e50906..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ /dev/null @@ -1,860 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -var ( - errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") -) - -type cStat struct { - duration time.Duration - read int64 - write int64 -} - -func (p *cStat) add(n *cStatStaging) { - p.duration += n.duration - p.read += n.read - p.write += n.write -} - -func (p *cStat) get() (duration time.Duration, read, write int64) { - return p.duration, p.read, p.write -} - -type cStatStaging struct { - start time.Time - duration time.Duration - on bool - read int64 - write int64 -} - -func (p *cStatStaging) startTimer() { - if !p.on { - p.start = time.Now() - p.on = true - } -} - -func (p *cStatStaging) stopTimer() { - if p.on { - p.duration += time.Since(p.start) - p.on = false - } -} - -type cStats struct { - lk sync.Mutex - stats []cStat -} - -func (p *cStats) addStat(level int, n *cStatStaging) { - p.lk.Lock() - if level >= len(p.stats) { - newStats := make([]cStat, level+1) - copy(newStats, p.stats) - p.stats = newStats - } - p.stats[level].add(n) - p.lk.Unlock() -} - -func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) { - p.lk.Lock() - defer p.lk.Unlock() - if level < len(p.stats) { - return p.stats[level].get() - } - return -} - -func (db *DB) compactionError() { - var err error -noerr: - // No error. - for { - select { - case err = <-db.compErrSetC: - switch { - case err == nil: - case err == ErrReadOnly, errors.IsCorrupted(err): - goto hasperr - default: - goto haserr - } - case <-db.closeC: - return - } - } -haserr: - // Transient error. - for { - select { - case db.compErrC <- err: - case err = <-db.compErrSetC: - switch { - case err == nil: - goto noerr - case err == ErrReadOnly, errors.IsCorrupted(err): - goto hasperr - default: - } - case <-db.closeC: - return - } - } -hasperr: - // Persistent error. - for { - select { - case db.compErrC <- err: - case db.compPerErrC <- err: - case db.writeLockC <- struct{}{}: - // Hold write lock, so that write won't pass-through. - db.compWriteLocking = true - case <-db.closeC: - if db.compWriteLocking { - // We should release the lock or Close will hang. 
- <-db.writeLockC - } - return - } - } -} - -type compactionTransactCounter int - -func (cnt *compactionTransactCounter) incr() { - *cnt++ -} - -type compactionTransactInterface interface { - run(cnt *compactionTransactCounter) error - revert() error -} - -func (db *DB) compactionTransact(name string, t compactionTransactInterface) { - defer func() { - if x := recover(); x != nil { - if x == errCompactionTransactExiting { - if err := t.revert(); err != nil { - db.logf("%s revert error %q", name, err) - } - } - panic(x) - } - }() - - const ( - backoffMin = 1 * time.Second - backoffMax = 8 * time.Second - backoffMul = 2 * time.Second - ) - var ( - backoff = backoffMin - backoffT = time.NewTimer(backoff) - lastCnt = compactionTransactCounter(0) - - disableBackoff = db.s.o.GetDisableCompactionBackoff() - ) - for n := 0; ; n++ { - // Check whether the DB is closed. - if db.isClosed() { - db.logf("%s exiting", name) - db.compactionExitTransact() - } else if n > 0 { - db.logf("%s retrying N·%d", name, n) - } - - // Execute. - cnt := compactionTransactCounter(0) - err := t.run(&cnt) - if err != nil { - db.logf("%s error I·%d %q", name, cnt, err) - } - - // Set compaction error status. - select { - case db.compErrSetC <- err: - case perr := <-db.compPerErrC: - if err != nil { - db.logf("%s exiting (persistent error %q)", name, perr) - db.compactionExitTransact() - } - case <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - if err == nil { - return - } - if errors.IsCorrupted(err) { - db.logf("%s exiting (corruption detected)", name) - db.compactionExitTransact() - } - - if !disableBackoff { - // Reset backoff duration if counter is advancing. - if cnt > lastCnt { - backoff = backoffMin - lastCnt = cnt - } - - // Backoff. - backoffT.Reset(backoff) - if backoff < backoffMax { - backoff *= backoffMul - if backoff > backoffMax { - backoff = backoffMax - } - } - select { - case <-backoffT.C: - case <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - } - } -} - -type compactionTransactFunc struct { - runFunc func(cnt *compactionTransactCounter) error - revertFunc func() error -} - -func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { - return t.runFunc(cnt) -} - -func (t *compactionTransactFunc) revert() error { - if t.revertFunc != nil { - return t.revertFunc() - } - return nil -} - -func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { - db.compactionTransact(name, &compactionTransactFunc{run, revert}) -} - -func (db *DB) compactionExitTransact() { - panic(errCompactionTransactExiting) -} - -func (db *DB) compactionCommit(name string, rec *sessionRecord) { - db.compCommitLk.Lock() - defer db.compCommitLk.Unlock() // Defer is necessary. - db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error { - return db.s.commit(rec) - }, nil) -} - -func (db *DB) memCompaction() { - mdb := db.getFrozenMem() - if mdb == nil { - return - } - defer mdb.decref() - - db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size())) - - // Don't compact empty memdb. - if mdb.Len() == 0 { - db.logf("memdb@flush skipping") - // drop frozen memdb - db.dropFrozenMem() - return - } - - // Pause table compaction. 
- resumeC := make(chan struct{}) - select { - case db.tcompPauseC <- (chan<- struct{})(resumeC): - case <-db.compPerErrC: - close(resumeC) - resumeC = nil - case <-db.closeC: - db.compactionExitTransact() - } - - var ( - rec = &sessionRecord{} - stats = &cStatStaging{} - flushLevel int - ) - - // Generate tables. - db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel) - stats.stopTimer() - return - }, func() error { - for _, r := range rec.addedTables { - db.logf("memdb@flush revert @%d", r.num) - if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil { - return err - } - } - return nil - }) - - rec.setJournalNum(db.journalFd.Num) - rec.setSeqNum(db.frozenSeq) - - // Commit. - stats.startTimer() - db.compactionCommit("memdb", rec) - stats.stopTimer() - - db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration) - - for _, r := range rec.addedTables { - stats.write += r.size - } - db.compStats.addStat(flushLevel, stats) - - // Drop frozen memdb. - db.dropFrozenMem() - - // Resume table compaction. - if resumeC != nil { - select { - case <-resumeC: - close(resumeC) - case <-db.closeC: - db.compactionExitTransact() - } - } - - // Trigger table compaction. - db.compTrigger(db.tcompCmdC) -} - -type tableCompactionBuilder struct { - db *DB - s *session - c *compaction - rec *sessionRecord - stat0, stat1 *cStatStaging - - snapHasLastUkey bool - snapLastUkey []byte - snapLastSeq uint64 - snapIter int - snapKerrCnt int - snapDropCnt int - - kerrCnt int - dropCnt int - - minSeq uint64 - strict bool - tableSize int - - tw *tWriter -} - -func (b *tableCompactionBuilder) appendKV(key, value []byte) error { - // Create new table if not already. - if b.tw == nil { - // Check for pause event. - if b.db != nil { - select { - case ch := <-b.db.tcompPauseC: - b.db.pauseCompaction(ch) - case <-b.db.closeC: - b.db.compactionExitTransact() - default: - } - } - - // Create new table. - var err error - b.tw, err = b.s.tops.create() - if err != nil { - return err - } - } - - // Write key/value into table. - return b.tw.append(key, value) -} - -func (b *tableCompactionBuilder) needFlush() bool { - return b.tw.tw.BytesLen() >= b.tableSize -} - -func (b *tableCompactionBuilder) flush() error { - t, err := b.tw.finish() - if err != nil { - return err - } - b.rec.addTableFile(b.c.sourceLevel+1, t) - b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) - b.tw = nil - return nil -} - -func (b *tableCompactionBuilder) cleanup() { - if b.tw != nil { - b.tw.drop() - b.tw = nil - } -} - -func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { - snapResumed := b.snapIter > 0 - hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. - lastUkey := append([]byte{}, b.snapLastUkey...) - lastSeq := b.snapLastSeq - b.kerrCnt = b.snapKerrCnt - b.dropCnt = b.snapDropCnt - // Restore compaction state. - b.c.restore() - - defer b.cleanup() - - b.stat1.startTimer() - defer b.stat1.stopTimer() - - iter := b.c.newIterator() - defer iter.Release() - for i := 0; iter.Next(); i++ { - // Incr transact counter. - cnt.incr() - - // Skip until last state. 
- if i < b.snapIter { - continue - } - - resumed := false - if snapResumed { - resumed = true - snapResumed = false - } - - ikey := iter.Key() - ukey, seq, kt, kerr := parseInternalKey(ikey) - - if kerr == nil { - shouldStop := !resumed && b.c.shouldStopBefore(ikey) - - if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { - // First occurrence of this user key. - - // Only rotate tables if ukey doesn't hop across. - if b.tw != nil && (shouldStop || b.needFlush()) { - if err := b.flush(); err != nil { - return err - } - - // Creates snapshot of the state. - b.c.save() - b.snapHasLastUkey = hasLastUkey - b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) - b.snapLastSeq = lastSeq - b.snapIter = i - b.snapKerrCnt = b.kerrCnt - b.snapDropCnt = b.dropCnt - } - - hasLastUkey = true - lastUkey = append(lastUkey[:0], ukey...) - lastSeq = keyMaxSeq - } - - switch { - case lastSeq <= b.minSeq: - // Dropped because newer entry for same user key exist - fallthrough // (A) - case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): - // For this user key: - // (1) there is no data in higher levels - // (2) data in lower levels will have larger seq numbers - // (3) data in layers that are being compacted here and have - // smaller seq numbers will be dropped in the next - // few iterations of this loop (by rule (A) above). - // Therefore this deletion marker is obsolete and can be dropped. - lastSeq = seq - b.dropCnt++ - continue - default: - lastSeq = seq - } - } else { - if b.strict { - return kerr - } - - // Don't drop corrupted keys. - hasLastUkey = false - lastUkey = lastUkey[:0] - lastSeq = keyMaxSeq - b.kerrCnt++ - } - - if err := b.appendKV(ikey, iter.Value()); err != nil { - return err - } - } - - if err := iter.Error(); err != nil { - return err - } - - // Finish last table. - if b.tw != nil && !b.tw.empty() { - return b.flush() - } - return nil -} - -func (b *tableCompactionBuilder) revert() error { - for _, at := range b.rec.addedTables { - b.s.logf("table@build revert @%d", at.num) - if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil { - return err - } - } - return nil -} - -func (db *DB) tableCompaction(c *compaction, noTrivial bool) { - defer c.release() - - rec := &sessionRecord{} - rec.addCompPtr(c.sourceLevel, c.imax) - - if !noTrivial && c.trivial() { - t := c.levels[0][0] - db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1) - rec.delTable(c.sourceLevel, t.fd.Num) - rec.addTableFile(c.sourceLevel+1, t) - db.compactionCommit("table-move", rec) - return - } - - var stats [2]cStatStaging - for i, tables := range c.levels { - for _, t := range tables { - stats[i].read += t.size - // Insert deleted tables into record - rec.delTable(c.sourceLevel+i, t.fd.Num) - } - } - sourceSize := int(stats[0].read + stats[1].read) - minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq) - - b := &tableCompactionBuilder{ - db: db, - s: db.s, - c: c, - rec: rec, - stat1: &stats[1], - minSeq: minSeq, - strict: db.s.o.GetStrict(opt.StrictCompaction), - tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1), - } - db.compactionTransact("table@build", b) - - // Commit. 
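The builder constructed above pulls its limits from opt.Options via GetCompactionTableSize and GetStrict(opt.StrictCompaction), and the retry loop earlier consults GetDisableCompactionBackoff. A hedged sketch of the corresponding public fields; the names are inferred from those getters, so verify them against the vendored opt package:

    // Sketch: compaction knobs read by tableCompactionBuilder and compactionTransact.
    o := &opt.Options{
        CompactionTableSize:      4 * opt.MiB,          // per-table target checked by needFlush
        Strict:                   opt.StrictCompaction, // surface key corruption instead of skipping it
        DisableCompactionBackoff: false,                // keep the 1s..8s retry backoff
    }
    db, err := leveldb.OpenFile("path/to/db", o)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()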
- stats[1].startTimer() - db.compactionCommit("table", rec) - stats[1].stopTimer() - - resultSize := int(stats[1].write) - db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) - - // Save compaction stats - for i := range stats { - db.compStats.addStat(c.sourceLevel+1, &stats[i]) - } -} - -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error { - db.logf("table@compaction range L%d %q:%q", level, umin, umax) - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax, true); c != nil { - db.tableCompaction(c, true) - } - } else { - // Retry until nothing to compact. - for { - compacted := false - - // Scan for maximum level with overlapped tables. - v := db.s.version() - m := 1 - for i := m; i < len(v.levels); i++ { - tables := v.levels[i] - if tables.overlaps(db.s.icmp, umin, umax, false) { - m = i - } - } - v.release() - - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax, false); c != nil { - db.tableCompaction(c, true) - compacted = true - } - } - - if !compacted { - break - } - } - } - - return nil -} - -func (db *DB) tableAutoCompaction() { - if c := db.s.pickCompaction(); c != nil { - db.tableCompaction(c, false) - } -} - -func (db *DB) tableNeedCompaction() bool { - v := db.s.version() - defer v.release() - return v.needCompaction() -} - -// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted. -func (db *DB) resumeWrite() bool { - v := db.s.version() - defer v.release() - if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() { - return true - } - return false -} - -func (db *DB) pauseCompaction(ch chan<- struct{}) { - select { - case ch <- struct{}{}: - case <-db.closeC: - db.compactionExitTransact() - } -} - -type cCmd interface { - ack(err error) -} - -type cAuto struct { - // Note for table compaction, an empty ackC represents it's a compaction waiting command. - ackC chan<- error -} - -func (r cAuto) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -type cRange struct { - level int - min, max []byte - ackC chan<- error -} - -func (r cRange) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -// This will trigger auto compaction but will not wait for it. -func (db *DB) compTrigger(compC chan<- cCmd) { - select { - case compC <- cAuto{}: - default: - } -} - -// This will trigger auto compaction and/or wait for all compaction to be done. -func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cAuto{ch}: - case err = <-db.compErrC: - return - case <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case <-db.closeC: - return ErrClosed - } - return err -} - -// Send range compaction request. -func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cRange{level, min, max, ch}: - case err := <-db.compErrC: - return err - case <-db.closeC: - return ErrClosed - } - // Wait cmd. 
- select { - case err = <-ch: - case err = <-db.compErrC: - case <-db.closeC: - return ErrClosed - } - return err -} - -func (db *DB) mCompaction() { - var x cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - select { - case x = <-db.mcompCmdC: - switch x.(type) { - case cAuto: - db.memCompaction() - x.ack(nil) - x = nil - default: - panic("leveldb: unknown command") - } - case <-db.closeC: - return - } - } -} - -func (db *DB) tCompaction() { - var ( - x cCmd - ackQ, waitQ []cCmd - ) - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - for i := range ackQ { - ackQ[i].ack(ErrClosed) - ackQ[i] = nil - } - for i := range waitQ { - waitQ[i].ack(ErrClosed) - waitQ[i] = nil - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - if db.tableNeedCompaction() { - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case <-db.closeC: - return - default: - } - // Resume write operation as soon as possible. - if len(waitQ) > 0 && db.resumeWrite() { - for i := range waitQ { - waitQ[i].ack(nil) - waitQ[i] = nil - } - waitQ = waitQ[:0] - } - } else { - for i := range ackQ { - ackQ[i].ack(nil) - ackQ[i] = nil - } - ackQ = ackQ[:0] - for i := range waitQ { - waitQ[i].ack(nil) - waitQ[i] = nil - } - waitQ = waitQ[:0] - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case <-db.closeC: - return - } - } - if x != nil { - switch cmd := x.(type) { - case cAuto: - if cmd.ackC != nil { - waitQ = append(waitQ, x) - } else { - ackQ = append(ackQ, x) - } - case cRange: - x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) - default: - panic("leveldb: unknown command") - } - x = nil - } - db.tableAutoCompaction() - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go deleted file mode 100644 index 03c24cda..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
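The cAuto and cRange commands above are driven from the exported surface: CompactRange (in the deleted db_write.go further below) funnels into compTriggerRange, and memdb rotation uses compTriggerWait. A minimal sketch of the public entry point:

    // Sketch: a zero util.Range (nil Start and Limit) compacts the whole key space,
    // per the CompactRange doc comment in db_write.go below.
    if err := db.CompactRange(util.Range{}); err != nil {
        log.Fatal(err)
    }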
- -package leveldb - -import ( - "errors" - "math/rand" - "runtime" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key") -) - -type memdbReleaser struct { - once sync.Once - m *memDB -} - -func (mr *memdbReleaser) Release() { - mr.once.Do(func() { - mr.m.decref() - }) -} - -func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) - em, fm := db.getMems() - v := db.s.version() - - tableIts := v.getIterators(slice, ro) - n := len(tableIts) + len(auxt) + 3 - its := make([]iterator.Iterator, 0, n) - - if auxm != nil { - ami := auxm.NewIterator(slice) - ami.SetReleaser(&memdbReleaser{m: auxm}) - its = append(its, ami) - } - for _, t := range auxt { - its = append(its, v.s.tops.newIterator(t, slice, ro)) - } - - emi := em.NewIterator(slice) - emi.SetReleaser(&memdbReleaser{m: em}) - its = append(its, emi) - if fm != nil { - fmi := fm.NewIterator(slice) - fmi.SetReleaser(&memdbReleaser{m: fm}) - its = append(its, fmi) - } - its = append(its, tableIts...) - mi := iterator.NewMergedIterator(its, db.s.icmp, strict) - mi.SetReleaser(&versionReleaser{v: v}) - return mi -} - -func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { - var islice *util.Range - if slice != nil { - islice = &util.Range{} - if slice.Start != nil { - islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek) - } - if slice.Limit != nil { - islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek) - } - } - rawIter := db.newRawIterator(auxm, auxt, islice, ro) - iter := &dbIter{ - db: db, - icmp: db.s.icmp, - iter: rawIter, - seq: seq, - strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader), - key: make([]byte, 0), - value: make([]byte, 0), - } - atomic.AddInt32(&db.aliveIters, 1) - runtime.SetFinalizer(iter, (*dbIter).Release) - return iter -} - -func (db *DB) iterSamplingRate() int { - return rand.Intn(2 * db.s.o.GetIteratorSamplingRate()) -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -// dbIter represent an interator states over a database session. 
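newRawIterator and newIterator above back the exported NewIterator methods on DB, Snapshot, and Transaction. A usage sketch consistent with the examples in the deleted doc.go further below:

    // Sketch: iterate a half-open key range over the merged memdb/table view.
    iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
    for iter.Next() {
        fmt.Printf("%s = %s\n", iter.Key(), iter.Value())
    }
    iter.Release() // decrements db.aliveIters; required, the finalizer is only a backstop
    if err := iter.Error(); err != nil {
        log.Fatal(err)
    }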
-type dbIter struct { - db *DB - icmp *iComparer - iter iterator.Iterator - seq uint64 - strict bool - - smaplingGap int - dir dir - key []byte - value []byte - err error - releaser util.Releaser -} - -func (i *dbIter) sampleSeek() { - ikey := i.iter.Key() - i.smaplingGap -= len(ikey) + len(i.iter.Value()) - for i.smaplingGap < 0 { - i.smaplingGap += i.db.iterSamplingRate() - i.db.sampleSeek(ikey) - } -} - -func (i *dbIter) setErr(err error) { - i.err = err - i.key = nil - i.value = nil -} - -func (i *dbIter) iterErr() { - if err := i.iter.Error(); err != nil { - i.setErr(err) - } -} - -func (i *dbIter) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *dbIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.First() { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.Last() { - return i.prev() - } - i.dir = dirSOI - i.iterErr() - return false -} - -func (i *dbIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek) - if i.iter.Seek(ikey) { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) next() bool { - for { - if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - switch kt { - case keyTypeDel: - // Skip deleted key. - i.key = append(i.key[:0], ukey...) - i.dir = dirForward - case keyTypeVal: - if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) - i.dir = dirForward - return true - } - } - } - } else if i.strict { - i.setErr(kerr) - break - } - if !i.iter.Next() { - i.dir = dirEOI - i.iterErr() - break - } - } - return false -} - -func (i *dbIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { - i.dir = dirEOI - i.iterErr() - return false - } - return i.next() -} - -func (i *dbIter) prev() bool { - i.dir = dirBackward - del := true - if i.iter.Valid() { - for { - if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - if !del && i.icmp.uCompare(ukey, i.key) < 0 { - return true - } - del = (kt == keyTypeDel) - if !del { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) 
- } - } - } else if i.strict { - i.setErr(kerr) - return false - } - if !i.iter.Prev() { - break - } - } - } - if del { - i.dir = dirSOI - i.iterErr() - return false - } - return true -} - -func (i *dbIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - for i.iter.Prev() { - if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if i.icmp.uCompare(ukey, i.key) < 0 { - goto cont - } - } else if i.strict { - i.setErr(kerr) - return false - } - } - i.dir = dirSOI - i.iterErr() - return false - } - -cont: - return i.prev() -} - -func (i *dbIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *dbIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *dbIter) Release() { - if i.dir != dirReleased { - // Clear the finalizer. - runtime.SetFinalizer(i, nil) - - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - - i.dir = dirReleased - i.key = nil - i.value = nil - i.iter.Release() - i.iter = nil - atomic.AddInt32(&i.db.aliveIters, -1) - i.db = nil - } -} - -func (i *dbIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *dbIter) Error() error { - return i.err -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go deleted file mode 100644 index 2c69d2e5..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "runtime" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type snapshotElement struct { - seq uint64 - ref int - e *list.Element -} - -// Acquires a snapshot, based on latest sequence. -func (db *DB) acquireSnapshot() *snapshotElement { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - seq := db.getSeq() - - if e := db.snapsList.Back(); e != nil { - se := e.Value.(*snapshotElement) - if se.seq == seq { - se.ref++ - return se - } else if seq < se.seq { - panic("leveldb: sequence number is not increasing") - } - } - se := &snapshotElement{seq: seq, ref: 1} - se.e = db.snapsList.PushBack(se) - return se -} - -// Releases given snapshot element. -func (db *DB) releaseSnapshot(se *snapshotElement) { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - se.ref-- - if se.ref == 0 { - db.snapsList.Remove(se.e) - se.e = nil - } else if se.ref < 0 { - panic("leveldb: Snapshot: negative element reference") - } -} - -// Gets minimum sequence that not being snapshotted. -func (db *DB) minSeq() uint64 { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - if e := db.snapsList.Front(); e != nil { - return e.Value.(*snapshotElement).seq - } - - return db.getSeq() -} - -// Snapshot is a DB snapshot. -type Snapshot struct { - db *DB - elem *snapshotElement - mu sync.RWMutex - released bool -} - -// Creates new snapshot object. 
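newSnapshot below is reached through DB.GetSnapshot, the exported constructor defined in the deleted db.go (not shown in this hunk, so treat the name as an assumption). A sketch of the intended lifecycle:

    // Sketch: a snapshot pins elem.seq; Release drops the ref taken in acquireSnapshot.
    snap, err := db.GetSnapshot() // assumed exported wrapper around newSnapshot
    if err != nil {
        log.Fatal(err)
    }
    defer snap.Release()

    if _, err := snap.Get([]byte("key"), nil); err != nil && err != leveldb.ErrNotFound {
        log.Fatal(err)
    }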
-func (db *DB) newSnapshot() *Snapshot { - snap := &Snapshot{ - db: db, - elem: db.acquireSnapshot(), - } - atomic.AddInt32(&db.aliveSnaps, 1) - runtime.SetFinalizer(snap, (*Snapshot).Release) - return snap -} - -func (snap *Snapshot) String() string { - return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq) -} - -// Get gets the value for the given key. It returns ErrNotFound if -// the DB does not contains the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.RLock() - defer snap.mu.RUnlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.get(nil, nil, key, snap.elem.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.RLock() - defer snap.mu.RUnlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.has(nil, nil, key, snap.elem.seq, ro) -} - -// NewIterator returns an iterator for the snapshot of the underlying DB. -// The returned iterator is not safe for concurrent use, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// Releasing the snapshot doesn't mean releasing the iterator too, the -// iterator would be still valid until released. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := snap.db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - snap.mu.Lock() - defer snap.mu.Unlock() - if snap.released { - return iterator.NewEmptyIterator(ErrSnapshotReleased) - } - // Since iterator already hold version ref, it doesn't need to - // hold snapshot ref. - return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro) -} - -// Release releases the snapshot. This will not release any returned -// iterators, the iterators would still be valid until released or the -// underlying DB is closed. -// -// Other methods should not be called after the snapshot has been released. -func (snap *Snapshot) Release() { - snap.mu.Lock() - defer snap.mu.Unlock() - - if !snap.released { - // Clear the finalizer. 
- runtime.SetFinalizer(snap, nil) - - snap.released = true - snap.db.releaseSnapshot(snap.elem) - atomic.AddInt32(&snap.db.aliveSnaps, -1) - snap.db = nil - snap.elem = nil - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go deleted file mode 100644 index 65e1c54b..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -var ( - errHasFrozenMem = errors.New("has frozen mem") -) - -type memDB struct { - db *DB - *memdb.DB - ref int32 -} - -func (m *memDB) getref() int32 { - return atomic.LoadInt32(&m.ref) -} - -func (m *memDB) incref() { - atomic.AddInt32(&m.ref, 1) -} - -func (m *memDB) decref() { - if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { - // Only put back memdb with std capacity. - if m.Capacity() == m.db.s.o.GetWriteBuffer() { - m.Reset() - m.db.mpoolPut(m.DB) - } - m.db = nil - m.DB = nil - } else if ref < 0 { - panic("negative memdb ref") - } -} - -// Get latest sequence number. -func (db *DB) getSeq() uint64 { - return atomic.LoadUint64(&db.seq) -} - -// Atomically adds delta to seq. -func (db *DB) addSeq(delta uint64) { - atomic.AddUint64(&db.seq, delta) -} - -func (db *DB) setSeq(seq uint64) { - atomic.StoreUint64(&db.seq, seq) -} - -func (db *DB) sampleSeek(ikey internalKey) { - v := db.s.version() - if v.sampleSeek(ikey) { - // Trigger table compaction. - db.compTrigger(db.tcompCmdC) - } - v.release() -} - -func (db *DB) mpoolPut(mem *memdb.DB) { - if !db.isClosed() { - select { - case db.memPool <- mem: - default: - } - } -} - -func (db *DB) mpoolGet(n int) *memDB { - var mdb *memdb.DB - select { - case mdb = <-db.memPool: - default: - } - if mdb == nil || mdb.Capacity() < n { - mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) - } - return &memDB{ - db: db, - DB: mdb, - } -} - -func (db *DB) mpoolDrain() { - ticker := time.NewTicker(30 * time.Second) - for { - select { - case <-ticker.C: - select { - case <-db.memPool: - default: - } - case <-db.closeC: - ticker.Stop() - // Make sure the pool is drained. - select { - case <-db.memPool: - case <-time.After(time.Second): - } - close(db.memPool) - return - } - } -} - -// Create new memdb and froze the old one; need external synchronization. -// newMem only called synchronously by the writer. -func (db *DB) newMem(n int) (mem *memDB, err error) { - fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()} - w, err := db.s.stor.Create(fd) - if err != nil { - db.s.reuseFileNum(fd.Num) - return - } - - db.memMu.Lock() - defer db.memMu.Unlock() - - if db.frozenMem != nil { - return nil, errHasFrozenMem - } - - if db.journal == nil { - db.journal = journal.NewWriter(w) - } else { - db.journal.Reset(w) - db.journalWriter.Close() - db.frozenJournalFd = db.journalFd - } - db.journalWriter = w - db.journalFd = fd - db.frozenMem = db.mem - mem = db.mpoolGet(n) - mem.incref() // for self - mem.incref() // for caller - db.mem = mem - // The seq only incremented by the writer. And whoever called newMem - // should hold write lock, so no need additional synchronization here. 
- db.frozenSeq = db.seq - return -} - -// Get all memdbs. -func (db *DB) getMems() (e, f *memDB) { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem != nil { - db.mem.incref() - } else if !db.isClosed() { - panic("nil effective mem") - } - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.mem, db.frozenMem -} - -// Get effective memdb. -func (db *DB) getEffectiveMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem != nil { - db.mem.incref() - } else if !db.isClosed() { - panic("nil effective mem") - } - return db.mem -} - -// Check whether we has frozen memdb. -func (db *DB) hasFrozenMem() bool { - db.memMu.RLock() - defer db.memMu.RUnlock() - return db.frozenMem != nil -} - -// Get frozen memdb. -func (db *DB) getFrozenMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.frozenMem -} - -// Drop frozen memdb; assume that frozen memdb isn't nil. -func (db *DB) dropFrozenMem() { - db.memMu.Lock() - if err := db.s.stor.Remove(db.frozenJournalFd); err != nil { - db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err) - } else { - db.logf("journal@remove removed @%d", db.frozenJournalFd.Num) - } - db.frozenJournalFd = storage.FileDesc{} - db.frozenMem.decref() - db.frozenMem = nil - db.memMu.Unlock() -} - -// Clear mems ptr; used by DB.Close(). -func (db *DB) clearMems() { - db.memMu.Lock() - db.mem = nil - db.frozenMem = nil - db.memMu.Unlock() -} - -// Set closed flag; return true if not already closed. -func (db *DB) setClosed() bool { - return atomic.CompareAndSwapUint32(&db.closed, 0, 1) -} - -// Check whether DB was closed. -func (db *DB) isClosed() bool { - return atomic.LoadUint32(&db.closed) != 0 -} - -// Check read ok status. -func (db *DB) ok() error { - if db.isClosed() { - return ErrClosed - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go deleted file mode 100644 index b8f7e7d2..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright (c) 2016, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var errTransactionDone = errors.New("leveldb: transaction already closed") - -// Transaction is the transaction handle. -type Transaction struct { - db *DB - lk sync.RWMutex - seq uint64 - mem *memDB - tables tFiles - ikScratch []byte - rec sessionRecord - stats cStatStaging - closed bool -} - -// Get gets the value for the given key. It returns ErrNotFound if the -// DB does not contains the key. -// -// The returned slice is its own copy, it is safe to modify the contents -// of the returned slice. -// It is safe to modify the contents of the argument after Get returns. -func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) { - tr.lk.RLock() - defer tr.lk.RUnlock() - if tr.closed { - return nil, errTransactionDone - } - return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Has returns. 
-func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) { - tr.lk.RLock() - defer tr.lk.RUnlock() - if tr.closed { - return false, errTransactionDone - } - return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro) -} - -// NewIterator returns an iterator for the latest snapshot of the transaction. -// The returned iterator is not safe for concurrent use, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently while writes to the -// transaction. The resultant key/value pairs are guaranteed to be consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - tr.lk.RLock() - defer tr.lk.RUnlock() - if tr.closed { - return iterator.NewEmptyIterator(errTransactionDone) - } - tr.mem.incref() - return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro) -} - -func (tr *Transaction) flush() error { - // Flush memdb. - if tr.mem.Len() != 0 { - tr.stats.startTimer() - iter := tr.mem.NewIterator(nil) - t, n, err := tr.db.s.tops.createFrom(iter) - iter.Release() - tr.stats.stopTimer() - if err != nil { - return err - } - if tr.mem.getref() == 1 { - tr.mem.Reset() - } else { - tr.mem.decref() - tr.mem = tr.db.mpoolGet(0) - tr.mem.incref() - } - tr.tables = append(tr.tables, t) - tr.rec.addTableFile(0, t) - tr.stats.write += t.size - tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) - } - return nil -} - -func (tr *Transaction) put(kt keyType, key, value []byte) error { - tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt) - if tr.mem.Free() < len(tr.ikScratch)+len(value) { - if err := tr.flush(); err != nil { - return err - } - } - if err := tr.mem.Put(tr.ikScratch, value); err != nil { - return err - } - tr.seq++ - return nil -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// Please note that the transaction is not compacted until committed, so if you -// writes 10 same keys, then those 10 same keys are in the transaction. -// -// It is safe to modify the contents of the arguments after Put returns. -func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error { - tr.lk.Lock() - defer tr.lk.Unlock() - if tr.closed { - return errTransactionDone - } - return tr.put(keyTypeVal, key, value) -} - -// Delete deletes the value for the given key. -// Please note that the transaction is not compacted until committed, so if you -// writes 10 same keys, then those 10 same keys are in the transaction. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error { - tr.lk.Lock() - defer tr.lk.Unlock() - if tr.closed { - return errTransactionDone - } - return tr.put(keyTypeDel, key, nil) -} - -// Write apply the given batch to the transaction. The batch will be applied -// sequentially. 
-// Please note that the transaction is not compacted until committed, so if you -// writes 10 same keys, then those 10 same keys are in the transaction. -// -// It is safe to modify the contents of the arguments after Write returns. -func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error { - if b == nil || b.Len() == 0 { - return nil - } - - tr.lk.Lock() - defer tr.lk.Unlock() - if tr.closed { - return errTransactionDone - } - return b.replayInternal(func(i int, kt keyType, k, v []byte) error { - return tr.put(kt, k, v) - }) -} - -func (tr *Transaction) setDone() { - tr.closed = true - tr.db.tr = nil - tr.mem.decref() - <-tr.db.writeLockC -} - -// Commit commits the transaction. If error is not nil, then the transaction is -// not committed, it can then either be retried or discarded. -// -// Other methods should not be called after transaction has been committed. -func (tr *Transaction) Commit() error { - if err := tr.db.ok(); err != nil { - return err - } - - tr.lk.Lock() - defer tr.lk.Unlock() - if tr.closed { - return errTransactionDone - } - if err := tr.flush(); err != nil { - // Return error, lets user decide either to retry or discard - // transaction. - return err - } - if len(tr.tables) != 0 { - // Committing transaction. - tr.rec.setSeqNum(tr.seq) - tr.db.compCommitLk.Lock() - tr.stats.startTimer() - var cerr error - for retry := 0; retry < 3; retry++ { - cerr = tr.db.s.commit(&tr.rec) - if cerr != nil { - tr.db.logf("transaction@commit error R·%d %q", retry, cerr) - select { - case <-time.After(time.Second): - case <-tr.db.closeC: - tr.db.logf("transaction@commit exiting") - tr.db.compCommitLk.Unlock() - return cerr - } - } else { - // Success. Set db.seq. - tr.db.setSeq(tr.seq) - break - } - } - tr.stats.stopTimer() - if cerr != nil { - // Return error, lets user decide either to retry or discard - // transaction. - return cerr - } - - // Update compaction stats. This is safe as long as we hold compCommitLk. - tr.db.compStats.addStat(0, &tr.stats) - - // Trigger table auto-compaction. - tr.db.compTrigger(tr.db.tcompCmdC) - tr.db.compCommitLk.Unlock() - - // Additionally, wait compaction when certain threshold reached. - // Ignore error, returns error only if transaction can't be committed. - tr.db.waitCompaction() - } - // Only mark as done if transaction committed successfully. - tr.setDone() - return nil -} - -func (tr *Transaction) discard() { - // Discard transaction. - for _, t := range tr.tables { - tr.db.logf("transaction@discard @%d", t.fd.Num) - if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil { - tr.db.s.reuseFileNum(t.fd.Num) - } - } -} - -// Discard discards the transaction. -// -// Other methods should not be called after transaction has been discarded. -func (tr *Transaction) Discard() { - tr.lk.Lock() - if !tr.closed { - tr.discard() - tr.setDone() - } - tr.lk.Unlock() -} - -func (db *DB) waitCompaction() error { - if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() { - return db.compTriggerWait(db.tcompCmdC) - } - return nil -} - -// OpenTransaction opens an atomic DB transaction. Only one transaction can be -// opened at a time. Subsequent call to Write and OpenTransaction will be blocked -// until in-flight transaction is committed or discarded. -// The returned transaction handle is safe for concurrent use. -// -// Transaction is expensive and can overwhelm compaction, especially if -// transaction size is small. Use with caution. -// -// The transaction must be closed once done, either by committing or discarding -// the transaction. 
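A hedged sketch of the Commit/Discard contract documented above; a Commit failure leaves the transaction open, so the caller chooses between retrying and discarding:

    // Sketch: single-writer transaction; setDone releases writeLockC either way.
    tr, err := db.OpenTransaction()
    if err != nil {
        log.Fatal(err)
    }
    if err := tr.Put([]byte("k"), []byte("v"), nil); err != nil {
        tr.Discard()
        log.Fatal(err)
    }
    if err := tr.Commit(); err != nil {
        tr.Discard() // or retry Commit; the transaction is still open at this point
        log.Fatal(err)
    }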
-// Closing the DB will discard open transaction. -func (db *DB) OpenTransaction() (*Transaction, error) { - if err := db.ok(); err != nil { - return nil, err - } - - // The write happen synchronously. - select { - case db.writeLockC <- struct{}{}: - case err := <-db.compPerErrC: - return nil, err - case <-db.closeC: - return nil, ErrClosed - } - - if db.tr != nil { - panic("leveldb: has open transaction") - } - - // Flush current memdb. - if db.mem != nil && db.mem.Len() != 0 { - if _, err := db.rotateMem(0, true); err != nil { - return nil, err - } - } - - // Wait compaction when certain threshold reached. - if err := db.waitCompaction(); err != nil { - return nil, err - } - - tr := &Transaction{ - db: db, - seq: db.seq, - mem: db.mpoolGet(0), - } - tr.mem.incref() - db.tr = tr - return tr, nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go deleted file mode 100644 index 7ecd960d..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader is the interface that wraps basic Get and NewIterator methods. -// This interface implemented by both DB and Snapshot. -type Reader interface { - Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) - NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator -} - -// Sizes is list of size. -type Sizes []int64 - -// Sum returns sum of the sizes. -func (sizes Sizes) Sum() int64 { - var sum int64 - for _, size := range sizes { - sum += size - } - return sum -} - -// Logging. -func (db *DB) log(v ...interface{}) { db.s.log(v...) } -func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } - -// Check and clean files. 
-func (db *DB) checkAndCleanFiles() error { - v := db.s.version() - defer v.release() - - tmap := make(map[int64]bool) - for _, tables := range v.levels { - for _, t := range tables { - tmap[t.fd.Num] = false - } - } - - fds, err := db.s.stor.List(storage.TypeAll) - if err != nil { - return err - } - - var nt int - var rem []storage.FileDesc - for _, fd := range fds { - keep := true - switch fd.Type { - case storage.TypeManifest: - keep = fd.Num >= db.s.manifestFd.Num - case storage.TypeJournal: - if !db.frozenJournalFd.Zero() { - keep = fd.Num >= db.frozenJournalFd.Num - } else { - keep = fd.Num >= db.journalFd.Num - } - case storage.TypeTable: - _, keep = tmap[fd.Num] - if keep { - tmap[fd.Num] = true - nt++ - } - } - - if !keep { - rem = append(rem, fd) - } - } - - if nt != len(tmap) { - var mfds []storage.FileDesc - for num, present := range tmap { - if !present { - mfds = append(mfds, storage.FileDesc{storage.TypeTable, num}) - db.logf("db@janitor table missing @%d", num) - } - } - return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds}) - } - - db.logf("db@janitor F·%d G·%d", len(fds), len(rem)) - for _, fd := range rem { - db.logf("db@janitor removing %s-%d", fd.Type, fd.Num) - if err := db.s.stor.Remove(fd); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go deleted file mode 100644 index db0c1bec..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error { - wr, err := db.journal.Next() - if err != nil { - return err - } - if err := writeBatchesWithHeader(wr, batches, seq); err != nil { - return err - } - if err := db.journal.Flush(); err != nil { - return err - } - if sync { - return db.journalWriter.Sync() - } - return nil -} - -func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) { - retryLimit := 3 -retry: - // Wait for pending memdb compaction. - err = db.compTriggerWait(db.mcompCmdC) - if err != nil { - return - } - retryLimit-- - - // Create new memdb and journal. - mem, err = db.newMem(n) - if err != nil { - if err == errHasFrozenMem { - if retryLimit <= 0 { - panic("BUG: still has frozen memdb") - } - goto retry - } - return - } - - // Schedule memdb compaction. - if wait { - err = db.compTriggerWait(db.mcompCmdC) - } else { - db.compTrigger(db.mcompCmdC) - } - return -} - -func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { - delayed := false - slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger() - pauseTrigger := db.s.o.GetWriteL0PauseTrigger() - flush := func() (retry bool) { - mdb = db.getEffectiveMem() - if mdb == nil { - err = ErrClosed - return false - } - defer func() { - if retry { - mdb.decref() - mdb = nil - } - }() - tLen := db.s.tLen(0) - mdbFree = mdb.Free() - switch { - case tLen >= slowdownTrigger && !delayed: - delayed = true - time.Sleep(time.Millisecond) - case mdbFree >= n: - return false - case tLen >= pauseTrigger: - delayed = true - // Set the write paused flag explicitly. 
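The flush path above throttles writers against two L0 table counts, slowdownTrigger and pauseTrigger. A sketch of the matching public options; the field names are inferred from the getters used here (GetWriteBuffer, GetWriteL0SlowdownTrigger, GetWriteL0PauseTrigger), so verify before relying on them:

    // Sketch: the knobs behind the write-throttling branches in flush.
    o := &opt.Options{
        WriteBuffer:            8 * opt.MiB, // memdb capacity before rotateMem kicks in
        WriteL0SlowdownTrigger: 8,           // L0 count that starts 1ms write delays
        WriteL0PauseTrigger:    12,          // L0 count that blocks writes on compaction
    }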
- atomic.StoreInt32(&db.inWritePaused, 1) - err = db.compTriggerWait(db.tcompCmdC) - // Unset the write paused flag. - atomic.StoreInt32(&db.inWritePaused, 0) - if err != nil { - return false - } - default: - // Allow memdb to grow if it has no entry. - if mdb.Len() == 0 { - mdbFree = n - } else { - mdb.decref() - mdb, err = db.rotateMem(n, false) - if err == nil { - mdbFree = mdb.Free() - } else { - mdbFree = 0 - } - } - return false - } - return true - } - start := time.Now() - for flush() { - } - if delayed { - db.writeDelay += time.Since(start) - db.writeDelayN++ - } else if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN)) - atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay)) - db.writeDelay = 0 - db.writeDelayN = 0 - } - return -} - -type writeMerge struct { - sync bool - batch *Batch - keyType keyType - key, value []byte -} - -func (db *DB) unlockWrite(overflow bool, merged int, err error) { - for i := 0; i < merged; i++ { - db.writeAckC <- err - } - if overflow { - // Pass lock to the next write (that failed to merge). - db.writeMergedC <- false - } else { - // Release lock. - <-db.writeLockC - } -} - -// ourBatch is batch that we can modify. -func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error { - // Try to flush memdb. This method would also trying to throttle writes - // if it is too fast and compaction cannot catch-up. - mdb, mdbFree, err := db.flush(batch.internalLen) - if err != nil { - db.unlockWrite(false, 0, err) - return err - } - defer mdb.decref() - - var ( - overflow bool - merged int - batches = []*Batch{batch} - ) - - if merge { - // Merge limit. - var mergeLimit int - if batch.internalLen > 128<<10 { - mergeLimit = (1 << 20) - batch.internalLen - } else { - mergeLimit = 128 << 10 - } - mergeCap := mdbFree - batch.internalLen - if mergeLimit > mergeCap { - mergeLimit = mergeCap - } - - merge: - for mergeLimit > 0 { - select { - case incoming := <-db.writeMergeC: - if incoming.batch != nil { - // Merge batch. - if incoming.batch.internalLen > mergeLimit { - overflow = true - break merge - } - batches = append(batches, incoming.batch) - mergeLimit -= incoming.batch.internalLen - } else { - // Merge put. - internalLen := len(incoming.key) + len(incoming.value) + 8 - if internalLen > mergeLimit { - overflow = true - break merge - } - if ourBatch == nil { - ourBatch = db.batchPool.Get().(*Batch) - ourBatch.Reset() - batches = append(batches, ourBatch) - } - // We can use same batch since concurrent write doesn't - // guarantee write order. - ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value) - mergeLimit -= internalLen - } - sync = sync || incoming.sync - merged++ - db.writeMergedC <- true - - default: - break merge - } - } - } - - // Release ourBatch if any. - if ourBatch != nil { - defer db.batchPool.Put(ourBatch) - } - - // Seq number. - seq := db.seq + 1 - - // Write journal. - if err := db.writeJournal(batches, seq, sync); err != nil { - db.unlockWrite(overflow, merged, err) - return err - } - - // Put batches. - for _, batch := range batches { - if err := batch.putMem(seq, mdb.DB); err != nil { - panic(err) - } - seq += uint64(batch.Len()) - } - - // Incr seq number. - db.addSeq(uint64(batchesLen(batches))) - - // Rotate memdb if it's reach the threshold. - if batch.internalLen >= mdbFree { - db.rotateMem(0, false) - } - - db.unlockWrite(overflow, merged, nil) - return nil -} - -// Write apply the given batch to the DB. 
The batch records will be applied -// sequentially. Write might be used concurrently, when used concurrently and -// batch is small enough, write will try to merge the batches. Set NoWriteMerge -// option to true to disable write merge. -// -// It is safe to modify the contents of the arguments after Write returns but -// not before. Write will not modify content of the batch. -func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error { - if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 { - return err - } - - // If the batch size is larger than write buffer, it may justified to write - // using transaction instead. Using transaction the batch will be written - // into tables directly, skipping the journaling. - if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() { - tr, err := db.OpenTransaction() - if err != nil { - return err - } - if err := tr.Write(batch, wo); err != nil { - tr.Discard() - return err - } - return tr.Commit() - } - - merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge() - sync := wo.GetSync() && !db.s.o.GetNoSync() - - // Acquire write lock. - if merge { - select { - case db.writeMergeC <- writeMerge{sync: sync, batch: batch}: - if <-db.writeMergedC { - // Write is merged. - return <-db.writeAckC - } - // Write is not merged, the write lock is handed to us. Continue. - case db.writeLockC <- struct{}{}: - // Write lock acquired. - case err := <-db.compPerErrC: - // Compaction error. - return err - case <-db.closeC: - // Closed - return ErrClosed - } - } else { - select { - case db.writeLockC <- struct{}{}: - // Write lock acquired. - case err := <-db.compPerErrC: - // Compaction error. - return err - case <-db.closeC: - // Closed - return ErrClosed - } - } - - return db.writeLocked(batch, nil, merge, sync) -} - -func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error { - if err := db.ok(); err != nil { - return err - } - - merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge() - sync := wo.GetSync() && !db.s.o.GetNoSync() - - // Acquire write lock. - if merge { - select { - case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}: - if <-db.writeMergedC { - // Write is merged. - return <-db.writeAckC - } - // Write is not merged, the write lock is handed to us. Continue. - case db.writeLockC <- struct{}{}: - // Write lock acquired. - case err := <-db.compPerErrC: - // Compaction error. - return err - case <-db.closeC: - // Closed - return ErrClosed - } - } else { - select { - case db.writeLockC <- struct{}{}: - // Write lock acquired. - case err := <-db.compPerErrC: - // Compaction error. - return err - case <-db.closeC: - // Closed - return ErrClosed - } - } - - batch := db.batchPool.Get().(*Batch) - batch.Reset() - batch.appendRec(kt, key, value) - return db.writeLocked(batch, batch, merge, sync) -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. Write merge also applies for Put, see -// Write. -// -// It is safe to modify the contents of the arguments after Put returns but not -// before. -func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { - return db.putRec(keyTypeVal, key, value, wo) -} - -// Delete deletes the value for the given key. Delete will not returns error if -// key doesn't exist. Write merge also applies for Delete, see Write. -// -// It is safe to modify the contents of the arguments after Delete returns but -// not before. 
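The merge handshake above (writeMergeC, writeMergedC, writeAckC) can be bypassed per write. A sketch combining that with a synced batch:

    // Sketch: per-write options steering the branches in DB.Write above.
    batch := new(leveldb.Batch)
    batch.Put([]byte("foo"), []byte("bar"))
    batch.Delete([]byte("baz"))

    wo := &opt.WriteOptions{
        Sync:         true, // journalWriter.Sync after the journal flush in writeJournal
        NoWriteMerge: true, // take the plain writeLockC path and skip merging
    }
    if err := db.Write(batch, wo); err != nil {
        log.Fatal(err)
    }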
-func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { - return db.putRec(keyTypeDel, key, nil, wo) -} - -func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { - iter := mem.NewIterator(nil) - defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0)) -} - -// CompactRange compacts the underlying DB for the given key range. -// In particular, deleted and overwritten versions are discarded, -// and the data is rearranged to reduce the cost of operations -// needed to access the data. This operation should typically only -// be invoked by users who understand the underlying implementation. -// -// A nil Range.Start is treated as a key before all keys in the DB. -// And a nil Range.Limit is treated as a key after all keys in the DB. -// Therefore if both is nil then it will compact entire DB. -func (db *DB) CompactRange(r util.Range) error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - case err := <-db.compPerErrC: - return err - case <-db.closeC: - return ErrClosed - } - - // Check for overlaps in memdb. - mdb := db.getEffectiveMem() - if mdb == nil { - return ErrClosed - } - defer mdb.decref() - if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) { - // Memdb compaction. - if _, err := db.rotateMem(0, false); err != nil { - <-db.writeLockC - return err - } - <-db.writeLockC - if err := db.compTriggerWait(db.mcompCmdC); err != nil { - return err - } - } else { - <-db.writeLockC - } - - // Table compaction. - return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit) -} - -// SetReadOnly makes DB read-only. It will stay read-only until reopened. -func (db *DB) SetReadOnly() error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - db.compWriteLocking = true - case err := <-db.compPerErrC: - return err - case <-db.closeC: - return ErrClosed - } - - // Set compaction read-only. - select { - case db.compErrSetC <- ErrReadOnly: - case perr := <-db.compPerErrC: - return perr - case <-db.closeC: - return ErrClosed - } - - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go deleted file mode 100644 index be768e57..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package leveldb provides implementation of LevelDB key/value database. -// -// Create or open a database: -// -// // The returned DB instance is safe for concurrent use. Which mean that all -// // DB's methods may be called concurrently from multiple goroutine. -// db, err := leveldb.OpenFile("path/to/db", nil) -// ... -// defer db.Close() -// ... -// -// Read or modify the database content: -// -// // Remember that the contents of the returned slice should not be modified. -// data, err := db.Get([]byte("key"), nil) -// ... -// err = db.Put([]byte("key"), []byte("value"), nil) -// ... -// err = db.Delete([]byte("key"), nil) -// ... 
-// -// Iterate over database content: -// -// iter := db.NewIterator(nil, nil) -// for iter.Next() { -// // Remember that the contents of the returned slice should not be modified, and -// // only valid until the next call to Next. -// key := iter.Key() -// value := iter.Value() -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content with a particular prefix: -// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Seek-then-Iterate: -// -// iter := db.NewIterator(nil, nil) -// for ok := iter.Seek(key); ok; ok = iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content: -// -// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Batch writes: -// -// batch := new(leveldb.Batch) -// batch.Put([]byte("foo"), []byte("value")) -// batch.Put([]byte("bar"), []byte("another value")) -// batch.Delete([]byte("baz")) -// err = db.Write(batch, nil) -// ... -// -// Use bloom filter: -// -// o := &opt.Options{ -// Filter: filter.NewBloomFilter(10), -// } -// db, err := leveldb.OpenFile("path/to/db", o) -// ... -// defer db.Close() -// ... -package leveldb diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go deleted file mode 100644 index de264981..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" -) - -// Common errors. -var ( - ErrNotFound = errors.ErrNotFound - ErrReadOnly = errors.New("leveldb: read-only mode") - ErrSnapshotReleased = errors.New("leveldb: snapshot released") - ErrIterReleased = errors.New("leveldb: iterator released") - ErrClosed = errors.New("leveldb: closed") -) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go deleted file mode 100644 index 8d6146b6..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package errors provides common error types used throughout leveldb. -package errors - -import ( - "errors" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Common errors. -var ( - ErrNotFound = New("leveldb: not found") - ErrReleased = util.ErrReleased - ErrHasReleaser = util.ErrHasReleaser -) - -// New returns an error that formats as the given text. -func New(text string) error { - return errors.New(text) -} - -// ErrCorrupted is the type that wraps errors that indicate corruption in -// the database. 
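The errors.go shim above re-exports errors.ErrNotFound, which callers treat as an ordinary miss rather than a failure. A short sketch of that lookup pattern (the path and key are placeholders):

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/demo-db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	data, err := db.Get([]byte("maybe-missing"), nil)
	switch {
	case err == leveldb.ErrNotFound:
		log.Print("key absent; not a failure") // ordinary miss
	case err != nil:
		log.Fatal(err) // real I/O or corruption error
	default:
		log.Printf("value: %q", data)
	}
}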
-type ErrCorrupted struct { - Fd storage.FileDesc - Err error -} - -func (e *ErrCorrupted) Error() string { - if !e.Fd.Zero() { - return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) - } - return e.Err.Error() -} - -// NewErrCorrupted creates new ErrCorrupted error. -func NewErrCorrupted(fd storage.FileDesc, err error) error { - return &ErrCorrupted{fd, err} -} - -// IsCorrupted returns a boolean indicating whether the error is indicating -// a corruption. -func IsCorrupted(err error) bool { - switch err.(type) { - case *ErrCorrupted: - return true - case *storage.ErrCorrupted: - return true - } - return false -} - -// ErrMissingFiles is the type that indicating a corruption due to missing -// files. ErrMissingFiles always wrapped with ErrCorrupted. -type ErrMissingFiles struct { - Fds []storage.FileDesc -} - -func (e *ErrMissingFiles) Error() string { return "file missing" } - -// SetFd sets 'file info' of the given error with the given file. -// Currently only ErrCorrupted is supported, otherwise will do nothing. -func SetFd(err error, fd storage.FileDesc) error { - switch x := err.(type) { - case *ErrCorrupted: - x.Fd = fd - return x - } - return err -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go deleted file mode 100644 index e961e420..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/filter" -) - -type iFilter struct { - filter.Filter -} - -func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, internalKey(key).ukey()) -} - -func (f iFilter) NewGenerator() filter.FilterGenerator { - return iFilterGenerator{f.Filter.NewGenerator()} -} - -type iFilterGenerator struct { - filter.FilterGenerator -} - -func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(internalKey(key).ukey()) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go deleted file mode 100644 index bab0e997..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "github.com/syndtr/goleveldb/leveldb/util" -) - -func bloomHash(key []byte) uint32 { - return util.Hash(key, 0xbc9f1d34) -} - -type bloomFilter int - -// The bloom filter serializes its parameters and is backward compatible -// with respect to them. Therefor, its parameters are not added to its -// name. -func (bloomFilter) Name() string { - return "leveldb.BuiltinBloomFilter" -} - -func (f bloomFilter) Contains(filter, key []byte) bool { - nBytes := len(filter) - 1 - if nBytes < 1 { - return false - } - nBits := uint32(nBytes * 8) - - // Use the encoded k so that we can read filters generated by - // bloom filters created using different parameters. - k := filter[nBytes] - if k > 30 { - // Reserved for potentially new encodings for short bloom filters. - // Consider it a match. 
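The ErrCorrupted/IsCorrupted machinery above is what enables the usual open-or-recover idiom. A sketch, again assuming the upstream module path; how much recovery salvages depends on which table files survive.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
)

func main() {
	path := "/tmp/demo-db" // placeholder
	db, err := leveldb.OpenFile(path, nil)
	if errors.IsCorrupted(err) {
		// Manifest or table corruption: rebuild the manifest from
		// whatever tables are still readable.
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}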
- return true - } - - kh := bloomHash(key) - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < k; j++ { - bitpos := kh % nBits - if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { - return false - } - kh += delta - } - return true -} - -func (f bloomFilter) NewGenerator() FilterGenerator { - // Round down to reduce probing cost a little bit. - k := uint8(f * 69 / 100) // 0.69 =~ ln(2) - if k < 1 { - k = 1 - } else if k > 30 { - k = 30 - } - return &bloomFilterGenerator{ - n: int(f), - k: k, - } -} - -type bloomFilterGenerator struct { - n int - k uint8 - - keyHashes []uint32 -} - -func (g *bloomFilterGenerator) Add(key []byte) { - // Use double-hashing to generate a sequence of hash values. - // See analysis in [Kirsch,Mitzenmacher 2006]. - g.keyHashes = append(g.keyHashes, bloomHash(key)) -} - -func (g *bloomFilterGenerator) Generate(b Buffer) { - // Compute bloom filter size (in both bits and bytes) - nBits := uint32(len(g.keyHashes) * g.n) - // For small n, we can see a very high false positive rate. Fix it - // by enforcing a minimum bloom filter length. - if nBits < 64 { - nBits = 64 - } - nBytes := (nBits + 7) / 8 - nBits = nBytes * 8 - - dest := b.Alloc(int(nBytes) + 1) - dest[nBytes] = g.k - for _, kh := range g.keyHashes { - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < g.k; j++ { - bitpos := kh % nBits - dest[bitpos/8] |= (1 << (bitpos % 8)) - kh += delta - } - } - - g.keyHashes = g.keyHashes[:0] -} - -// NewBloomFilter creates a new initialized bloom filter for given -// bitsPerKey. -// -// Since bitsPerKey is persisted individually for each bloom filter -// serialization, bloom filters are backwards compatible with respect to -// changing bitsPerKey. This means that no big performance penalty will -// be experienced when changing the parameter. See documentation for -// opt.Options.Filter for more information. -func NewBloomFilter(bitsPerKey int) Filter { - return bloomFilter(bitsPerKey) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go deleted file mode 100644 index 7a925c5a..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package filter provides interface and implementation of probabilistic -// data structure. -// -// The filter is resposible for creating small filter from a set of keys. -// These filter will then used to test whether a key is a member of the set. -// In many cases, a filter can cut down the number of disk seeks from a -// handful to a single disk seek per DB.Get call. -package filter - -// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. -type Buffer interface { - // Alloc allocs n bytes of slice from the buffer. This also advancing - // write offset. - Alloc(n int) []byte - - // Write appends the contents of p to the buffer. - Write(p []byte) (n int, err error) - - // WriteByte appends the byte c to the buffer. - WriteByte(c byte) error -} - -// Filter is the filter. -type Filter interface { - // Name returns the name of this policy. - // - // Note that if the filter encoding changes in an incompatible way, - // the name returned by this method must be changed. 
Otherwise, old - // incompatible filters may be passed to methods of this type. - Name() string - - // NewGenerator creates a new filter generator. - NewGenerator() FilterGenerator - - // Contains returns true if the filter contains the given key. - // - // The filter are filters generated by the filter generator. - Contains(filter, key []byte) bool -} - -// FilterGenerator is the filter generator. -type FilterGenerator interface { - // Add adds a key to the filter generator. - // - // The key may become invalid after call to this method end, therefor - // key must be copied if implementation require keeping key for later - // use. The key should not modified directly, doing so may cause - // undefined results. - Add(key []byte) - - // Generate generates filters based on keys passed so far. After call - // to Generate the filter generator maybe resetted, depends on implementation. - Generate(b Buffer) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go deleted file mode 100644 index a23ab05f..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/util" -) - -// BasicArray is the interface that wraps basic Len and Search method. -type BasicArray interface { - // Len returns length of the array. - Len() int - - // Search finds smallest index that point to a key that is greater - // than or equal to the given key. - Search(key []byte) int -} - -// Array is the interface that wraps BasicArray and basic Index method. -type Array interface { - BasicArray - - // Index returns key/value pair with index of i. - Index(i int) (key, value []byte) -} - -// Array is the interface that wraps BasicArray and basic Get method. -type ArrayIndexer interface { - BasicArray - - // Get returns a new data iterator with index of i. 
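The Filter/FilterGenerator interfaces above pair with the bloom implementation earlier in this diff. A round trip looks roughly like this; util.Buffer supplies the Alloc-capable Buffer, and 10 bits per key yields k = 6 probes (10 * 0.69, rounded down).

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	f := filter.NewBloomFilter(10)
	g := f.NewGenerator()
	for _, k := range []string{"foo", "bar", "baz"} {
		g.Add([]byte(k))
	}

	buf := &util.Buffer{}
	g.Generate(buf) // serialized filter; the final byte stores k

	fmt.Println(f.Contains(buf.Bytes(), []byte("foo"))) // true: no false negatives
	fmt.Println(f.Contains(buf.Bytes(), []byte("qux"))) // almost certainly false
}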
- Get(i int) Iterator -} - -type basicArrayIterator struct { - util.BasicReleaser - array BasicArray - pos int - err error -} - -func (i *basicArrayIterator) Valid() bool { - return i.pos >= 0 && i.pos < i.array.Len() && !i.Released() -} - -func (i *basicArrayIterator) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.array.Len() == 0 { - i.pos = -1 - return false - } - i.pos = 0 - return true -} - -func (i *basicArrayIterator) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = n - 1 - return true -} - -func (i *basicArrayIterator) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = i.array.Search(key) - if i.pos >= n { - return false - } - return true -} - -func (i *basicArrayIterator) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos++ - if n := i.array.Len(); i.pos >= n { - i.pos = n - return false - } - return true -} - -func (i *basicArrayIterator) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos-- - if i.pos < 0 { - i.pos = -1 - return false - } - return true -} - -func (i *basicArrayIterator) Error() error { return i.err } - -type arrayIterator struct { - basicArrayIterator - array Array - pos int - key, value []byte -} - -func (i *arrayIterator) updateKV() { - if i.pos == i.basicArrayIterator.pos { - return - } - i.pos = i.basicArrayIterator.pos - if i.Valid() { - i.key, i.value = i.array.Index(i.pos) - } else { - i.key = nil - i.value = nil - } -} - -func (i *arrayIterator) Key() []byte { - i.updateKV() - return i.key -} - -func (i *arrayIterator) Value() []byte { - i.updateKV() - return i.value -} - -type arrayIteratorIndexer struct { - basicArrayIterator - array ArrayIndexer -} - -func (i *arrayIteratorIndexer) Get() Iterator { - if i.Valid() { - return i.array.Get(i.basicArrayIterator.pos) - } - return nil -} - -// NewArrayIterator returns an iterator from the given array. -func NewArrayIterator(array Array) Iterator { - return &arrayIterator{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - pos: -1, - } -} - -// NewArrayIndexer returns an index iterator from the given array. -func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { - return &arrayIteratorIndexer{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go deleted file mode 100644 index 939adbb9..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// IteratorIndexer is the interface that wraps CommonIterator and basic Get -// method. IteratorIndexer provides index for indexed iterator. -type IteratorIndexer interface { - CommonIterator - - // Get returns a new data iterator for the current position, or nil if - // done. 
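The Array/BasicArray adapters above let any sorted in-memory collection act as an iterator. A sketch over parallel sorted slices; the type and names are illustrative.

package main

import (
	"fmt"
	"sort"

	"github.com/syndtr/goleveldb/leveldb/iterator"
)

// sortedPairs adapts parallel sorted slices to iterator.Array.
type sortedPairs struct{ keys, values []string }

func (a sortedPairs) Len() int { return len(a.keys) }

// Search returns the smallest index whose key is >= the given key.
func (a sortedPairs) Search(key []byte) int {
	return sort.SearchStrings(a.keys, string(key))
}

func (a sortedPairs) Index(i int) (key, value []byte) {
	return []byte(a.keys[i]), []byte(a.values[i])
}

func main() {
	it := iterator.NewArrayIterator(sortedPairs{
		keys:   []string{"a", "b", "c"},
		values: []string{"1", "2", "3"},
	})
	defer it.Release()

	// Seek-then-iterate, as described in the package documentation.
	for ok := it.Seek([]byte("b")); ok; ok = it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value()) // b=2, then c=3
	}
}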
- Get() Iterator -} - -type indexedIterator struct { - util.BasicReleaser - index IteratorIndexer - strict bool - - data Iterator - err error - errf func(err error) - closed bool -} - -func (i *indexedIterator) setData() { - if i.data != nil { - i.data.Release() - } - i.data = i.index.Get() -} - -func (i *indexedIterator) clearData() { - if i.data != nil { - i.data.Release() - } - i.data = nil -} - -func (i *indexedIterator) indexErr() { - if err := i.index.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - i.err = err - } -} - -func (i *indexedIterator) dataErr() bool { - if err := i.data.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *indexedIterator) Valid() bool { - return i.data != nil && i.data.Valid() -} - -func (i *indexedIterator) First() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.First() { - i.indexErr() - i.clearData() - return false - } - i.setData() - return i.Next() -} - -func (i *indexedIterator) Last() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Last() { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - return true -} - -func (i *indexedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Seek(key) { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Seek(key) { - if i.dataErr() { - return false - } - i.clearData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Next() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Next(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Next() { - i.indexErr() - return false - } - i.setData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Prev() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Prev(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Prev() { - i.indexErr() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - } - return true -} - -func (i *indexedIterator) Key() []byte { - if i.data == nil { - return nil - } - return i.data.Key() -} - -func (i *indexedIterator) Value() []byte { - if i.data == nil { - return nil - } - return i.data.Value() -} - -func (i *indexedIterator) Release() { - i.clearData() - i.index.Release() - i.BasicReleaser.Release() -} - -func (i *indexedIterator) Error() error { - if i.err != nil { - return i.err - } - if err := i.index.Error(); err != nil { - return err - } - return nil -} - -func (i *indexedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewIndexedIterator returns an 'indexed iterator'. An index is iterator -// that returns another iterator, a 'data iterator'. A 'data iterator' is the -// iterator that contains actual key/value pairs. 
-// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'indexed iterator', otherwise the iterator will -// continue to the next 'data iterator'. Corruption on 'index iterator' will not be -// ignored and will halt the iterator. -func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator { - return &indexedIterator{index: index, strict: strict} -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go deleted file mode 100644 index 96fb0f68..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package iterator provides interface and implementation to traverse over -// contents of a database. -package iterator - -import ( - "errors" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrIterReleased = errors.New("leveldb/iterator: iterator released") -) - -// IteratorSeeker is the interface that wraps the 'seeks method'. -type IteratorSeeker interface { - // First moves the iterator to the first key/value pair. If the iterator - // only contains one key/value pair then First and Last would moves - // to the same key/value pair. - // It returns whether such pair exist. - First() bool - - // Last moves the iterator to the last key/value pair. If the iterator - // only contains one key/value pair then First and Last would moves - // to the same key/value pair. - // It returns whether such pair exist. - Last() bool - - // Seek moves the iterator to the first key/value pair whose key is greater - // than or equal to the given key. - // It returns whether such pair exist. - // - // It is safe to modify the contents of the argument after Seek returns. - Seek(key []byte) bool - - // Next moves the iterator to the next key/value pair. - // It returns false if the iterator is exhausted. - Next() bool - - // Prev moves the iterator to the previous key/value pair. - // It returns false if the iterator is exhausted. - Prev() bool -} - -// CommonIterator is the interface that wraps common iterator methods. -type CommonIterator interface { - IteratorSeeker - - // util.Releaser is the interface that wraps basic Release method. - // When called Release will releases any resources associated with the - // iterator. - util.Releaser - - // util.ReleaseSetter is the interface that wraps the basic SetReleaser - // method. - util.ReleaseSetter - - // TODO: Remove this when ready. - Valid() bool - - // Error returns any accumulated error. Exhausting all the key/value pairs - // is not considered to be an error. - Error() error -} - -// Iterator iterates over a DB's key/value pairs in key order. -// -// When encounter an error any 'seeks method' will return false and will -// yield no key/value pairs. The error can be queried by calling the Error -// method. Calling Release is still necessary. -// -// An iterator must be released after use, but it is not necessary to read -// an iterator until exhaustion. -// Also, an iterator is not necessarily safe for concurrent use, but it is -// safe to use multiple iterators concurrently, with each in a dedicated -// goroutine. -type Iterator interface { - CommonIterator - - // Key returns the key of the current key/value pair, or nil if done. 
- // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Key() []byte - - // Value returns the value of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Value() []byte -} - -// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback -// method. -// -// ErrorCallbackSetter implemented by indexed and merged iterator. -type ErrorCallbackSetter interface { - // SetErrorCallback allows set an error callback of the corresponding - // iterator. Use nil to clear the callback. - SetErrorCallback(f func(err error)) -} - -type emptyIterator struct { - util.BasicReleaser - err error -} - -func (i *emptyIterator) rErr() { - if i.err == nil && i.Released() { - i.err = ErrIterReleased - } -} - -func (*emptyIterator) Valid() bool { return false } -func (i *emptyIterator) First() bool { i.rErr(); return false } -func (i *emptyIterator) Last() bool { i.rErr(); return false } -func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false } -func (i *emptyIterator) Next() bool { i.rErr(); return false } -func (i *emptyIterator) Prev() bool { i.rErr(); return false } -func (*emptyIterator) Key() []byte { return nil } -func (*emptyIterator) Value() []byte { return nil } -func (i *emptyIterator) Error() error { return i.err } - -// NewEmptyIterator creates an empty iterator. The err parameter can be -// nil, but if not nil the given err will be returned by Error method. -func NewEmptyIterator(err error) Iterator { - return &emptyIterator{err: err} -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go deleted file mode 100644 index 1a7e29df..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
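Every iterator in this package honors the Release/Error contract spelled out above; the empty iterator makes the behavior easy to observe. Once released, seeks fail and Error reports ErrIterReleased.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/iterator"
)

func main() {
	it := iterator.NewEmptyIterator(nil)
	fmt.Println(it.Next(), it.Error()) // false <nil>: exhausted, no error

	it.Release()
	fmt.Println(it.Next(), it.Error()) // false leveldb/iterator: iterator released
}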
- -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type mergedIterator struct { - cmp comparer.Comparer - iters []Iterator - strict bool - - keys [][]byte - index int - dir dir - err error - errf func(err error) - releaser util.Releaser -} - -func assertKey(key []byte) []byte { - if key == nil { - panic("leveldb/iterator: nil key") - } - return key -} - -func (i *mergedIterator) iterErr(iter Iterator) bool { - if err := iter.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *mergedIterator) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *mergedIterator) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.First(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirEOI - return i.prev() -} - -func (i *mergedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Seek(key): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) next() bool { - var key []byte - if i.dir == dirForward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirEOI - return false - } - i.dir = dirForward - return true -} - -func (i *mergedIterator) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirSOI: - return i.First() - case dirBackward: - key := append([]byte{}, i.keys[i.index]...) 
- if !i.Seek(key) { - return false - } - return i.Next() - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Next(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.next() -} - -func (i *mergedIterator) prev() bool { - var key []byte - if i.dir == dirBackward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirSOI - return false - } - i.dir = dirBackward - return true -} - -func (i *mergedIterator) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - key := append([]byte{}, i.keys[i.index]...) - for x, iter := range i.iters { - if x == i.index { - continue - } - seek := iter.Seek(key) - switch { - case seek && iter.Prev(), !seek && iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Prev(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.prev() -} - -func (i *mergedIterator) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.keys[i.index] -} - -func (i *mergedIterator) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.iters[i.index].Value() -} - -func (i *mergedIterator) Release() { - if i.dir != dirReleased { - i.dir = dirReleased - for _, iter := range i.iters { - iter.Release() - } - i.iters = nil - i.keys = nil - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *mergedIterator) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *mergedIterator) Error() error { - return i.err -} - -func (i *mergedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewMergedIterator returns an iterator that merges its input. Walking the -// resultant iterator will return all key/value pairs of all input iterators -// in strictly increasing key order, as defined by cmp. -// The input's key ranges may overlap, but there are assumed to be no duplicate -// keys: if iters[i] contains a key k then iters[j] will not contain that key k. -// None of the iters may be nil. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'merged iterator', otherwise the iterator will -// continue to the next 'input iterator'. -func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { - return &mergedIterator{ - iters: iters, - cmp: cmp, - strict: strict, - keys: make([][]byte, len(iters)), - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go deleted file mode 100644 index d094c3d0..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
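NewMergedIterator above requires that its inputs share no keys; merging two array iterators shows the strictly increasing output order. A sketch reusing the Array-adapter idea from array_iter.go.

package main

import (
	"fmt"
	"sort"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/iterator"
)

// kvArray adapts a sorted key slice to iterator.Array (values mirror keys).
type kvArray []string

func (a kvArray) Len() int                  { return len(a) }
func (a kvArray) Search(key []byte) int     { return sort.SearchStrings(a, string(key)) }
func (a kvArray) Index(i int) (k, v []byte) { return []byte(a[i]), []byte(a[i]) }

func main() {
	its := []iterator.Iterator{
		iterator.NewArrayIterator(kvArray{"a", "c", "e"}),
		iterator.NewArrayIterator(kvArray{"b", "d"}),
	}
	merged := iterator.NewMergedIterator(its, comparer.DefaultComparer, true)
	defer merged.Release()

	for merged.Next() {
		fmt.Printf("%s ", merged.Key()) // a b c d e
	}
	fmt.Println()
}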
- -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -// Package journal reads and writes sequences of journals. Each journal is a stream -// of bytes that completes before the next journal starts. -// -// When reading, call Next to obtain an io.Reader for the next journal. Next will -// return io.EOF when there are no more journals. It is valid to call Next -// without reading the current journal to exhaustion. -// -// When writing, call Next to obtain an io.Writer for the next journal. Calling -// Next finishes the current journal. Call Close to finish the final journal. -// -// Optionally, call Flush to finish the current journal and flush the underlying -// writer without starting a new journal. To start a new journal after flushing, -// call Next. -// -// Neither Readers or Writers are safe to use concurrently. -// -// Example code: -// func read(r io.Reader) ([]string, error) { -// var ss []string -// journals := journal.NewReader(r, nil, true, true) -// for { -// j, err := journals.Next() -// if err == io.EOF { -// break -// } -// if err != nil { -// return nil, err -// } -// s, err := ioutil.ReadAll(j) -// if err != nil { -// return nil, err -// } -// ss = append(ss, string(s)) -// } -// return ss, nil -// } -// -// func write(w io.Writer, ss []string) error { -// journals := journal.NewWriter(w) -// for _, s := range ss { -// j, err := journals.Next() -// if err != nil { -// return err -// } -// if _, err := j.Write([]byte(s)), err != nil { -// return err -// } -// } -// return journals.Close() -// } -// -// The wire format is that the stream is divided into 32KiB blocks, and each -// block contains a number of tightly packed chunks. Chunks cannot cross block -// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a -// block must be zero. -// -// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4 -// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) -// followed by a payload. The checksum is over the chunk type and the payload. -// -// There are four chunk types: whether the chunk is the full journal, or the -// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal -// has one first chunk, zero or more middle chunks, and one last chunk. -// -// The wire format allows for limited recovery in the face of data corruption: -// on a format error (such as a checksum mismatch), the reader moves to the -// next block and looks for the next full or first chunk. -package journal - -import ( - "encoding/binary" - "fmt" - "io" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// These constants are part of the wire format and should not be changed. -const ( - fullChunkType = 1 - firstChunkType = 2 - middleChunkType = 3 - lastChunkType = 4 -) - -const ( - blockSize = 32 * 1024 - headerSize = 7 -) - -type flusher interface { - Flush() error -} - -// ErrCorrupted is the error type that generated by corrupted block or chunk. 
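The journal read/write contract documented above round-trips as below: a runnable sketch against a bytes.Buffer rather than a real file.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

func main() {
	var buf bytes.Buffer

	w := journal.NewWriter(&buf)
	for _, s := range []string{"first journal", "second journal"} {
		j, err := w.Next()
		if err != nil {
			log.Fatal(err)
		}
		if _, err := j.Write([]byte(s)); err != nil {
			log.Fatal(err)
		}
	}
	if err := w.Close(); err != nil { // finishes the final journal
		log.Fatal(err)
	}

	r := journal.NewReader(&buf, nil, true, true)
	for {
		j, err := r.Next()
		if err != nil {
			break // io.EOF once all journals are consumed
		}
		s, err := ioutil.ReadAll(j)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\n", s)
	}
}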
-type ErrCorrupted struct { - Size int - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) -} - -// Dropper is the interface that wrap simple Drop method. The Drop -// method will be called when the journal reader dropping a block or chunk. -type Dropper interface { - Drop(err error) -} - -// Reader reads journals from an underlying io.Reader. -type Reader struct { - // r is the underlying reader. - r io.Reader - // the dropper. - dropper Dropper - // strict flag. - strict bool - // checksum flag. - checksum bool - // seq is the sequence number of the current journal. - seq int - // buf[i:j] is the unread portion of the current chunk's payload. - // The low bound, i, excludes the chunk header. - i, j int - // n is the number of bytes of buf that are valid. Once reading has started, - // only the final block can have n < blockSize. - n int - // last is whether the current chunk is the last chunk of the journal. - last bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewReader returns a new reader. The dropper may be nil, and if -// strict is true then corrupted or invalid chunk will halt the journal -// reader entirely. -func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { - return &Reader{ - r: r, - dropper: dropper, - strict: strict, - checksum: checksum, - last: true, - } -} - -var errSkip = errors.New("leveldb/journal: skipped") - -func (r *Reader) corrupt(n int, reason string, skip bool) error { - if r.dropper != nil { - r.dropper.Drop(&ErrCorrupted{n, reason}) - } - if r.strict && !skip { - r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason}) - return r.err - } - return errSkip -} - -// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the -// next block into the buffer if necessary. -func (r *Reader) nextChunk(first bool) error { - for { - if r.j+headerSize <= r.n { - checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) - length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) - chunkType := r.buf[r.j+6] - unprocBlock := r.n - r.j - if checksum == 0 && length == 0 && chunkType == 0 { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(unprocBlock, "zero header", false) - } - if chunkType < fullChunkType || chunkType > lastChunkType { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false) - } - r.i = r.j + headerSize - r.j = r.j + headerSize + int(length) - if r.j > r.n { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(unprocBlock, "chunk length overflows block", false) - } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(unprocBlock, "checksum mismatch", false) - } - if first && chunkType != fullChunkType && chunkType != firstChunkType { - chunkLength := (r.j - r.i) + headerSize - r.i = r.j - // Report the error, but skip it. - return r.corrupt(chunkLength, "orphan chunk", true) - } - r.last = chunkType == fullChunkType || chunkType == lastChunkType - return nil - } - - // The last block. - if r.n < blockSize && r.n > 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - - // Read block. 
- n, err := io.ReadFull(r.r, r.buf[:]) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return err - } - if n == 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - r.i, r.j, r.n = 0, 0, n - } -} - -// Next returns a reader for the next journal. It returns io.EOF if there are no -// more journals. The reader returned becomes stale after the next Next call, -// and should no longer be used. If strict is false, the reader will returns -// io.ErrUnexpectedEOF error when found corrupted journal. -func (r *Reader) Next() (io.Reader, error) { - r.seq++ - if r.err != nil { - return nil, r.err - } - r.i = r.j - for { - if err := r.nextChunk(true); err == nil { - break - } else if err != errSkip { - return nil, err - } - } - return &singleReader{r, r.seq, nil}, nil -} - -// Reset resets the journal reader, allows reuse of the journal reader. Reset returns -// last accumulated error. -func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { - r.seq++ - err := r.err - r.r = reader - r.dropper = dropper - r.strict = strict - r.checksum = checksum - r.i = 0 - r.j = 0 - r.n = 0 - r.last = true - r.err = nil - return err -} - -type singleReader struct { - r *Reader - seq int - err error -} - -func (x *singleReader) Read(p []byte) (int, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - n := copy(p, r.buf[r.i:r.j]) - r.i += n - return n, nil -} - -func (x *singleReader) ReadByte() (byte, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - c := r.buf[r.i] - r.i++ - return c, nil -} - -// Writer writes journals to an underlying io.Writer. -type Writer struct { - // w is the underlying writer. - w io.Writer - // seq is the sequence number of the current journal. - seq int - // f is w as a flusher. - f flusher - // buf[i:j] is the bytes that will become the current chunk. - // The low bound, i, includes the chunk header. - i, j int - // buf[:written] has already been written to w. - // written is zero unless Flush has been called. - written int - // first is whether the current chunk is the first chunk of the journal. - first bool - // pending is whether a chunk is buffered but not yet written. - pending bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewWriter returns a new Writer. -func NewWriter(w io.Writer) *Writer { - f, _ := w.(flusher) - return &Writer{ - w: w, - f: f, - } -} - -// fillHeader fills in the header for the pending chunk. 
-func (w *Writer) fillHeader(last bool) { - if w.i+headerSize > w.j || w.j > blockSize { - panic("leveldb/journal: bad writer state") - } - if last { - if w.first { - w.buf[w.i+6] = fullChunkType - } else { - w.buf[w.i+6] = lastChunkType - } - } else { - if w.first { - w.buf[w.i+6] = firstChunkType - } else { - w.buf[w.i+6] = middleChunkType - } - } - binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) - binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) -} - -// writeBlock writes the buffered block to the underlying writer, and reserves -// space for the next chunk's header. -func (w *Writer) writeBlock() { - _, w.err = w.w.Write(w.buf[w.written:]) - w.i = 0 - w.j = headerSize - w.written = 0 -} - -// writePending finishes the current journal and writes the buffer to the -// underlying writer. -func (w *Writer) writePending() { - if w.err != nil { - return - } - if w.pending { - w.fillHeader(true) - w.pending = false - } - _, w.err = w.w.Write(w.buf[w.written:w.j]) - w.written = w.j -} - -// Close finishes the current journal and closes the writer. -func (w *Writer) Close() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - w.err = errors.New("leveldb/journal: closed Writer") - return nil -} - -// Flush finishes the current journal, writes to the underlying writer, and -// flushes it if that writer implements interface{ Flush() error }. -func (w *Writer) Flush() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - if w.f != nil { - w.err = w.f.Flush() - return w.err - } - return nil -} - -// Reset resets the journal writer, allows reuse of the journal writer. Reset -// will also closes the journal writer if not already. -func (w *Writer) Reset(writer io.Writer) (err error) { - w.seq++ - if w.err == nil { - w.writePending() - err = w.err - } - w.w = writer - w.f, _ = writer.(flusher) - w.i = 0 - w.j = 0 - w.written = 0 - w.first = false - w.pending = false - w.err = nil - return -} - -// Next returns a writer for the next journal. The writer returned becomes stale -// after the next Close, Flush or Next call, and should no longer be used. -func (w *Writer) Next() (io.Writer, error) { - w.seq++ - if w.err != nil { - return nil, w.err - } - if w.pending { - w.fillHeader(true) - } - w.i = w.j - w.j = w.j + headerSize - // Check if there is room in the block for the header. - if w.j > blockSize { - // Fill in the rest of the block with zeroes. - for k := w.i; k < blockSize; k++ { - w.buf[k] = 0 - } - w.writeBlock() - if w.err != nil { - return nil, w.err - } - } - w.first = true - w.pending = true - return singleWriter{w, w.seq}, nil -} - -type singleWriter struct { - w *Writer - seq int -} - -func (x singleWriter) Write(p []byte) (int, error) { - w := x.w - if w.seq != x.seq { - return 0, errors.New("leveldb/journal: stale writer") - } - if w.err != nil { - return 0, w.err - } - n0 := len(p) - for len(p) > 0 { - // Write a block, if it is full. - if w.j == blockSize { - w.fillHeader(false) - w.writeBlock() - if w.err != nil { - return 0, w.err - } - w.first = false - } - // Copy bytes into the buffer. 
- n := copy(w.buf[w.j:], p) - w.j += n - p = p[n:] - } - return n0, nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go deleted file mode 100644 index ad8f51ec..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/key.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// ErrInternalKeyCorrupted records internal key corruption. -type ErrInternalKeyCorrupted struct { - Ikey []byte - Reason string -} - -func (e *ErrInternalKeyCorrupted) Error() string { - return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason) -} - -func newErrInternalKeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason}) -} - -type keyType uint - -func (kt keyType) String() string { - switch kt { - case keyTypeDel: - return "d" - case keyTypeVal: - return "v" - } - return fmt.Sprintf("", uint(kt)) -} - -// Value types encoded as the last component of internal keys. -// Don't modify; this value are saved to disk. -const ( - keyTypeDel = keyType(0) - keyTypeVal = keyType(1) -) - -// keyTypeSeek defines the keyType that should be passed when constructing an -// internal key for seeking to a particular sequence number (since we -// sort sequence numbers in decreasing order and the value type is -// embedded as the low 8 bits in the sequence number in internal keys, -// we need to use the highest-numbered ValueType, not the lowest). -const keyTypeSeek = keyTypeVal - -const ( - // Maximum value possible for sequence number; the 8-bits are - // used by value type, so its can packed together in single - // 64-bit integer. - keyMaxSeq = (uint64(1) << 56) - 1 - // Maximum value possible for packed sequence number and type. - keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek) -) - -// Maximum number encoded in bytes. 
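The internal-key layout used throughout this file (user key, then 8 little-endian bytes packing a 56-bit sequence number with the 8-bit value type) can be sanity-checked in a few lines. This mirrors makeInternalKey/parseInternalKey below rather than importing unexported code.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	keyTypeDel uint64 = 0
	keyTypeVal uint64 = 1
)

func makeIK(ukey []byte, seq, kt uint64) []byte {
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|kt)
	return ik
}

func parseIK(ik []byte) (ukey []byte, seq, kt uint64) {
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	return ik[:len(ik)-8], num >> 8, num & 0xff
}

func main() {
	ik := makeIK([]byte("foo"), 42, keyTypeVal)
	ukey, seq, kt := parseIK(ik)
	fmt.Printf("ukey=%s seq=%d type=%d len=%d\n", ukey, seq, kt, len(ik))
	// ukey=foo seq=42 type=1 len=11
}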
-var keyMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum) -} - -type internalKey []byte - -func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey { - if seq > keyMaxSeq { - panic("leveldb: invalid sequence number") - } else if kt > keyTypeVal { - panic("leveldb: invalid type") - } - - dst = ensureBuffer(dst, len(ukey)+8) - copy(dst, ukey) - binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt)) - return internalKey(dst) -} - -func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) { - if len(ik) < 8 { - return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length") - } - num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), keyType(num&0xff) - if kt > keyTypeVal { - return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type") - } - ukey = ik[:len(ik)-8] - return -} - -func validInternalKey(ik []byte) bool { - _, _, _, err := parseInternalKey(ik) - return err == nil -} - -func (ik internalKey) assert() { - if ik == nil { - panic("leveldb: nil internalKey") - } - if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik))) - } -} - -func (ik internalKey) ukey() []byte { - ik.assert() - return ik[:len(ik)-8] -} - -func (ik internalKey) num() uint64 { - ik.assert() - return binary.LittleEndian.Uint64(ik[len(ik)-8:]) -} - -func (ik internalKey) parseNum() (seq uint64, kt keyType) { - num := ik.num() - seq, kt = uint64(num>>8), keyType(num&0xff) - if kt > keyTypeVal { - panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) - } - return -} - -func (ik internalKey) String() string { - if ik == nil { - return "" - } - - if ukey, seq, kt, err := parseInternalKey(ik); err == nil { - return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) - } - return fmt.Sprintf("", []byte(ik)) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go deleted file mode 100644 index b661c08a..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package memdb provides in-memory key/value database implementation. -package memdb - -import ( - "math/rand" - "sync" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Common errors. 
-var ( - ErrNotFound = errors.ErrNotFound - ErrIterReleased = errors.New("leveldb/memdb: iterator released") -) - -const tMaxHeight = 12 - -type dbIter struct { - util.BasicReleaser - p *DB - slice *util.Range - node int - forward bool - key, value []byte - err error -} - -func (i *dbIter) fill(checkStart, checkLimit bool) bool { - if i.node != 0 { - n := i.p.nodeData[i.node] - m := n + i.p.nodeData[i.node+nKey] - i.key = i.p.kvData[n:m] - if i.slice != nil { - switch { - case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: - fallthrough - case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: - i.node = 0 - goto bail - } - } - i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] - return true - } -bail: - i.key = nil - i.value = nil - return false -} - -func (i *dbIter) Valid() bool { - return i.node != 0 -} - -func (i *dbIter) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil { - i.node, _ = i.p.findGE(i.slice.Start, false) - } else { - i.node = i.p.nodeData[nNext] - } - return i.fill(false, true) -} - -func (i *dbIter) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Limit != nil { - i.node = i.p.findLT(i.slice.Limit) - } else { - i.node = i.p.findLast() - } - return i.fill(true, false) -} - -func (i *dbIter) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { - key = i.slice.Start - } - i.node, _ = i.p.findGE(key, false) - return i.fill(false, true) -} - -func (i *dbIter) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if !i.forward { - return i.First() - } - return false - } - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.nodeData[i.node+nNext] - return i.fill(false, true) -} - -func (i *dbIter) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if i.forward { - return i.Last() - } - return false - } - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.findLT(i.key) - return i.fill(true, false) -} - -func (i *dbIter) Key() []byte { - return i.key -} - -func (i *dbIter) Value() []byte { - return i.value -} - -func (i *dbIter) Error() error { return i.err } - -func (i *dbIter) Release() { - if !i.Released() { - i.p = nil - i.node = 0 - i.key = nil - i.value = nil - i.BasicReleaser.Release() - } -} - -const ( - nKV = iota - nKey - nVal - nHeight - nNext -) - -// DB is an in-memory key/value database. -type DB struct { - cmp comparer.BasicComparer - rnd *rand.Rand - - mu sync.RWMutex - kvData []byte - // Node data: - // [0] : KV offset - // [1] : Key length - // [2] : Value length - // [3] : Height - // [3..height] : Next nodes - nodeData []int - prevNode [tMaxHeight]int - maxHeight int - n int - kvSize int -} - -func (p *DB) randHeight() (h int) { - const branching = 4 - h = 1 - for h < tMaxHeight && p.rnd.Int()%branching == 0 { - h++ - } - return -} - -// Must hold RW-lock if prev == true, as it use shared prevNode slice. 
-func (p *DB) findGE(key []byte, prev bool) (int, bool) { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - cmp := 1 - if next != 0 { - o := p.nodeData[next] - cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) - } - if cmp < 0 { - // Keep searching in this list - node = next - } else { - if prev { - p.prevNode[h] = node - } else if cmp == 0 { - return next, true - } - if h == 0 { - return next, cmp == 0 - } - h-- - } - } -} - -func (p *DB) findLT(key []byte) int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - o := p.nodeData[next] - if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -func (p *DB) findLast() int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - if next == 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (p *DB) Put(key []byte, value []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - if node, exact := p.findGE(key, true); exact { - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - p.nodeData[node] = kvOffset - m := p.nodeData[node+nVal] - p.nodeData[node+nVal] = len(value) - p.kvSize += len(value) - m - return nil - } - - h := p.randHeight() - if h > p.maxHeight { - for i := p.maxHeight; i < h; i++ { - p.prevNode[i] = 0 - } - p.maxHeight = h - } - - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - // Node - node := len(p.nodeData) - p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) - for i, n := range p.prevNode[:h] { - m := n + nNext + i - p.nodeData = append(p.nodeData, p.nodeData[m]) - p.nodeData[m] = node - } - - p.kvSize += len(key) + len(value) - p.n++ - return nil -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (p *DB) Delete(key []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - node, exact := p.findGE(key, true) - if !exact { - return ErrNotFound - } - - h := p.nodeData[node+nHeight] - for i, n := range p.prevNode[:h] { - m := n + nNext + i - p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] - } - - p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] - p.n-- - return nil -} - -// Contains returns true if the given key are in the DB. -// -// It is safe to modify the contents of the arguments after Contains returns. -func (p *DB) Contains(key []byte) bool { - p.mu.RLock() - _, exact := p.findGE(key, false) - p.mu.RUnlock() - return exact -} - -// Get gets the value for the given key. It returns error.ErrNotFound if the -// DB does not contain the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. 
-func (p *DB) Get(key []byte) (value []byte, err error) { - p.mu.RLock() - if node, exact := p.findGE(key, false); exact { - o := p.nodeData[node] + p.nodeData[node+nKey] - value = p.kvData[o : o+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Find returns. -func (p *DB) Find(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node, _ := p.findGE(key, false); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// NewIterator returns an iterator of the DB. -// The returned iterator is not safe for concurrent use, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. However, the resultant key/value pairs are not guaranteed -// to be a consistent snapshot of the DB at a particular point in time. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { - return &dbIter{p: p, slice: slice} -} - -// Capacity returns keys/values buffer capacity. -func (p *DB) Capacity() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) -} - -// Size returns sum of keys and values length. Note that deleted -// key/value will not be accounted for, but it will still consume -// the buffer, since the buffer is append only. -func (p *DB) Size() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.kvSize -} - -// Free returns keys/values free buffer before need to grow. -func (p *DB) Free() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) - len(p.kvData) -} - -// Len returns the number of entries in the DB. -func (p *DB) Len() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.n -} - -// Reset resets the DB to initial empty state. Allows reuse the buffer. -func (p *DB) Reset() { - p.mu.Lock() - p.rnd = rand.New(rand.NewSource(0xdeadbeef)) - p.maxHeight = 1 - p.n = 0 - p.kvSize = 0 - p.kvData = p.kvData[:0] - p.nodeData = p.nodeData[:nNext+tMaxHeight] - p.nodeData[nKV] = 0 - p.nodeData[nKey] = 0 - p.nodeData[nVal] = 0 - p.nodeData[nHeight] = tMaxHeight - for n := 0; n < tMaxHeight; n++ { - p.nodeData[nNext+n] = 0 - p.prevNode[n] = 0 - } - p.mu.Unlock() -} - -// New creates a new initialized in-memory key/value DB. The capacity -// is the initial key/value buffer capacity. The capacity is advisory, -// not enforced. -// -// This DB is append-only, deleting an entry would remove entry node but not -// reclaim KV buffer. -// -// The returned DB instance is safe for concurrent use. 
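memdb is the skip list behind the memtable; standalone use of the API above looks like this. The capacity argument is advisory, and the key range below is illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	p := memdb.New(comparer.DefaultComparer, 1<<20)

	for _, k := range []string{"a", "b", "c", "d"} {
		if err := p.Put([]byte(k), []byte("v-"+k)); err != nil {
			log.Fatal(err)
		}
	}
	if err := p.Delete([]byte("c")); err != nil {
		log.Fatal(err)
	}

	// Range-restricted iteration: ["b", "d").
	it := p.NewIterator(&util.Range{Start: []byte("b"), Limit: []byte("d")})
	defer it.Release()
	for it.Next() {
		// Prints b=v-b: c was deleted, and d falls outside the Limit.
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
	fmt.Println("entries:", p.Len(), "kv bytes:", p.Size())
}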
-func New(cmp comparer.BasicComparer, capacity int) *DB { - p := &DB{ - cmp: cmp, - rnd: rand.New(rand.NewSource(0xdeadbeef)), - maxHeight: 1, - kvData: make([]byte, 0, capacity), - nodeData: make([]int, 4+tMaxHeight), - } - p.nodeData[nHeight] = tMaxHeight - return p -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go deleted file mode 100644 index 44e7d9ad..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ /dev/null @@ -1,684 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package opt provides sets of options used by LevelDB. -package opt - -import ( - "math" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/filter" -) - -const ( - KiB = 1024 - MiB = KiB * 1024 - GiB = MiB * 1024 -) - -var ( - DefaultBlockCacher = LRUCacher - DefaultBlockCacheCapacity = 8 * MiB - DefaultBlockRestartInterval = 16 - DefaultBlockSize = 4 * KiB - DefaultCompactionExpandLimitFactor = 25 - DefaultCompactionGPOverlapsFactor = 10 - DefaultCompactionL0Trigger = 4 - DefaultCompactionSourceLimitFactor = 1 - DefaultCompactionTableSize = 2 * MiB - DefaultCompactionTableSizeMultiplier = 1.0 - DefaultCompactionTotalSize = 10 * MiB - DefaultCompactionTotalSizeMultiplier = 10.0 - DefaultCompressionType = SnappyCompression - DefaultIteratorSamplingRate = 1 * MiB - DefaultOpenFilesCacher = LRUCacher - DefaultOpenFilesCacheCapacity = 500 - DefaultWriteBuffer = 4 * MiB - DefaultWriteL0PauseTrigger = 12 - DefaultWriteL0SlowdownTrigger = 8 -) - -// Cacher is a caching algorithm. -type Cacher interface { - New(capacity int) cache.Cacher -} - -type CacherFunc struct { - NewFunc func(capacity int) cache.Cacher -} - -func (f *CacherFunc) New(capacity int) cache.Cacher { - if f.NewFunc != nil { - return f.NewFunc(capacity) - } - return nil -} - -func noCacher(int) cache.Cacher { return nil } - -var ( - // LRUCacher is the LRU-cache algorithm. - LRUCacher = &CacherFunc{cache.NewLRU} - - // NoCacher is the value to disable caching algorithm. - NoCacher = &CacherFunc{} -) - -// Compression is the 'sorted table' block compression algorithm to use. -type Compression uint - -func (c Compression) String() string { - switch c { - case DefaultCompression: - return "default" - case NoCompression: - return "none" - case SnappyCompression: - return "snappy" - } - return "invalid" -} - -const ( - DefaultCompression Compression = iota - NoCompression - SnappyCompression - nCompression -) - -// Strict is the DB 'strict level'. -type Strict uint - -const ( - // If present then a corrupted or invalid chunk or block in manifest - // journal will cause an error instead of being dropped. - // This will prevent database with corrupted manifest to be opened. - StrictManifest Strict = 1 << iota - - // If present then journal chunk checksum will be verified. - StrictJournalChecksum - - // If present then a corrupted or invalid chunk or block in journal - // will cause an error instead of being dropped. - // This will prevent database with corrupted journal to be opened. - StrictJournal - - // If present then 'sorted table' block checksum will be verified. - // This has effect on both 'read operation' and compaction. - StrictBlockChecksum - - // If present then a corrupted 'sorted table' will fails compaction. 
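This completes the removal of goleveldb's in-memory `memdb` package. For the record, the API being dropped from the vendor tree was used roughly as follows; this is a minimal sketch against the upstream import paths, not code from this repository:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

func main() {
	// New takes a key comparer and an advisory initial buffer capacity.
	db := memdb.New(comparer.DefaultComparer, 1<<10)

	// Put overwrites any previous value; the memdb is not a multi-map.
	db.Put([]byte("a"), []byte("1"))
	db.Put([]byte("b"), []byte("2"))

	if v, err := db.Get([]byte("a")); err == nil {
		fmt.Printf("a=%s\n", v)
	}

	// A nil util.Range iterates the whole key space, in key order.
	it := db.NewIterator(nil)
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
	it.Release()

	// Delete unlinks the node but, as documented above, the append-only
	// KV buffer is not reclaimed until Reset.
	db.Delete([]byte("b"))
	fmt.Println("entries:", db.Len())
}
```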
- // The database will enter read-only mode. - StrictCompaction - - // If present then a corrupted 'sorted table' will halts 'read operation'. - StrictReader - - // If present then leveldb.Recover will drop corrupted 'sorted table'. - StrictRecovery - - // This only applicable for ReadOptions, if present then this ReadOptions - // 'strict level' will override global ones. - StrictOverride - - // StrictAll enables all strict flags. - StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery - - // DefaultStrict is the default strict flags. Specify any strict flags - // will override default strict flags as whole (i.e. not OR'ed). - DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader - - // NoStrict disables all strict flags. Override default strict flags. - NoStrict = ^StrictAll -) - -// Options holds the optional parameters for the DB at large. -type Options struct { - // AltFilters defines one or more 'alternative filters'. - // 'alternative filters' will be used during reads if a filter block - // does not match with the 'effective filter'. - // - // The default value is nil - AltFilters []filter.Filter - - // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - BlockCacher Cacher - - // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. - // Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher. - // - // The default value is 8MiB. - BlockCacheCapacity int - - // BlockRestartInterval is the number of keys between restart points for - // delta encoding of keys. - // - // The default value is 16. - BlockRestartInterval int - - // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' - // block. - // - // The default value is 4KiB. - BlockSize int - - // CompactionExpandLimitFactor limits compaction size after expanded. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 25. - CompactionExpandLimitFactor int - - // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a - // single 'sorted table' generates. - // This will be multiplied by table size limit at grandparent level. - // - // The default value is 10. - CompactionGPOverlapsFactor int - - // CompactionL0Trigger defines number of 'sorted table' at level-0 that will - // trigger compaction. - // - // The default value is 4. - CompactionL0Trigger int - - // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to - // level-0. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 1. - CompactionSourceLimitFactor int - - // CompactionTableSize limits size of 'sorted table' that compaction generates. - // The limits for each level will be calculated as: - // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. - // - // The default value is 2MiB. - CompactionTableSize int - - // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. - // - // The default value is 1. - CompactionTableSizeMultiplier float64 - - // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTableSize. - // Use zero to skip a level. 
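The `Strict` constants deleted above form a bit set with replace-not-merge semantics: assigning any value to `Options.Strict` overrides `DefaultStrict` wholesale, so defaults must be OR'ed back in explicitly. A short sketch against the upstream `opt` package illustrates the composition rules:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Keep the defaults explicitly when adding a flag, since setting
	// Options.Strict replaces DefaultStrict as a whole (not OR'ed).
	o := &opt.Options{Strict: opt.DefaultStrict | opt.StrictManifest}

	fmt.Println(o.GetStrict(opt.StrictManifest)) // true: explicitly added
	fmt.Println(o.GetStrict(opt.StrictRecovery)) // false: not in DefaultStrict

	// StrictAll enables every check; NoStrict disables them all.
	o.Strict = opt.NoStrict
	fmt.Println(o.GetStrict(opt.StrictJournalChecksum)) // false
}
```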
- // - // The default value is nil. - CompactionTableSizeMultiplierPerLevel []float64 - - // CompactionTotalSize limits total size of 'sorted table' for each level. - // The limits for each level will be calculated as: - // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using - // CompactionTotalSizeMultiplierPerLevel. - // - // The default value is 10MiB. - CompactionTotalSize int - - // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. - // - // The default value is 10. - CompactionTotalSizeMultiplier float64 - - // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTotalSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTotalSizeMultiplierPerLevel []float64 - - // Comparer defines a total ordering over the space of []byte keys: a 'less - // than' relationship. The same comparison algorithm must be used for reads - // and writes over the lifetime of the DB. - // - // The default value uses the same ordering as bytes.Compare. - Comparer comparer.Comparer - - // Compression defines the 'sorted table' block compression to use. - // - // The default value (DefaultCompression) uses snappy compression. - Compression Compression - - // DisableBufferPool allows disable use of util.BufferPool functionality. - // - // The default value is false. - DisableBufferPool bool - - // DisableBlockCache allows disable use of cache.Cache functionality on - // 'sorted table' block. - // - // The default value is false. - DisableBlockCache bool - - // DisableCompactionBackoff allows disable compaction retry backoff. - // - // The default value is false. - DisableCompactionBackoff bool - - // DisableLargeBatchTransaction allows disabling switch-to-transaction mode - // on large batch write. If enable batch writes large than WriteBuffer will - // use transaction. - // - // The default is false. - DisableLargeBatchTransaction bool - - // ErrorIfExist defines whether an error should returned if the DB already - // exist. - // - // The default value is false. - ErrorIfExist bool - - // ErrorIfMissing defines whether an error should returned if the DB is - // missing. If false then the database will be created if missing, otherwise - // an error will be returned. - // - // The default value is false. - ErrorIfMissing bool - - // Filter defines an 'effective filter' to use. An 'effective filter' - // if defined will be used to generate per-table filter block. - // The filter name will be stored on disk. - // During reads LevelDB will try to find matching filter from - // 'effective filter' and 'alternative filters'. - // - // Filter can be changed after a DB has been created. It is recommended - // to put old filter to the 'alternative filters' to mitigate lack of - // filter during transition period. - // - // A filter is used to reduce disk reads when looking for a specific key. - // - // The default value is nil. - Filter filter.Filter - - // IteratorSamplingRate defines approximate gap (in bytes) between read - // sampling of an iterator. The samples will be used to determine when - // compaction should be triggered. - // - // The default is 1MiB. - IteratorSamplingRate int - - // NoSync allows completely disable fsync. - // - // The default is false. - NoSync bool - - // NoWriteMerge allows disabling write merge. - // - // The default is false. - NoWriteMerge bool - - // OpenFilesCacher provides cache algorithm for open files caching. 
- // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - OpenFilesCacher Cacher - - // OpenFilesCacheCapacity defines the capacity of the open files caching. - // Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher. - // - // The default value is 500. - OpenFilesCacheCapacity int - - // If true then opens DB in read-only mode. - // - // The default value is false. - ReadOnly bool - - // Strict defines the DB strict level. - Strict Strict - - // WriteBuffer defines maximum size of a 'memdb' before flushed to - // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk - // unsorted journal. - // - // LevelDB may held up to two 'memdb' at the same time. - // - // The default value is 4MiB. - WriteBuffer int - - // WriteL0StopTrigger defines number of 'sorted table' at level-0 that will - // pause write. - // - // The default value is 12. - WriteL0PauseTrigger int - - // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that - // will trigger write slowdown. - // - // The default value is 8. - WriteL0SlowdownTrigger int -} - -func (o *Options) GetAltFilters() []filter.Filter { - if o == nil { - return nil - } - return o.AltFilters -} - -func (o *Options) GetBlockCacher() Cacher { - if o == nil || o.BlockCacher == nil { - return DefaultBlockCacher - } else if o.BlockCacher == NoCacher { - return nil - } - return o.BlockCacher -} - -func (o *Options) GetBlockCacheCapacity() int { - if o == nil || o.BlockCacheCapacity == 0 { - return DefaultBlockCacheCapacity - } else if o.BlockCacheCapacity < 0 { - return 0 - } - return o.BlockCacheCapacity -} - -func (o *Options) GetBlockRestartInterval() int { - if o == nil || o.BlockRestartInterval <= 0 { - return DefaultBlockRestartInterval - } - return o.BlockRestartInterval -} - -func (o *Options) GetBlockSize() int { - if o == nil || o.BlockSize <= 0 { - return DefaultBlockSize - } - return o.BlockSize -} - -func (o *Options) GetCompactionExpandLimit(level int) int { - factor := DefaultCompactionExpandLimitFactor - if o != nil && o.CompactionExpandLimitFactor > 0 { - factor = o.CompactionExpandLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionGPOverlaps(level int) int { - factor := DefaultCompactionGPOverlapsFactor - if o != nil && o.CompactionGPOverlapsFactor > 0 { - factor = o.CompactionGPOverlapsFactor - } - return o.GetCompactionTableSize(level+2) * factor -} - -func (o *Options) GetCompactionL0Trigger() int { - if o == nil || o.CompactionL0Trigger == 0 { - return DefaultCompactionL0Trigger - } - return o.CompactionL0Trigger -} - -func (o *Options) GetCompactionSourceLimit(level int) int { - factor := DefaultCompactionSourceLimitFactor - if o != nil && o.CompactionSourceLimitFactor > 0 { - factor = o.CompactionSourceLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionTableSize(level int) int { - var ( - base = DefaultCompactionTableSize - mult float64 - ) - if o != nil { - if o.CompactionTableSize > 0 { - base = o.CompactionTableSize - } - if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTableSizeMultiplierPerLevel[level] - } else if o.CompactionTableSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) - } - return int(float64(base) * mult) 
-} - -func (o *Options) GetCompactionTotalSize(level int) int64 { - var ( - base = DefaultCompactionTotalSize - mult float64 - ) - if o != nil { - if o.CompactionTotalSize > 0 { - base = o.CompactionTotalSize - } - if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTotalSizeMultiplierPerLevel[level] - } else if o.CompactionTotalSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) - } - return int64(float64(base) * mult) -} - -func (o *Options) GetComparer() comparer.Comparer { - if o == nil || o.Comparer == nil { - return comparer.DefaultComparer - } - return o.Comparer -} - -func (o *Options) GetCompression() Compression { - if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { - return DefaultCompressionType - } - return o.Compression -} - -func (o *Options) GetDisableBufferPool() bool { - if o == nil { - return false - } - return o.DisableBufferPool -} - -func (o *Options) GetDisableBlockCache() bool { - if o == nil { - return false - } - return o.DisableBlockCache -} - -func (o *Options) GetDisableCompactionBackoff() bool { - if o == nil { - return false - } - return o.DisableCompactionBackoff -} - -func (o *Options) GetDisableLargeBatchTransaction() bool { - if o == nil { - return false - } - return o.DisableLargeBatchTransaction -} - -func (o *Options) GetErrorIfExist() bool { - if o == nil { - return false - } - return o.ErrorIfExist -} - -func (o *Options) GetErrorIfMissing() bool { - if o == nil { - return false - } - return o.ErrorIfMissing -} - -func (o *Options) GetFilter() filter.Filter { - if o == nil { - return nil - } - return o.Filter -} - -func (o *Options) GetIteratorSamplingRate() int { - if o == nil || o.IteratorSamplingRate <= 0 { - return DefaultIteratorSamplingRate - } - return o.IteratorSamplingRate -} - -func (o *Options) GetNoSync() bool { - if o == nil { - return false - } - return o.NoSync -} - -func (o *Options) GetNoWriteMerge() bool { - if o == nil { - return false - } - return o.NoWriteMerge -} - -func (o *Options) GetOpenFilesCacher() Cacher { - if o == nil || o.OpenFilesCacher == nil { - return DefaultOpenFilesCacher - } - if o.OpenFilesCacher == NoCacher { - return nil - } - return o.OpenFilesCacher -} - -func (o *Options) GetOpenFilesCacheCapacity() int { - if o == nil || o.OpenFilesCacheCapacity == 0 { - return DefaultOpenFilesCacheCapacity - } else if o.OpenFilesCacheCapacity < 0 { - return 0 - } - return o.OpenFilesCacheCapacity -} - -func (o *Options) GetReadOnly() bool { - if o == nil { - return false - } - return o.ReadOnly -} - -func (o *Options) GetStrict(strict Strict) bool { - if o == nil || o.Strict == 0 { - return DefaultStrict&strict != 0 - } - return o.Strict&strict != 0 -} - -func (o *Options) GetWriteBuffer() int { - if o == nil || o.WriteBuffer <= 0 { - return DefaultWriteBuffer - } - return o.WriteBuffer -} - -func (o *Options) GetWriteL0PauseTrigger() int { - if o == nil || o.WriteL0PauseTrigger == 0 { - return DefaultWriteL0PauseTrigger - } - return o.WriteL0PauseTrigger -} - -func (o *Options) GetWriteL0SlowdownTrigger() int { - if o == nil || o.WriteL0SlowdownTrigger == 0 { - return DefaultWriteL0SlowdownTrigger - } - return o.WriteL0SlowdownTrigger -} - -// ReadOptions holds the optional parameters for 'read operation'. The -// 'read operation' includes Get, Find and NewIterator. 
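The getters deleted above all share one pattern: they tolerate a nil `*Options` receiver and fall back to package defaults, and the per-level compaction limits are computed as `base * multiplier^level`. A sketch against the upstream package:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Every getter is nil-safe and falls back to the package defaults,
	// so callers never need a non-nil receiver.
	var o *opt.Options
	fmt.Println(o.GetWriteBuffer())         // 4194304: the 4 MiB default
	fmt.Println(o.GetCompactionL0Trigger()) // 4

	// Per-level limits follow base * multiplier^level. CompactionTotalSize
	// defaults to 10 MiB with multiplier 10, so level 0 = 10 MiB,
	// level 1 = 100 MiB, level 2 = 1000 MiB.
	for level := 0; level < 3; level++ {
		fmt.Printf("L%d total limit: %d bytes\n", level, o.GetCompactionTotalSize(level))
	}
}
```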
-type ReadOptions struct { - // DontFillCache defines whether block reads for this 'read operation' - // should be cached. If false then the block will be cached. This does - // not affects already cached block. - // - // The default value is false. - DontFillCache bool - - // Strict will be OR'ed with global DB 'strict level' unless StrictOverride - // is present. Currently only StrictReader that has effect here. - Strict Strict -} - -func (ro *ReadOptions) GetDontFillCache() bool { - if ro == nil { - return false - } - return ro.DontFillCache -} - -func (ro *ReadOptions) GetStrict(strict Strict) bool { - if ro == nil { - return false - } - return ro.Strict&strict != 0 -} - -// WriteOptions holds the optional parameters for 'write operation'. The -// 'write operation' includes Write, Put and Delete. -type WriteOptions struct { - // NoWriteMerge allows disabling write merge. - // - // The default is false. - NoWriteMerge bool - - // Sync is whether to sync underlying writes from the OS buffer cache - // through to actual disk, if applicable. Setting Sync can result in - // slower writes. - // - // If false, and the machine crashes, then some recent writes may be lost. - // Note that if it is just the process that crashes (and the machine does - // not) then no writes will be lost. - // - // In other words, Sync being false has the same semantics as a write - // system call. Sync being true means write followed by fsync. - // - // The default value is false. - Sync bool -} - -func (wo *WriteOptions) GetNoWriteMerge() bool { - if wo == nil { - return false - } - return wo.NoWriteMerge -} - -func (wo *WriteOptions) GetSync() bool { - if wo == nil { - return false - } - return wo.Sync -} - -func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { - if ro.GetStrict(StrictOverride) { - return ro.GetStrict(strict) - } else { - return o.GetStrict(strict) || ro.GetStrict(strict) - } -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go deleted file mode 100644 index b072b1ac..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/options.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func dupOptions(o *opt.Options) *opt.Options { - newo := &opt.Options{} - if o != nil { - *newo = *o - } - if newo.Strict == 0 { - newo.Strict = opt.DefaultStrict - } - return newo -} - -func (s *session) setOptions(o *opt.Options) { - no := dupOptions(o) - // Alternative filters. - if filters := o.GetAltFilters(); len(filters) > 0 { - no.AltFilters = make([]filter.Filter, len(filters)) - for i, filter := range filters { - no.AltFilters[i] = &iFilter{filter} - } - } - // Comparer. - s.icmp = &iComparer{o.GetComparer()} - no.Comparer = s.icmp - // Filter. 
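`WriteOptions.Sync`, deleted above, chooses between plain write(2) semantics and write-plus-fsync durability. A sketch of how a caller selects it, assuming the upstream `leveldb` API and an illustrative `/tmp/demo-db` path:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/demo-db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Sync:false (the default) has write(2) semantics: a machine crash may
	// lose recent writes, a mere process crash does not.
	_ = db.Put([]byte("k1"), []byte("v1"), nil)

	// Sync:true is write followed by fsync: slower, but durable across
	// machine crashes.
	wo := &opt.WriteOptions{Sync: true}
	if err := db.Put([]byte("k2"), []byte("v2"), wo); err != nil {
		log.Fatal(err)
	}
}
```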
- if filter := o.GetFilter(); filter != nil { - no.Filter = &iFilter{filter} - } - - s.o = &cachedOptions{Options: no} - s.o.cache() -} - -const optCachedLevel = 7 - -type cachedOptions struct { - *opt.Options - - compactionExpandLimit []int - compactionGPOverlaps []int - compactionSourceLimit []int - compactionTableSize []int - compactionTotalSize []int64 -} - -func (co *cachedOptions) cache() { - co.compactionExpandLimit = make([]int, optCachedLevel) - co.compactionGPOverlaps = make([]int, optCachedLevel) - co.compactionSourceLimit = make([]int, optCachedLevel) - co.compactionTableSize = make([]int, optCachedLevel) - co.compactionTotalSize = make([]int64, optCachedLevel) - - for level := 0; level < optCachedLevel; level++ { - co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) - co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) - co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) - co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) - co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) - } -} - -func (co *cachedOptions) GetCompactionExpandLimit(level int) int { - if level < optCachedLevel { - return co.compactionExpandLimit[level] - } - return co.Options.GetCompactionExpandLimit(level) -} - -func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { - if level < optCachedLevel { - return co.compactionGPOverlaps[level] - } - return co.Options.GetCompactionGPOverlaps(level) -} - -func (co *cachedOptions) GetCompactionSourceLimit(level int) int { - if level < optCachedLevel { - return co.compactionSourceLimit[level] - } - return co.Options.GetCompactionSourceLimit(level) -} - -func (co *cachedOptions) GetCompactionTableSize(level int) int { - if level < optCachedLevel { - return co.compactionTableSize[level] - } - return co.Options.GetCompactionTableSize(level) -} - -func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { - if level < optCachedLevel { - return co.compactionTotalSize[level] - } - return co.Options.GetCompactionTotalSize(level) -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go deleted file mode 100644 index 3f391f93..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "io" - "os" - "sync" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// ErrManifestCorrupted records manifest corruption. This error will be -// wrapped with errors.ErrCorrupted. -type ErrManifestCorrupted struct { - Field string - Reason string -} - -func (e *ErrManifestCorrupted) Error() string { - return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) -} - -func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error { - return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason}) -} - -// session represent a persistent database session. -type session struct { - // Need 64-bit alignment. 
- stNextFileNum int64 // current unused file number - stJournalNum int64 // current journal file number; need external synchronization - stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb - stTempFileNum int64 - stSeqNum uint64 // last mem compacted seq; need external synchronization - - stor *iStorage - storLock storage.Locker - o *cachedOptions - icmp *iComparer - tops *tOps - fileRef map[int64]int - - manifest *journal.Writer - manifestWriter storage.Writer - manifestFd storage.FileDesc - - stCompPtrs []internalKey // compaction pointers; need external synchronization - stVersion *version // current version - vmu sync.Mutex -} - -// Creates new initialized session instance. -func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { - if stor == nil { - return nil, os.ErrInvalid - } - storLock, err := stor.Lock() - if err != nil { - return - } - s = &session{ - stor: newIStorage(stor), - storLock: storLock, - fileRef: make(map[int64]int), - } - s.setOptions(o) - s.tops = newTableOps(s) - s.setVersion(newVersion(s)) - s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") - return -} - -// Close session. -func (s *session) close() { - s.tops.close() - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - s.manifest = nil - s.manifestWriter = nil - s.setVersion(&version{s: s, closing: true}) -} - -// Release session lock. -func (s *session) release() { - s.storLock.Unlock() -} - -// Create a new database session; need external synchronization. -func (s *session) create() error { - // create manifest - return s.newManifest(nil, nil) -} - -// Recover a database session; need external synchronization. -func (s *session) recover() (err error) { - defer func() { - if os.IsNotExist(err) { - // Don't return os.ErrNotExist if the underlying storage contains - // other files that belong to LevelDB. So the DB won't get trashed. - if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 { - err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} - } - } - }() - - fd, err := s.stor.GetMeta() - if err != nil { - return - } - - reader, err := s.stor.Open(fd) - if err != nil { - return - } - defer reader.Close() - - var ( - // Options. 
- strict = s.o.GetStrict(opt.StrictManifest) - - jr = journal.NewReader(reader, dropper{s, fd}, strict, true) - rec = &sessionRecord{} - staging = s.stVersion.newStaging() - ) - for { - var r io.Reader - r, err = jr.Next() - if err != nil { - if err == io.EOF { - err = nil - break - } - return errors.SetFd(err, fd) - } - - err = rec.decode(r) - if err == nil { - // save compact pointers - for _, r := range rec.compPtrs { - s.setCompPtr(r.level, internalKey(r.ikey)) - } - // commit record to version staging - staging.commit(rec) - } else { - err = errors.SetFd(err, fd) - if strict || !errors.IsCorrupted(err) { - return - } - s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd)) - } - rec.resetCompPtrs() - rec.resetAddedTables() - rec.resetDeletedTables() - } - - switch { - case !rec.has(recComparer): - return newErrManifestCorrupted(fd, "comparer", "missing") - case rec.comparer != s.icmp.uName(): - return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) - case !rec.has(recNextFileNum): - return newErrManifestCorrupted(fd, "next-file-num", "missing") - case !rec.has(recJournalNum): - return newErrManifestCorrupted(fd, "journal-file-num", "missing") - case !rec.has(recSeqNum): - return newErrManifestCorrupted(fd, "seq-num", "missing") - } - - s.manifestFd = fd - s.setVersion(staging.finish()) - s.setNextFileNum(rec.nextFileNum) - s.recordCommited(rec) - return nil -} - -// Commit session; need external synchronization. -func (s *session) commit(r *sessionRecord) (err error) { - v := s.version() - defer v.release() - - // spawn new version based on current version - nv := v.spawn(r) - - if s.manifest == nil { - // manifest journal writer not yet created, create one - err = s.newManifest(r, nv) - } else { - err = s.flushManifest(r) - } - - // finally, apply new version if no error rise - if err == nil { - s.setVersion(nv) - } - - return -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go deleted file mode 100644 index 089cd00b..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int { - v := s.version() - defer v.release() - return v.pickMemdbLevel(umin, umax, maxLevel) -} - -func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) { - // Create sorted table. - iter := mdb.NewIterator(nil) - defer iter.Release() - t, n, err := s.tops.createFrom(iter) - if err != nil { - return 0, err - } - - // Pick level other than zero can cause compaction issue with large - // bulk insert and delete on strictly incrementing key-space. The - // problem is that the small deletion markers trapped at lower level, - // while key/value entries keep growing at higher level. Since the - // key-space is strictly incrementing it will not overlaps with - // higher level, thus maximum possible level is always picked, while - // overlapping deletion marker pushed into lower level. 
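`recover` above replays manifest journal records until `io.EOF`, committing records that decode cleanly and, when not strict, dropping corrupted ones. A generic model of that loop follows; all names in it (`replay`, `record`, `errCorrupted`) are hypothetical stand-ins, not goleveldb API:

```go
package main

import (
	"errors"
	"io"
)

type record struct{} // decoded manifest state (placeholder)

var errCorrupted = errors.New("corrupted record")

func replay(next func() (io.Reader, error), decode func(io.Reader) (record, error), apply func(record), strict bool) error {
	for {
		r, err := next()
		if err == io.EOF {
			return nil // clean end of journal
		}
		if err != nil {
			return err
		}
		rec, err := decode(r)
		if err != nil {
			if strict || !errors.Is(err, errCorrupted) {
				return err // strict mode: corruption is fatal
			}
			continue // lenient mode: drop the bad record and move on
		}
		apply(rec) // commit to version staging
	}
}

func main() {}
```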
- // See: https://github.com/syndtr/goleveldb/issues/127. - flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel) - rec.addTableFile(flushLevel, t) - - s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) - return flushLevel, nil -} - -// Pick a compaction based on current state; need external synchronization. -func (s *session) pickCompaction() *compaction { - v := s.version() - - var sourceLevel int - var t0 tFiles - if v.cScore >= 1 { - sourceLevel = v.cLevel - cptr := s.getCompPtr(sourceLevel) - tables := v.levels[sourceLevel] - for _, t := range tables { - if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { - t0 = append(t0, t) - break - } - } - if len(t0) == 0 { - t0 = append(t0, tables[0]) - } - } else { - if p := atomic.LoadPointer(&v.cSeek); p != nil { - ts := (*tSet)(p) - sourceLevel = ts.level - t0 = append(t0, ts.table) - } else { - v.release() - return nil - } - } - - return newCompaction(s, v, sourceLevel, t0) -} - -// Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction { - v := s.version() - - if sourceLevel >= len(v.levels) { - v.release() - return nil - } - - t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0) - if len(t0) == 0 { - v.release() - return nil - } - - // Avoid compacting too much in one shot in case the range is large. - // But we cannot do this for level-0 since level-0 files can overlap - // and we must not pick one file and drop another older file if the - // two files overlap. - if !noLimit && sourceLevel > 0 { - limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel)) - total := int64(0) - for i, t := range t0 { - total += t.size - if total >= limit { - s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) - t0 = t0[:i+1] - break - } - } - } - - return newCompaction(s, v, sourceLevel, t0) -} - -func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction { - c := &compaction{ - s: s, - v: v, - sourceLevel: sourceLevel, - levels: [2]tFiles{t0, nil}, - maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)), - tPtrs: make([]int, len(v.levels)), - } - c.expand() - c.save() - return c -} - -// compaction represent a compaction state. -type compaction struct { - s *session - v *version - - sourceLevel int - levels [2]tFiles - maxGPOverlaps int64 - - gp tFiles - gpi int - seenKey bool - gpOverlappedBytes int64 - imin, imax internalKey - tPtrs []int - released bool - - snapGPI int - snapSeenKey bool - snapGPOverlappedBytes int64 - snapTPtrs []int -} - -func (c *compaction) save() { - c.snapGPI = c.gpi - c.snapSeenKey = c.seenKey - c.snapGPOverlappedBytes = c.gpOverlappedBytes - c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) -} - -func (c *compaction) restore() { - c.gpi = c.snapGPI - c.seenKey = c.snapSeenKey - c.gpOverlappedBytes = c.snapGPOverlappedBytes - c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) -} - -func (c *compaction) release() { - if !c.released { - c.released = true - c.v.release() - } -} - -// Expand compacted tables; need external synchronization. 
-func (c *compaction) expand() { - limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel)) - vt0 := c.v.levels[c.sourceLevel] - vt1 := tFiles{} - if level := c.sourceLevel + 1; level < len(c.v.levels) { - vt1 = c.v.levels[level] - } - - t0, t1 := c.levels[0], c.levels[1] - imin, imax := t0.getRange(c.s.icmp) - // We expand t0 here just incase ukey hop across tables. - t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0) - if len(t0) != len(c.levels[0]) { - imin, imax = t0.getRange(c.s.icmp) - } - t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) - // Get entire range covered by compaction. - amin, amax := append(t0, t1...).getRange(c.s.icmp) - - // See if we can grow the number of inputs in "sourceLevel" without - // changing the number of "sourceLevel+1" files we pick up. - if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0) - if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { - xmin, xmax := exp0.getRange(c.s.icmp) - exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) - if len(exp1) == len(t1) { - c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), - len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) - imin, imax = xmin, xmax - t0, t1 = exp0, exp1 - amin, amax = append(t0, t1...).getRange(c.s.icmp) - } - } - } - - // Compute the set of grandparent files that overlap this compaction - // (parent == sourceLevel+1; grandparent == sourceLevel+2) - if level := c.sourceLevel + 2; level < len(c.v.levels) { - c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) - } - - c.levels[0], c.levels[1] = t0, t1 - c.imin, c.imax = imin, imax -} - -// Check whether compaction is trivial. -func (c *compaction) trivial() bool { - return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps -} - -func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level := c.sourceLevel + 2; level < len(c.v.levels); level++ { - tables := c.v.levels[level] - for c.tPtrs[level] < len(tables) { - t := tables[c.tPtrs[level]] - if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { - // We've advanced far enough. - if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - // Key falls in this file's range, so definitely not base level. - return false - } - break - } - c.tPtrs[level]++ - } - } - return true -} - -func (c *compaction) shouldStopBefore(ikey internalKey) bool { - for ; c.gpi < len(c.gp); c.gpi++ { - gp := c.gp[c.gpi] - if c.s.icmp.Compare(ikey, gp.imax) <= 0 { - break - } - if c.seenKey { - c.gpOverlappedBytes += gp.size - } - } - c.seenKey = true - - if c.gpOverlappedBytes > c.maxGPOverlaps { - // Too much overlap for current output; start new output. - c.gpOverlappedBytes = 0 - return true - } - return false -} - -// Creates an iterator. -func (c *compaction) newIterator() iterator.Iterator { - // Creates iterator slice. - icap := len(c.levels) - if c.sourceLevel == 0 { - // Special case for level-0. - icap = len(c.levels[0]) + 1 - } - its := make([]iterator.Iterator, 0, icap) - - // Options. 
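`shouldStopBefore` above cuts a new compaction output once the bytes of grandparent (level+2) tables it overlaps exceed `maxGPOverlaps`, so no single output table becomes expensive to compact later. A simplified, self-contained model of that cut-off rule, with illustrative numbers rather than the vendored implementation:

```go
package main

import "fmt"

func main() {
	gpSizes := []int64{5, 7, 3, 9}  // grandparent tables crossed, in order
	const maxGPOverlaps = int64(10) // overlap budget per output table

	var overlapped int64
	outputs := 1
	for _, sz := range gpSizes {
		if overlapped+sz > maxGPOverlaps {
			outputs++      // cut the current output, start a new one
			overlapped = 0 // reset the overlap budget
		}
		overlapped += sz
	}
	fmt.Println("output tables:", outputs) // 3
}
```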
-	ro := &opt.ReadOptions{
-		DontFillCache: true,
-		Strict:        opt.StrictOverride,
-	}
-	strict := c.s.o.GetStrict(opt.StrictCompaction)
-	if strict {
-		ro.Strict |= opt.StrictReader
-	}
-
-	for i, tables := range c.levels {
-		if len(tables) == 0 {
-			continue
-		}
-
-		// Level-0 is not sorted and may overlap each other.
-		if c.sourceLevel+i == 0 {
-			for _, t := range tables {
-				its = append(its, c.s.tops.newIterator(t, nil, ro))
-			}
-		} else {
-			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
-			its = append(its, it)
-		}
-	}
-
-	return iterator.NewMergedIterator(its, c.s.icmp, strict)
-}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
deleted file mode 100644
index 854e1aa6..00000000
--- a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-import (
-	"bufio"
-	"encoding/binary"
-	"io"
-	"strings"
-
-	"github.com/syndtr/goleveldb/leveldb/errors"
-	"github.com/syndtr/goleveldb/leveldb/storage"
-)
-
-type byteReader interface {
-	io.Reader
-	io.ByteReader
-}
-
-// These numbers are written to disk and should not be changed.
-const (
-	recComparer    = 1
-	recJournalNum  = 2
-	recNextFileNum = 3
-	recSeqNum      = 4
-	recCompPtr     = 5
-	recDelTable    = 6
-	recAddTable    = 7
-	// 8 was used for large value refs
-	recPrevJournalNum = 9
-)
-
-type cpRecord struct {
-	level int
-	ikey  internalKey
-}
-
-type atRecord struct {
-	level int
-	num   int64
-	size  int64
-	imin  internalKey
-	imax  internalKey
-}
-
-type dtRecord struct {
-	level int
-	num   int64
-}
-
-type sessionRecord struct {
-	hasRec         int
-	comparer       string
-	journalNum     int64
-	prevJournalNum int64
-	nextFileNum    int64
-	seqNum         uint64
-	compPtrs       []cpRecord
-	addedTables    []atRecord
-	deletedTables  []dtRecord
-
-	scratch [binary.MaxVarintLen64]byte
-	err     error
-}
-
-func (p *sessionRecord) has(rec int) bool {
-	return p.hasRec&(1<<uint(rec)) != 0
-}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
deleted file mode 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
+++ /dev/null
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-import (
-	"fmt"
-	"sync/atomic"
-
-	"github.com/syndtr/goleveldb/leveldb/journal"
-	"github.com/syndtr/goleveldb/leveldb/storage"
-)
-
-// Logging.
-
-type dropper struct {
-	s  *session
-	fd storage.FileDesc
-}
-
-func (d dropper) Drop(err error) {
-	if e, ok := err.(*journal.ErrCorrupted); ok {
-		d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason)
-	} else {
-		d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err)
-	}
-}
-
-func (s *session) log(v ...interface{})                 { s.stor.Log(fmt.Sprint(v...)) }
-func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
-
-// File utils.
-
-func (s *session) newTemp() storage.FileDesc {
-	num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
-	return storage.FileDesc{storage.TypeTemp, num}
-}
-
-func (s *session) addFileRef(fd storage.FileDesc, ref int) int {
-	ref += s.fileRef[fd.Num]
-	if ref > 0 {
-		s.fileRef[fd.Num] = ref
-	} else if ref == 0 {
-		delete(s.fileRef, fd.Num)
-	} else {
-		panic(fmt.Sprintf("negative ref: %v", fd))
-	}
-	return ref
-}
-
-// Session state.
-
-// Get current version. This will incr version ref, must call
-// version.release (exactly once) after use.

-func (s *session) version() *version { - s.vmu.Lock() - defer s.vmu.Unlock() - s.stVersion.incref() - return s.stVersion -} - -func (s *session) tLen(level int) int { - s.vmu.Lock() - defer s.vmu.Unlock() - return s.stVersion.tLen(level) -} - -// Set current version to v. -func (s *session) setVersion(v *version) { - s.vmu.Lock() - defer s.vmu.Unlock() - // Hold by session. It is important to call this first before releasing - // current version, otherwise the still used files might get released. - v.incref() - if s.stVersion != nil { - // Release current version. - s.stVersion.releaseNB() - } - s.stVersion = v -} - -// Get current unused file number. -func (s *session) nextFileNum() int64 { - return atomic.LoadInt64(&s.stNextFileNum) -} - -// Set current unused file number to num. -func (s *session) setNextFileNum(num int64) { - atomic.StoreInt64(&s.stNextFileNum, num) -} - -// Mark file number as used. -func (s *session) markFileNum(num int64) { - nextFileNum := num + 1 - for { - old, x := s.stNextFileNum, nextFileNum - if old > x { - x = old - } - if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Allocate a file number. -func (s *session) allocFileNum() int64 { - return atomic.AddInt64(&s.stNextFileNum, 1) - 1 -} - -// Reuse given file number. -func (s *session) reuseFileNum(num int64) { - for { - old, x := s.stNextFileNum, num - if old != x+1 { - x = old - } - if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Set compaction ptr at given level; need external synchronization. -func (s *session) setCompPtr(level int, ik internalKey) { - if level >= len(s.stCompPtrs) { - newCompPtrs := make([]internalKey, level+1) - copy(newCompPtrs, s.stCompPtrs) - s.stCompPtrs = newCompPtrs - } - s.stCompPtrs[level] = append(internalKey{}, ik...) -} - -// Get compaction ptr at given level; need external synchronization. -func (s *session) getCompPtr(level int) internalKey { - if level >= len(s.stCompPtrs) { - return nil - } - return s.stCompPtrs[level] -} - -// Manifest related utils. - -// Fill given session record obj with current states; need external -// synchronization. -func (s *session) fillRecord(r *sessionRecord, snapshot bool) { - r.setNextFileNum(s.nextFileNum()) - - if snapshot { - if !r.has(recJournalNum) { - r.setJournalNum(s.stJournalNum) - } - - if !r.has(recSeqNum) { - r.setSeqNum(s.stSeqNum) - } - - for level, ik := range s.stCompPtrs { - if ik != nil { - r.addCompPtr(level, ik) - } - } - - r.setComparer(s.icmp.uName()) - } -} - -// Mark if record has been committed, this will update session state; -// need external synchronization. -func (s *session) recordCommited(rec *sessionRecord) { - if rec.has(recJournalNum) { - s.stJournalNum = rec.journalNum - } - - if rec.has(recPrevJournalNum) { - s.stPrevJournalNum = rec.prevJournalNum - } - - if rec.has(recSeqNum) { - s.stSeqNum = rec.seqNum - } - - for _, r := range rec.compPtrs { - s.setCompPtr(r.level, internalKey(r.ikey)) - } -} - -// Create a new manifest file; need external synchronization. 
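`markFileNum` above raises the next-file counter monotonically with a compare-and-swap loop, so concurrent markers can never move it backwards. The same pattern in isolation, as a sketch rather than the vendored code:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// markFileNum raises *next to num+1 unless it is already higher.
func markFileNum(next *int64, num int64) {
	want := num + 1
	for {
		old := atomic.LoadInt64(next)
		if old >= want {
			return // someone already marked a higher number
		}
		if atomic.CompareAndSwapInt64(next, old, want) {
			return
		}
	}
}

func main() {
	var next int64
	var wg sync.WaitGroup
	for _, n := range []int64{3, 7, 5} {
		wg.Add(1)
		go func(n int64) { defer wg.Done(); markFileNum(&next, n) }(n)
	}
	wg.Wait()
	fmt.Println(next) // 8: highest marked number + 1
}
```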
-func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()} - writer, err := s.stor.Create(fd) - if err != nil { - return - } - jw := journal.NewWriter(writer) - - if v == nil { - v = s.version() - defer v.release() - } - if rec == nil { - rec = &sessionRecord{} - } - s.fillRecord(rec, true) - v.fillRecord(rec) - - defer func() { - if err == nil { - s.recordCommited(rec) - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - if !s.manifestFd.Zero() { - s.stor.Remove(s.manifestFd) - } - s.manifestFd = fd - s.manifestWriter = writer - s.manifest = jw - } else { - writer.Close() - s.stor.Remove(fd) - s.reuseFileNum(fd.Num) - } - }() - - w, err := jw.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = jw.Flush() - if err != nil { - return - } - err = s.stor.SetMeta(fd) - return -} - -// Flush record to disk. -func (s *session) flushManifest(rec *sessionRecord) (err error) { - s.fillRecord(rec, false) - w, err := s.manifest.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = s.manifest.Flush() - if err != nil { - return - } - if !s.o.GetNoSync() { - err = s.manifestWriter.Sync() - if err != nil { - return - } - } - s.recordCommited(rec) - return -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go deleted file mode 100644 index d45fb5df..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go +++ /dev/null @@ -1,63 +0,0 @@ -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/storage" - "sync/atomic" -) - -type iStorage struct { - storage.Storage - read uint64 - write uint64 -} - -func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) { - r, err := c.Storage.Open(fd) - return &iStorageReader{r, c}, err -} - -func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) { - w, err := c.Storage.Create(fd) - return &iStorageWriter{w, c}, err -} - -func (c *iStorage) reads() uint64 { - return atomic.LoadUint64(&c.read) -} - -func (c *iStorage) writes() uint64 { - return atomic.LoadUint64(&c.write) -} - -// newIStorage returns the given storage wrapped by iStorage. -func newIStorage(s storage.Storage) *iStorage { - return &iStorage{s, 0, 0} -} - -type iStorageReader struct { - storage.Reader - c *iStorage -} - -func (r *iStorageReader) Read(p []byte) (n int, err error) { - n, err = r.Reader.Read(p) - atomic.AddUint64(&r.c.read, uint64(n)) - return n, err -} - -func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) { - n, err = r.Reader.ReadAt(p, off) - atomic.AddUint64(&r.c.read, uint64(n)) - return n, err -} - -type iStorageWriter struct { - storage.Writer - c *iStorage -} - -func (w *iStorageWriter) Write(p []byte) (n int, err error) { - n, err = w.Writer.Write(p) - atomic.AddUint64(&w.c.write, uint64(n)) - return n, err -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go deleted file mode 100644 index 9ba71fd6..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ /dev/null @@ -1,671 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reservefs. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
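`iStorage` above is a decorator that counts bytes flowing through the storage layer with atomic adds. The same idea over plain `io` interfaces; this is illustrative, not the vendored type:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"sync/atomic"
)

type countingReader struct {
	r io.Reader
	n *uint64 // shared running total
}

func (c countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	atomic.AddUint64(c.n, uint64(n)) // safe under concurrent readers
	return n, err
}

func main() {
	var total uint64
	cr := countingReader{r: bytes.NewReader(make([]byte, 1024)), n: &total}
	io.Copy(io.Discard, cr)
	fmt.Println("bytes read:", atomic.LoadUint64(&total)) // 1024
}
```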
- -package storage - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -var ( - errFileOpen = errors.New("leveldb/storage: file still open") - errReadOnly = errors.New("leveldb/storage: storage is read-only") -) - -type fileLock interface { - release() error -} - -type fileStorageLock struct { - fs *fileStorage -} - -func (lock *fileStorageLock) Unlock() { - if lock.fs != nil { - lock.fs.mu.Lock() - defer lock.fs.mu.Unlock() - if lock.fs.slock == lock { - lock.fs.slock = nil - } - } -} - -type int64Slice []int64 - -func (p int64Slice) Len() int { return len(p) } -func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func writeFileSynced(filename string, data []byte, perm os.FileMode) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Sync(); err == nil { - err = err1 - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -const logSizeThreshold = 1024 * 1024 // 1 MiB - -// fileStorage is a file-system backed storage. -type fileStorage struct { - path string - readOnly bool - - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - logSize int64 - buf []byte - // Opened file counter; if open < 0 means closed. - open int - day int -} - -// OpenFile returns a new filesystem-backed storage implementation with the given -// path. This also acquire a file lock, so any subsequent attempt to open the -// same path will fail. -// -// The storage must be closed after use, by calling Close method. -func OpenFile(path string, readOnly bool) (Storage, error) { - if fi, err := os.Stat(path); err == nil { - if !fi.IsDir() { - return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path) - } - } else if os.IsNotExist(err) && !readOnly { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - } else { - return nil, err - } - - flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - flock.release() - } - }() - - var ( - logw *os.File - logSize int64 - ) - if !readOnly { - logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err - } - logSize, err = logw.Seek(0, os.SEEK_END) - if err != nil { - logw.Close() - return nil, err - } - } - - fs := &fileStorage{ - path: path, - readOnly: readOnly, - flock: flock, - logw: logw, - logSize: logSize, - } - runtime.SetFinalizer(fs, (*fileStorage).Close) - return fs, nil -} - -func (fs *fileStorage) Lock() (Locker, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - if fs.readOnly { - return &fileStorageLock{}, nil - } - if fs.slock != nil { - return nil, ErrLocked - } - fs.slock = &fileStorageLock{fs: fs} - return fs.slock, nil -} - -func itoa(buf []byte, i int, wid int) []byte { - u := uint(i) - if u == 0 && wid <= 1 { - return append(buf, '0') - } - - // Assemble decimal in reverse order. - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - return append(buf, b[bp:]...) 
-} - -func (fs *fileStorage) printDay(t time.Time) { - if fs.day == t.Day() { - return - } - fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) -} - -func (fs *fileStorage) doLog(t time.Time, str string) { - if fs.logSize > logSizeThreshold { - // Rotate log file. - fs.logw.Close() - fs.logw = nil - fs.logSize = 0 - rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")) - } - if fs.logw == nil { - var err error - fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return - } - // Force printDay on new log file. - fs.day = 0 - } - fs.printDay(t) - hour, min, sec := t.Clock() - msec := t.Nanosecond() / 1e3 - // time - fs.buf = itoa(fs.buf[:0], hour, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, min, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, sec, 2) - fs.buf = append(fs.buf, '.') - fs.buf = itoa(fs.buf, msec, 6) - fs.buf = append(fs.buf, ' ') - // write - fs.buf = append(fs.buf, []byte(str)...) - fs.buf = append(fs.buf, '\n') - n, _ := fs.logw.Write(fs.buf) - fs.logSize += int64(n) -} - -func (fs *fileStorage) Log(str string) { - if !fs.readOnly { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return - } - fs.doLog(t, str) - } -} - -func (fs *fileStorage) log(str string) { - if !fs.readOnly { - fs.doLog(time.Now(), str) - } -} - -func (fs *fileStorage) setMeta(fd FileDesc) error { - content := fsGenName(fd) + "\n" - // Check and backup old CURRENT file. - currentPath := filepath.Join(fs.path, "CURRENT") - if _, err := os.Stat(currentPath); err == nil { - b, err := ioutil.ReadFile(currentPath) - if err != nil { - fs.log(fmt.Sprintf("backup CURRENT: %v", err)) - return err - } - if string(b) == content { - // Content not changed, do nothing. - return nil - } - if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil { - fs.log(fmt.Sprintf("backup CURRENT: %v", err)) - return err - } - } else if !os.IsNotExist(err) { - return err - } - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) - if err := writeFileSynced(path, []byte(content), 0644); err != nil { - fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err)) - return err - } - // Replace CURRENT file. - if err := rename(path, currentPath); err != nil { - fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) - return err - } - // Sync root directory. - if err := syncDir(fs.path); err != nil { - fs.log(fmt.Sprintf("syncDir: %v", err)) - return err - } - return nil -} - -func (fs *fileStorage) SetMeta(fd FileDesc) error { - if !FileDescOk(fd) { - return ErrInvalidFile - } - if fs.readOnly { - return errReadOnly - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - return fs.setMeta(fd) -} - -func (fs *fileStorage) GetMeta() (FileDesc, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return FileDesc{}, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return FileDesc{}, err - } - names, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if ce := dir.Close(); ce != nil { - fs.log(fmt.Sprintf("close dir: %v", ce)) - } - if err != nil { - return FileDesc{}, err - } - // Try this in order: - // - CURRENT.[0-9]+ ('pending rename' file, descending order) - // - CURRENT - // - CURRENT.bak - // - // Skip corrupted file or file that point to a missing target file. 
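`setMeta` above makes the CURRENT-pointer update crash-safe: write the new content to a side file, fsync it, rename it over the target, then sync the directory so the rename itself is durable. A generic sketch of that write-fsync-rename pattern, assuming POSIX rename atomicity (directory sync support varies by platform):

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

func replaceFileSynced(dir, name string, data []byte) error {
	tmp := filepath.Join(dir, name+".tmp")
	f, err := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil { // content must be durable before the rename
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	// Atomically replace the target within the same filesystem.
	if err := os.Rename(tmp, filepath.Join(dir, name)); err != nil {
		return err
	}
	d, err := os.Open(dir) // sync the directory entry too
	if err != nil {
		return err
	}
	defer d.Close()
	return d.Sync()
}

func main() {
	if err := replaceFileSynced(os.TempDir(), "CURRENT", []byte("MANIFEST-000001\n")); err != nil {
		log.Fatal(err)
	}
}
```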
-	type currentFile struct {
-		name string
-		fd   FileDesc
-	}
-	tryCurrent := func(name string) (*currentFile, error) {
-		b, err := ioutil.ReadFile(filepath.Join(fs.path, name))
-		if err != nil {
-			if os.IsNotExist(err) {
-				err = os.ErrNotExist
-			}
-			return nil, err
-		}
-		var fd FileDesc
-		if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) {
-			fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b))
-			err := &ErrCorrupted{
-				Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"),
-			}
-			return nil, err
-		}
-		if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil {
-			if os.IsNotExist(err) {
-				fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd))
-				err = os.ErrNotExist
-			}
-			return nil, err
-		}
-		return &currentFile{name: name, fd: fd}, nil
-	}
-	tryCurrents := func(names []string) (*currentFile, error) {
-		var (
-			cur *currentFile
-			// Last corruption error.
-			lastCerr error
-		)
-		for _, name := range names {
-			var err error
-			cur, err = tryCurrent(name)
-			if err == nil {
-				break
-			} else if err == os.ErrNotExist {
-				// Fallback to the next file.
-			} else if isCorrupted(err) {
-				lastCerr = err
-				// Fallback to the next file.
-			} else {
-				// In case the error is due to permission, etc.
-				return nil, err
-			}
-		}
-		if cur == nil {
-			err := os.ErrNotExist
-			if lastCerr != nil {
-				err = lastCerr
-			}
-			return nil, err
-		}
-		return cur, nil
-	}
-
-	// Try 'pending rename' files.
-	var nums []int64
-	for _, name := range names {
-		if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" {
-			i, err := strconv.ParseInt(name[8:], 10, 64)
-			if err == nil {
-				nums = append(nums, i)
-			}
-		}
-	}
-	var (
-		pendCur   *currentFile
-		pendErr   = os.ErrNotExist
-		pendNames []string
-	)
-	if len(nums) > 0 {
-		sort.Sort(sort.Reverse(int64Slice(nums)))
-		pendNames = make([]string, len(nums))
-		for i, num := range nums {
-			pendNames[i] = fmt.Sprintf("CURRENT.%d", num)
-		}
-		pendCur, pendErr = tryCurrents(pendNames)
-		if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) {
-			return FileDesc{}, pendErr
-		}
-	}
-
-	// Try CURRENT and CURRENT.bak.
-	curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"})
-	if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) {
-		return FileDesc{}, curErr
-	}
-
-	// pendCur takes precedence, but guards against obsolete pendCur.
-	if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) {
-		curCur = pendCur
-	}
-
-	if curCur != nil {
-		// Restore CURRENT file to proper state.
-		if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) {
-			// Ignore setMeta errors, however don't delete obsolete files if we
-			// catch error.
-			if err := fs.setMeta(curCur.fd); err == nil {
-				// Remove 'pending rename' files.
-				for _, name := range pendNames {
-					if err := os.Remove(filepath.Join(fs.path, name)); err != nil {
-						fs.log(fmt.Sprintf("remove %s: %v", name, err))
-					}
-				}
-			}
-		}
-		return curCur.fd, nil
-	}
-
-	// Nothing found.
-	if isCorrupted(pendErr) {
-		return FileDesc{}, pendErr
-	}
-	return FileDesc{}, curErr
-}
-
-func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) {
-	fs.mu.Lock()
-	defer fs.mu.Unlock()
-	if fs.open < 0 {
-		return nil, ErrClosed
-	}
-	dir, err := os.Open(fs.path)
-	if err != nil {
-		return
-	}
-	names, err := dir.Readdirnames(0)
-	// Close the dir first before checking for Readdirnames error.
- if cerr := dir.Close(); cerr != nil { - fs.log(fmt.Sprintf("close dir: %v", cerr)) - } - if err == nil { - for _, name := range names { - if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 { - fds = append(fds, fd) - } - } - } - return -} - -func (fs *fileStorage) Open(fd FileDesc) (Reader, error) { - if !FileDescOk(fd) { - return nil, ErrInvalidFile - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0) - if err != nil { - if fsHasOldName(fd) && os.IsNotExist(err) { - of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0) - if err == nil { - goto ok - } - } - return nil, err - } -ok: - fs.open++ - return &fileWrap{File: of, fs: fs, fd: fd}, nil -} - -func (fs *fileStorage) Create(fd FileDesc) (Writer, error) { - if !FileDescOk(fd) { - return nil, ErrInvalidFile - } - if fs.readOnly { - return nil, errReadOnly - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err - } - fs.open++ - return &fileWrap{File: of, fs: fs, fd: fd}, nil -} - -func (fs *fileStorage) Remove(fd FileDesc) error { - if !FileDescOk(fd) { - return ErrInvalidFile - } - if fs.readOnly { - return errReadOnly - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - err := os.Remove(filepath.Join(fs.path, fsGenName(fd))) - if err != nil { - if fsHasOldName(fd) && os.IsNotExist(err) { - if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) { - fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err)) - err = e1 - } - } else { - fs.log(fmt.Sprintf("remove %s: %v", fd, err)) - } - } - return err -} - -func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error { - if !FileDescOk(oldfd) || !FileDescOk(newfd) { - return ErrInvalidFile - } - if oldfd == newfd { - return nil - } - if fs.readOnly { - return errReadOnly - } - - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd))) -} - -func (fs *fileStorage) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) - } - fs.open = -1 - if fs.logw != nil { - fs.logw.Close() - } - return fs.flock.release() -} - -type fileWrap struct { - *os.File - fs *fileStorage - fd FileDesc - closed bool -} - -func (fw *fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err - } - if fw.fd.Type == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
- if err := syncDir(fw.fs.path); err != nil { - fw.fs.log(fmt.Sprintf("syncDir: %v", err)) - return err - } - } - return nil -} - -func (fw *fileWrap) Close() error { - fw.fs.mu.Lock() - defer fw.fs.mu.Unlock() - if fw.closed { - return ErrClosed - } - fw.closed = true - fw.fs.open-- - err := fw.File.Close() - if err != nil { - fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err)) - } - return err -} - -func fsGenName(fd FileDesc) string { - switch fd.Type { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", fd.Num) - case TypeJournal: - return fmt.Sprintf("%06d.log", fd.Num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", fd.Num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", fd.Num) - default: - panic("invalid file type") - } -} - -func fsHasOldName(fd FileDesc) bool { - return fd.Type == TypeTable -} - -func fsGenOldName(fd FileDesc) string { - switch fd.Type { - case TypeTable: - return fmt.Sprintf("%06d.sst", fd.Num) - } - return fsGenName(fd) -} - -func fsParseName(name string) (fd FileDesc, ok bool) { - var tail string - _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail) - if err == nil { - switch tail { - case "log": - fd.Type = TypeJournal - case "ldb", "sst": - fd.Type = TypeTable - case "tmp": - fd.Type = TypeTemp - default: - return - } - return fd, true - } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail) - if n == 1 { - fd.Type = TypeManifest - return fd, true - } - return -} - -func fsParseNamePtr(name string, fd *FileDesc) bool { - _fd, ok := fsParseName(name) - if fd != nil { - *fd = _fd - } - return ok -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go deleted file mode 100644 index 5545aeef..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build nacl - -package storage - -import ( - "os" - "syscall" -) - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - return nil, syscall.ENOTSUP -} - -func setFileLock(f *os.File, readOnly, lock bool) error { - return syscall.ENOTSUP -} - -func rename(oldpath, newpath string) error { - return syscall.ENOTSUP -} - -func isErrInvalid(err error) bool { - return false -} - -func syncDir(name string) error { - return syscall.ENOTSUP -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go deleted file mode 100644 index b8297980..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "os" -) - -type plan9FileLock struct { - f *os.File -} - -func (fl *plan9FileLock) release() error { - return fl.f.Close() -} - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - var ( - flag int - perm os.FileMode - ) - if readOnly { - flag = os.O_RDONLY - } else { - flag = os.O_RDWR - perm = os.ModeExclusive - } - f, err := os.OpenFile(path, flag, perm) - if os.IsNotExist(err) { - f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644) - } - if err != nil { - return - } - fl = &plan9FileLock{f: f} - return -} - -func rename(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err := os.Remove(newpath); err != nil { - return err - } - } - - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go deleted file mode 100644 index 79901ee4..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build solaris - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - var flag int - if readOnly { - flag = os.O_RDONLY - } else { - flag = os.O_RDWR - } - f, err := os.OpenFile(path, flag, 0) - if os.IsNotExist(err) { - f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) - } - if err != nil { - return - } - err = setFileLock(f, readOnly, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, readOnly, lock bool) error { - flock := syscall.Flock_t{ - Type: syscall.F_UNLCK, - Start: 0, - Len: 0, - Whence: 1, - } - if lock { - if readOnly { - flock.Type = syscall.F_RDLCK - } else { - flock.Type = syscall.F_WRLCK - } - } - return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go deleted file mode 100644 index d75f66a9..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - var flag int - if readOnly { - flag = os.O_RDONLY - } else { - flag = os.O_RDWR - } - f, err := os.OpenFile(path, flag, 0) - if os.IsNotExist(err) { - f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) - } - if err != nil { - return - } - err = setFileLock(f, readOnly, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, readOnly, lock bool) error { - how := syscall.LOCK_UN - if lock { - if readOnly { - how = syscall.LOCK_SH - } else { - how = syscall.LOCK_EX - } - } - return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func isErrInvalid(err error) bool { - if err == os.ErrInvalid { - return true - } - // Go < 1.8 - if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL { - return true - } - // Go >= 1.8 returns *os.PathError instead - if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL { - return true - } - return false -} - -func syncDir(name string) error { - // As per fsync manpage, Linux seems to expect fsync on directory, however - // some system don't support this, so we will ignore syscall.EINVAL. - // - // From fsync(2): - // Calling fsync() does not necessarily ensure that the entry in the - // directory containing the file has also reached disk. For that an - // explicit fsync() on a file descriptor for the directory is also needed. - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil && !isErrInvalid(err) { - return err - } - return nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go deleted file mode 100644 index 899335fd..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procMoveFileExW = modkernel32.NewProc("MoveFileExW") -) - -const ( - _MOVEFILE_REPLACE_EXISTING = 1 -) - -type windowsFileLock struct { - fd syscall.Handle -} - -func (fl *windowsFileLock) release() error { - return syscall.Close(fl.fd) -} - -func newFileLock(path string, readOnly bool) (fl fileLock, err error) { - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return - } - var access, shareMode uint32 - if readOnly { - access = syscall.GENERIC_READ - shareMode = syscall.FILE_SHARE_READ - } else { - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE - } - fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0) - if err == syscall.ERROR_FILE_NOT_FOUND { - fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) - } - if err != nil { - return - } - fl = &windowsFileLock{fd: fd} - return -} - -func moveFileEx(from *uint16, to *uint16, flags uint32) error { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - return error(e1) - } - return syscall.EINVAL - } - return nil -} - -func rename(oldpath, newpath string) error { - from, err := syscall.UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := syscall.UTF16PtrFromString(newpath) - if err != nil { - return err - } - return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) -} - -func syncDir(name string) error { return nil } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go deleted file mode 100644 index 838f1bee..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "bytes" - "os" - "sync" -) - -const typeShift = 4 - -// Verify at compile-time that typeShift is large enough to cover all FileType -// values by confirming that 0 == 0. -var _ [0]struct{} = [TypeAll >> typeShift]struct{}{} - -type memStorageLock struct { - ms *memStorage -} - -func (lock *memStorageLock) Unlock() { - ms := lock.ms - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock == lock { - ms.slock = nil - } - return -} - -// memStorage is a memory-backed storage. -type memStorage struct { - mu sync.Mutex - slock *memStorageLock - files map[uint64]*memFile - meta FileDesc -} - -// NewMemStorage returns a new memory-backed storage implementation. 
-func NewMemStorage() Storage { - return &memStorage{ - files: make(map[uint64]*memFile), - } -} - -func (ms *memStorage) Lock() (Locker, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock != nil { - return nil, ErrLocked - } - ms.slock = &memStorageLock{ms: ms} - return ms.slock, nil -} - -func (*memStorage) Log(str string) {} - -func (ms *memStorage) SetMeta(fd FileDesc) error { - if !FileDescOk(fd) { - return ErrInvalidFile - } - - ms.mu.Lock() - ms.meta = fd - ms.mu.Unlock() - return nil -} - -func (ms *memStorage) GetMeta() (FileDesc, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.meta.Zero() { - return FileDesc{}, os.ErrNotExist - } - return ms.meta, nil -} - -func (ms *memStorage) List(ft FileType) ([]FileDesc, error) { - ms.mu.Lock() - var fds []FileDesc - for x := range ms.files { - fd := unpackFile(x) - if fd.Type&ft != 0 { - fds = append(fds, fd) - } - } - ms.mu.Unlock() - return fds, nil -} - -func (ms *memStorage) Open(fd FileDesc) (Reader, error) { - if !FileDescOk(fd) { - return nil, ErrInvalidFile - } - - ms.mu.Lock() - defer ms.mu.Unlock() - if m, exist := ms.files[packFile(fd)]; exist { - if m.open { - return nil, errFileOpen - } - m.open = true - return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil - } - return nil, os.ErrNotExist -} - -func (ms *memStorage) Create(fd FileDesc) (Writer, error) { - if !FileDescOk(fd) { - return nil, ErrInvalidFile - } - - x := packFile(fd) - ms.mu.Lock() - defer ms.mu.Unlock() - m, exist := ms.files[x] - if exist { - if m.open { - return nil, errFileOpen - } - m.Reset() - } else { - m = &memFile{} - ms.files[x] = m - } - m.open = true - return &memWriter{memFile: m, ms: ms}, nil -} - -func (ms *memStorage) Remove(fd FileDesc) error { - if !FileDescOk(fd) { - return ErrInvalidFile - } - - x := packFile(fd) - ms.mu.Lock() - defer ms.mu.Unlock() - if _, exist := ms.files[x]; exist { - delete(ms.files, x) - return nil - } - return os.ErrNotExist -} - -func (ms *memStorage) Rename(oldfd, newfd FileDesc) error { - if !FileDescOk(oldfd) || !FileDescOk(newfd) { - return ErrInvalidFile - } - if oldfd == newfd { - return nil - } - - oldx := packFile(oldfd) - newx := packFile(newfd) - ms.mu.Lock() - defer ms.mu.Unlock() - oldm, exist := ms.files[oldx] - if !exist { - return os.ErrNotExist - } - newm, exist := ms.files[newx] - if (exist && newm.open) || oldm.open { - return errFileOpen - } - delete(ms.files, oldx) - ms.files[newx] = oldm - return nil -} - -func (*memStorage) Close() error { return nil } - -type memFile struct { - bytes.Buffer - open bool -} - -type memReader struct { - *bytes.Reader - ms *memStorage - m *memFile - closed bool -} - -func (mr *memReader) Close() error { - mr.ms.mu.Lock() - defer mr.ms.mu.Unlock() - if mr.closed { - return ErrClosed - } - mr.m.open = false - return nil -} - -type memWriter struct { - *memFile - ms *memStorage - closed bool -} - -func (*memWriter) Sync() error { return nil } - -func (mw *memWriter) Close() error { - mw.ms.mu.Lock() - defer mw.ms.mu.Unlock() - if mw.closed { - return ErrClosed - } - mw.memFile.open = false - return nil -} - -func packFile(fd FileDesc) uint64 { - return uint64(fd.Num)<<typeShift | uint64(fd.Type) -} - -func unpackFile(x uint64) FileDesc { - return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)} -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go deleted file mode 100644 index 4e4a7242..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights
reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package storage provides storage abstraction for LevelDB. -package storage - -import ( - "errors" - "fmt" - "io" -) - -// FileType represent a file type. -type FileType int - -// File types. -const ( - TypeManifest FileType = 1 << iota - TypeJournal - TypeTable - TypeTemp - - TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp -) - -func (t FileType) String() string { - switch t { - case TypeManifest: - return "manifest" - case TypeJournal: - return "journal" - case TypeTable: - return "table" - case TypeTemp: - return "temp" - } - return fmt.Sprintf("<unknown:%d>", t) -} - -// Common error. -var ( - ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument") - ErrLocked = errors.New("leveldb/storage: already locked") - ErrClosed = errors.New("leveldb/storage: closed") -) - -// ErrCorrupted is the type that wraps errors that indicate corruption of -// a file. Package storage has its own type instead of using -// errors.ErrCorrupted to prevent circular import. -type ErrCorrupted struct { - Fd FileDesc - Err error -} - -func isCorrupted(err error) bool { - switch err.(type) { - case *ErrCorrupted: - return true - } - return false -} - -func (e *ErrCorrupted) Error() string { - if !e.Fd.Zero() { - return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) - } - return e.Err.Error() -} - -// Syncer is the interface that wraps basic Sync method. -type Syncer interface { - // Sync commits the current contents of the file to stable storage. - Sync() error -} - -// Reader is the interface that groups the basic Read, Seek, ReadAt and Close -// methods. -type Reader interface { - io.ReadSeeker - io.ReaderAt - io.Closer -} - -// Writer is the interface that groups the basic Write, Sync and Close -// methods. -type Writer interface { - io.WriteCloser - Syncer -} - -// Locker is the interface that wraps Unlock method. -type Locker interface { - Unlock() -} - -// FileDesc is a 'file descriptor'. -type FileDesc struct { - Type FileType - Num int64 -} - -func (fd FileDesc) String() string { - switch fd.Type { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", fd.Num) - case TypeJournal: - return fmt.Sprintf("%06d.log", fd.Num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", fd.Num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", fd.Num) - default: - return fmt.Sprintf("%#x-%d", fd.Type, fd.Num) - } -} - -// Zero returns true if fd == (FileDesc{}). -func (fd FileDesc) Zero() bool { - return fd == (FileDesc{}) -} - -// FileDescOk returns true if fd is a valid 'file descriptor'. -func FileDescOk(fd FileDesc) bool { - switch fd.Type { - case TypeManifest: - case TypeJournal: - case TypeTable: - case TypeTemp: - default: - return false - } - return fd.Num >= 0 -} - -// Storage is the storage. A storage instance must be safe for concurrent use. -type Storage interface { - // Lock locks the storage. Any subsequent attempt to call Lock will fail - // until the last lock released. - // Caller should call Unlock method after use. - Lock() (Locker, error) - - // Log logs a string. This is used for logging. - // An implementation may write to a file, stdout or simply do nothing. - Log(str string) - - // SetMeta store 'file descriptor' that can later be acquired using GetMeta - // method. The 'file descriptor' should point to a valid file. - // SetMeta should be implemented in such way that changes should happen - // atomically.
- SetMeta(fd FileDesc) error - - // GetMeta returns 'file descriptor' stored in meta. The 'file descriptor' - // can be updated using SetMeta method. - // Returns os.ErrNotExist if meta doesn't store any 'file descriptor', or - // 'file descriptor' point to nonexistent file. - GetMeta() (FileDesc, error) - - // List returns file descriptors that match the given file types. - // The file types may be OR'ed together. - List(ft FileType) ([]FileDesc, error) - - // Open opens file with the given 'file descriptor' read-only. - // Returns os.ErrNotExist error if the file does not exist. - // Returns ErrClosed if the underlying storage is closed. - Open(fd FileDesc) (Reader, error) - - // Create creates file with the given 'file descriptor', truncate if already - // exist and opens write-only. - // Returns ErrClosed if the underlying storage is closed. - Create(fd FileDesc) (Writer, error) - - // Remove removes file with the given 'file descriptor'. - // Returns ErrClosed if the underlying storage is closed. - Remove(fd FileDesc) error - - // Rename renames file from oldfd to newfd. - // Returns ErrClosed if the underlying storage is closed. - Rename(oldfd, newfd FileDesc) error - - // Close closes the storage. - // It is valid to call Close multiple times. Other methods should not be - // called after the storage has been closed. - Close() error -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go deleted file mode 100644 index adf773f1..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table.go +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// tFile holds basic information about a table. -type tFile struct { - fd storage.FileDesc - seekLeft int32 - size int64 - imin, imax internalKey -} - -// Returns true if given key is after largest key of this table. -func (t *tFile) after(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 -} - -// Returns true if given key is before smallest key of this table. -func (t *tFile) before(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 -} - -// Returns true if given key range overlaps with this table key range. -func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { - return !t.after(icmp, umin) && !t.before(icmp, umax) -} - -// Cosumes one seek and return current seeks left. -func (t *tFile) consumeSeek() int32 { - return atomic.AddInt32(&t.seekLeft, -1) -} - -// Creates new tFile. -func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { - f := &tFile{ - fd: fd, - size: size, - imin: imin, - imax: imax, - } - - // We arrange to automatically compact this file after - // a certain number of seeks. 
Let's assume: - // (1) One seek costs 10ms - // (2) Writing or reading 1MB costs 10ms (100MB/s) - // (3) A compaction of 1MB does 25MB of IO: - // 1MB read from this level - // 10-12MB read from next level (boundaries may be misaligned) - // 10-12MB written to next level - // This implies that 25 seeks cost the same as the compaction - // of 1MB of data. I.e., one seek costs approximately the - // same as the compaction of 40KB of data. We are a little - // conservative and allow approximately one seek for every 16KB - // of data before triggering a compaction. - f.seekLeft = int32(size / 16384) - if f.seekLeft < 100 { - f.seekLeft = 100 - } - - return f -} - -func tableFileFromRecord(r atRecord) *tFile { - return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax) -} - -// tFiles hold multiple tFile. -type tFiles []*tFile - -func (tf tFiles) Len() int { return len(tf) } -func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } - -func (tf tFiles) nums() string { - x := "[ " - for i, f := range tf { - if i != 0 { - x += ", " - } - x += fmt.Sprint(f.fd.Num) - } - x += " ]" - return x -} - -// Returns true if i smallest key is less than j. -// This used for sort by key in ascending order. -func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { - a, b := tf[i], tf[j] - n := icmp.Compare(a.imin, b.imin) - if n == 0 { - return a.fd.Num < b.fd.Num - } - return n < 0 -} - -// Returns true if i file number is greater than j. -// This used for sort by file number in descending order. -func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].fd.Num > tf[j].fd.Num -} - -// Sorts tables by key in ascending order. -func (tf tFiles) sortByKey(icmp *iComparer) { - sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) -} - -// Sorts tables by file number in descending order. -func (tf tFiles) sortByNum() { - sort.Sort(&tFilesSortByNum{tFiles: tf}) -} - -// Returns sum of all tables size. -func (tf tFiles) size() (sum int64) { - for _, t := range tf { - sum += t.size - } - return sum -} - -// Searches smallest index of tables whose its smallest -// key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imin, ikey) >= 0 - }) -} - -// Searches smallest index of tables whose its largest -// key is after or equal with given key. -func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imax, ikey) >= 0 - }) -} - -// Returns true if given key range overlaps with one or more -// tables key range. If unsorted is true then binary search will not be used. -func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { - if unsorted { - // Check against all files. - for _, t := range tf { - if t.overlaps(icmp, umin, umax) { - return true - } - } - return false - } - - i := 0 - if len(umin) > 0 { - // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) - } - if i >= len(tf) { - // Beginning of range is after all files, so no overlap. - return false - } - return !tf[i].before(icmp, umax) -} - -// Returns tables whose its key range overlaps with given key range. -// Range will be expanded if ukey found hop across tables. -// If overlapped is true then the search will be restarted if umax -// expanded. -// The dst content will be overwritten. 
-func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { - dst = dst[:0] - for i := 0; i < len(tf); { - t := tf[i] - if t.overlaps(icmp, umin, umax) { - if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { - umin = t.imin.ukey() - dst = dst[:0] - i = 0 - continue - } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { - umax = t.imax.ukey() - // Restart search if it is overlapped. - if overlapped { - dst = dst[:0] - i = 0 - continue - } - } - - dst = append(dst, t) - } - i++ - } - - return dst -} - -// Returns tables key range. -func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { - for i, t := range tf { - if i == 0 { - imin, imax = t.imin, t.imax - continue - } - if icmp.Compare(t.imin, imin) < 0 { - imin = t.imin - } - if icmp.Compare(t.imax, imax) > 0 { - imax = t.imax - } - } - - return -} - -// Creates iterator index from tables. -func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { - if slice != nil { - var start, limit int - if slice.Start != nil { - start = tf.searchMax(icmp, internalKey(slice.Start)) - } - if slice.Limit != nil { - limit = tf.searchMin(icmp, internalKey(slice.Limit)) - } else { - limit = tf.Len() - } - tf = tf[start:limit] - } - return iterator.NewArrayIndexer(&tFilesArrayIndexer{ - tFiles: tf, - tops: tops, - icmp: icmp, - slice: slice, - ro: ro, - }) -} - -// Tables iterator index. -type tFilesArrayIndexer struct { - tFiles - tops *tOps - icmp *iComparer - slice *util.Range - ro *opt.ReadOptions -} - -func (a *tFilesArrayIndexer) Search(key []byte) int { - return a.searchMax(a.icmp, internalKey(key)) -} - -func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { - if i == 0 || i == a.Len()-1 { - return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) - } - return a.tops.newIterator(a.tFiles[i], nil, a.ro) -} - -// Helper type for sortByKey. -type tFilesSortByKey struct { - tFiles - icmp *iComparer -} - -func (x *tFilesSortByKey) Less(i, j int) bool { - return x.lessByKey(x.icmp, i, j) -} - -// Helper type for sortByNum. -type tFilesSortByNum struct { - tFiles -} - -func (x *tFilesSortByNum) Less(i, j int) bool { - return x.lessByNum(i, j) -} - -// Table operations. -type tOps struct { - s *session - noSync bool - cache *cache.Cache - bcache *cache.Cache - bpool *util.BufferPool -} - -// Creates an empty table and returns table writer. -func (t *tOps) create() (*tWriter, error) { - fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()} - fw, err := t.s.stor.Create(fd) - if err != nil { - return nil, err - } - return &tWriter{ - t: t, - fd: fd, - w: fw, - tw: table.NewWriter(fw, t.s.o.Options), - }, nil -} - -// Builds table from src iterator. -func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { - w, err := t.create() - if err != nil { - return - } - - defer func() { - if err != nil { - w.drop() - } - }() - - for src.Next() { - err = w.append(src.Key(), src.Value()) - if err != nil { - return - } - } - err = src.Error() - if err != nil { - return - } - - n = w.tw.EntriesLen() - f, err = w.finish() - return -} - -// Opens table. It returns a cache handle, which should -// be released after use. 
-func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { - ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { - var r storage.Reader - r, err = t.s.stor.Open(f.fd) - if err != nil { - return 0, nil - } - - var bcache *cache.NamespaceGetter - if t.bcache != nil { - bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} - } - - var tr *table.Reader - tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) - if err != nil { - r.Close() - return 0, nil - } - return 1, tr - - }) - if ch == nil && err == nil { - err = ErrClosed - } - return -} - -// Finds key/value pair whose key is greater than or equal to the -// given key. -func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).Find(key, true, ro) -} - -// Finds key that is greater than or equal to the given key. -func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).FindKey(key, true, ro) -} - -// Returns approximate offset of the given key. -func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { - ch, err := t.open(f) - if err != nil { - return - } - defer ch.Release() - return ch.Value().(*table.Reader).OffsetOf(key) -} - -// Creates an iterator from the given table. -func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - ch, err := t.open(f) - if err != nil { - return iterator.NewEmptyIterator(err) - } - iter := ch.Value().(*table.Reader).NewIterator(slice, ro) - iter.SetReleaser(ch) - return iter -} - -// Removes table from persistent storage. It waits until -// no one use the the table. -func (t *tOps) remove(f *tFile) { - t.cache.Delete(0, uint64(f.fd.Num), func() { - if err := t.s.stor.Remove(f.fd); err != nil { - t.s.logf("table@remove removing @%d %q", f.fd.Num, err) - } else { - t.s.logf("table@remove removed @%d", f.fd.Num) - } - if t.bcache != nil { - t.bcache.EvictNS(uint64(f.fd.Num)) - } - }) -} - -// Closes the table ops instance. It will close all tables, -// regadless still used or not. -func (t *tOps) close() { - t.bpool.Close() - t.cache.Close() - if t.bcache != nil { - t.bcache.CloseWeak() - } -} - -// Creates new initialized table ops instance. -func newTableOps(s *session) *tOps { - var ( - cacher cache.Cacher - bcache *cache.Cache - bpool *util.BufferPool - ) - if s.o.GetOpenFilesCacheCapacity() > 0 { - cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) - } - if !s.o.GetDisableBlockCache() { - var bcacher cache.Cacher - if s.o.GetBlockCacheCapacity() > 0 { - bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) - } - bcache = cache.NewCache(bcacher) - } - if !s.o.GetDisableBufferPool() { - bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) - } - return &tOps{ - s: s, - noSync: s.o.GetNoSync(), - cache: cache.NewCache(cacher), - bcache: bcache, - bpool: bpool, - } -} - -// tWriter wraps the table writer. It keep track of file descriptor -// and added key range. -type tWriter struct { - t *tOps - - fd storage.FileDesc - w storage.Writer - tw *table.Writer - - first, last []byte -} - -// Append key/value pair to the table. -func (w *tWriter) append(key, value []byte) error { - if w.first == nil { - w.first = append([]byte{}, key...) 
- } - w.last = append(w.last[:0], key...) - return w.tw.Append(key, value) -} - -// Returns true if the table is empty. -func (w *tWriter) empty() bool { - return w.first == nil -} - -// Closes the storage.Writer. -func (w *tWriter) close() { - if w.w != nil { - w.w.Close() - w.w = nil - } -} - -// Finalizes the table and returns table file. -func (w *tWriter) finish() (f *tFile, err error) { - defer w.close() - err = w.tw.Close() - if err != nil { - return - } - if !w.t.noSync { - err = w.w.Sync() - if err != nil { - return - } - } - f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) - return -} - -// Drops the table. -func (w *tWriter) drop() { - w.close() - w.t.s.stor.Remove(w.fd) - w.t.s.reuseFileNum(w.fd.Num) - w.tw = nil - w.first = nil - w.last = nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go deleted file mode 100644 index 16cfbaa0..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go +++ /dev/null @@ -1,1135 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - "io" - "sort" - "strings" - "sync" - - "github.com/golang/snappy" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader errors. -var ( - ErrNotFound = errors.ErrNotFound - ErrReaderReleased = errors.New("leveldb/table: reader released") - ErrIterReleased = errors.New("leveldb/table: iterator released") -) - -// ErrCorrupted describes error due to corruption. This error will be wrapped -// with errors.ErrCorrupted. -type ErrCorrupted struct { - Pos int64 - Size int64 - Kind string - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) -} - -func max(x, y int) int { - if x > y { - return x - } - return y -} - -type block struct { - bpool *util.BufferPool - bh blockHandle - data []byte - restartsLen int - restartsOffset int -} - -func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { - index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) - offset++ // shared always zero, since this is a restart point - v1, n1 := binary.Uvarint(b.data[offset:]) // key length - _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length - m := offset + n1 + n2 - return cmp.Compare(b.data[m:m+int(v1)], key) > 0 - }) + rstart - 1 - if index < rstart { - // The smallest key is greater-than key sought. 
- index = rstart - } - offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) - return -} - -func (b *block) restartIndex(rstart, rlimit, offset int) int { - return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset - }) + rstart - 1 -} - -func (b *block) restartOffset(index int) int { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) -} - -func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { - if offset >= b.restartsOffset { - if offset != b.restartsOffset { - err = &ErrCorrupted{Reason: "entries offset not aligned"} - } - return - } - v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length - v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length - v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length - m := n0 + n1 + n2 - n = m + int(v1) + int(v2) - if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { - err = &ErrCorrupted{Reason: "entries corrupted"} - return - } - key = b.data[offset+m : offset+m+int(v1)] - value = b.data[offset+m+int(v1) : offset+n] - nShared = int(v0) - return -} - -func (b *block) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type blockIter struct { - tr *Reader - block *block - blockReleaser util.Releaser - releaser util.Releaser - key, value []byte - offset int - // Previous offset, only filled by Next. - prevOffset int - prevNode []int - prevKeys []byte - restartIndex int - // Iterator direction. - dir dir - // Restart index slice range. - riStart int - riLimit int - // Offset slice range. - offsetStart int - offsetRealStart int - offsetLimit int - // Error. 
- err error -} - -func (i *blockIter) sErr(err error) { - i.err = err - i.key = nil - i.value = nil - i.prevNode = nil - i.prevKeys = nil -} - -func (i *blockIter) reset() { - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.restartIndex = i.riStart - i.offset = i.offsetStart - i.dir = dirSOI - i.key = i.key[:0] - i.value = nil -} - -func (i *blockIter) isFirst() bool { - switch i.dir { - case dirForward: - return i.prevOffset == i.offsetRealStart - case dirBackward: - return len(i.prevNode) == 1 && i.restartIndex == i.riStart - } - return false -} - -func (i *blockIter) isLast() bool { - switch i.dir { - case dirForward, dirBackward: - return i.offset == i.offsetLimit - } - return false -} - -func (i *blockIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirSOI - return i.Next() -} - -func (i *blockIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirEOI - return i.Prev() -} - -func (i *blockIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) - if err != nil { - i.sErr(err) - return false - } - i.restartIndex = ri - i.offset = max(i.offsetStart, offset) - if i.dir == dirSOI || i.dir == dirEOI { - i.dir = dirForward - } - for i.Next() { - if i.tr.cmp.Compare(i.key, key) >= 0 { - return true - } - } - return false -} - -func (i *blockIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirSOI { - i.restartIndex = i.riStart - i.offset = i.offsetStart - } else if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - for i.offset < i.offsetRealStart { - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.offset += n - } - if i.offset >= i.offsetLimit { - i.dir = dirEOI - if i.offset != i.offsetLimit { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - } - return false - } - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.prevOffset = i.offset - i.offset += n - i.dir = dirForward - return true -} - -func (i *blockIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - var ri int - if i.dir == dirForward { - // Change direction. - i.offset = i.prevOffset - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) - i.dir = dirBackward - } else if i.dir == dirEOI { - // At the end of iterator. 
- i.restartIndex = i.riLimit - i.offset = i.offsetLimit - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.riLimit - 1 - i.dir = dirBackward - } else if len(i.prevNode) == 1 { - // This is the end of a restart range. - i.offset = i.prevNode[0] - i.prevNode = i.prevNode[:0] - if i.restartIndex == i.riStart { - i.dir = dirSOI - return false - } - i.restartIndex-- - ri = i.restartIndex - } else { - // In the middle of restart range, get from cache. - n := len(i.prevNode) - 3 - node := i.prevNode[n:] - i.prevNode = i.prevNode[:n] - // Get the key. - ko := node[0] - i.key = append(i.key[:0], i.prevKeys[ko:]...) - i.prevKeys = i.prevKeys[:ko] - // Get the value. - vo := node[1] - vl := vo + node[2] - i.value = i.block.data[vo:vl] - i.offset = vl - return true - } - // Build entries cache. - i.key = i.key[:0] - i.value = nil - offset := i.block.restartOffset(ri) - if offset == i.offset { - ri-- - if ri < 0 { - i.dir = dirSOI - return false - } - offset = i.block.restartOffset(ri) - } - i.prevNode = append(i.prevNode, offset) - for { - key, value, nShared, n, err := i.block.entry(offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if offset >= i.offsetRealStart { - if i.value != nil { - // Appends 3 variables: - // 1. Previous keys offset - // 2. Value offset in the data block - // 3. Value length - i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) - i.prevKeys = append(i.prevKeys, i.key...) - } - i.value = value - } - i.key = append(i.key[:nShared], key...) - offset += n - // Stop if target offset reached. - if offset >= i.offset { - if offset != i.offset { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - return false - } - - break - } - } - i.restartIndex = ri - i.offset = offset - return true -} - -func (i *blockIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *blockIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *blockIter) Release() { - if i.dir != dirReleased { - i.tr = nil - i.block = nil - i.prevNode = nil - i.prevKeys = nil - i.key = nil - i.value = nil - i.dir = dirReleased - if i.blockReleaser != nil { - i.blockReleaser.Release() - i.blockReleaser = nil - } - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *blockIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *blockIter) Valid() bool { - return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) -} - -func (i *blockIter) Error() error { - return i.err -} - -type filterBlock struct { - bpool *util.BufferPool - data []byte - oOffset int - baseLg uint - filtersNum int -} - -func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { - i := int(offset >> b.baseLg) - if i < b.filtersNum { - o := b.data[b.oOffset+i*4:] - n := int(binary.LittleEndian.Uint32(o)) - m := int(binary.LittleEndian.Uint32(o[4:])) - if n < m && m <= b.oOffset { - return filter.Contains(b.data[n:m], key) - } else if n == m { - return false - } - } - return true -} - -func (b *filterBlock) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type indexIter struct { - *blockIter - tr *Reader - slice *util.Range - // Options - fillCache bool -} - -func (i 
*indexIter) Get() iterator.Iterator { - value := i.Value() - if value == nil { - return nil - } - dataBH, n := decodeBlockHandle(value) - if n == 0 { - return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) - } - - var slice *util.Range - if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { - slice = i.slice - } - return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) -} - -// Reader is a table reader. -type Reader struct { - mu sync.RWMutex - fd storage.FileDesc - reader io.ReaderAt - cache *cache.NamespaceGetter - err error - bpool *util.BufferPool - // Options - o *opt.Options - cmp comparer.Comparer - filter filter.Filter - verifyChecksum bool - - dataEnd int64 - metaBH, indexBH, filterBH blockHandle - indexBlock *block - filterBlock *filterBlock -} - -func (r *Reader) blockKind(bh blockHandle) string { - switch bh.offset { - case r.metaBH.offset: - return "meta-block" - case r.indexBH.offset: - return "index-block" - case r.filterBH.offset: - if r.filterBH.length > 0 { - return "filter-block" - } - } - return "data-block" -} - -func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { - return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} -} - -func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { - return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) -} - -func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { - if cerr, ok := err.(*ErrCorrupted); ok { - cerr.Pos = int64(bh.offset) - cerr.Size = int64(bh.length) - cerr.Kind = r.blockKind(bh) - return &errors.ErrCorrupted{Fd: r.fd, Err: cerr} - } - return err -} - -func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { - data := r.bpool.Get(int(bh.length + blockTrailerLen)) - if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { - return nil, err - } - - if verifyChecksum { - n := bh.length + 1 - checksum0 := binary.LittleEndian.Uint32(data[n:]) - checksum1 := util.NewCRC(data[:n]).Value() - if checksum0 != checksum1 { - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) - } - } - - switch data[bh.length] { - case blockTypeNoCompression: - data = data[:bh.length] - case blockTypeSnappyCompression: - decLen, err := snappy.DecodedLen(data[:bh.length]) - if err != nil { - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - decData := r.bpool.Get(decLen) - decData, err = snappy.Decode(decData, data[:bh.length]) - r.bpool.Put(data) - if err != nil { - r.bpool.Put(decData) - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - data = decData - default: - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) - } - return data, nil -} - -func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { - data, err := r.readRawBlock(bh, verifyChecksum) - if err != nil { - return nil, err - } - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - b := &block{ - bpool: r.bpool, - bh: bh, - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - } - return b, nil -} - -func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch 
*cache.Handle - ) - if fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *block - b, err = r.readBlock(bh, verifyChecksum) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*block) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readBlock(bh, verifyChecksum) - return b, b, err -} - -func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { - data, err := r.readRawBlock(bh, true) - if err != nil { - return nil, err - } - n := len(data) - if n < 5 { - return nil, r.newErrCorruptedBH(bh, "too short") - } - m := n - 5 - oOffset := int(binary.LittleEndian.Uint32(data[m:])) - if oOffset > m { - return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") - } - b := &filterBlock{ - bpool: r.bpool, - data: data, - oOffset: oOffset, - baseLg: uint(data[n-1]), - filtersNum: (m - oOffset) / 4, - } - return b, nil -} - -func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *filterBlock - b, err = r.readFilterBlock(bh) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*filterBlock) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readFilterBlock(bh) - return b, b, err -} - -func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { - if r.indexBlock == nil { - return r.readBlockCached(r.indexBH, true, fillCache) - } - return r.indexBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { - if r.filterBlock == nil { - return r.readFilterBlockCached(r.filterBH, fillCache) - } - return r.filterBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { - bi := &blockIter{ - tr: r, - block: b, - blockReleaser: bReleaser, - // Valid key should never be nil. 
- key: make([]byte, 0), - dir: dirSOI, - riStart: 0, - riLimit: b.restartsLen, - offsetStart: 0, - offsetRealStart: 0, - offsetLimit: b.restartsOffset, - } - if slice != nil { - if slice.Start != nil { - if bi.Seek(slice.Start) { - bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) - bi.offsetStart = b.restartOffset(bi.riStart) - bi.offsetRealStart = bi.prevOffset - } else { - bi.riStart = b.restartsLen - bi.offsetStart = b.restartsOffset - bi.offsetRealStart = b.restartsOffset - } - } - if slice.Limit != nil { - if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { - bi.offsetLimit = bi.prevOffset - bi.riLimit = bi.restartIndex + 1 - } - } - bi.reset() - if bi.offsetStart > bi.offsetLimit { - bi.sErr(errors.New("leveldb/table: invalid slice range")) - } - } - return bi -} - -func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - return r.newBlockIter(b, rel, slice, false) -} - -func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) -} - -// NewIterator creates an iterator from the table. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// table. And a nil Range.Limit is treated as a key after all keys in -// the table. -// -// The returned iterator is not safe for concurrent use and should be released -// after use. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - fillCache := !ro.GetDontFillCache() - indexBlock, rel, err := r.getIndexBlock(fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - index := &indexIter{ - blockIter: r.newBlockIter(indexBlock, rel, slice, true), - tr: r, - slice: slice, - fillCache: !ro.GetDontFillCache(), - } - return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) -} - -func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.getIndexBlock(true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - - if !index.Seek(key) { - if err = index.Error(); err == nil { - err = ErrNotFound - } - return - } - - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return nil, nil, r.err - } - - // The filter should only used for exact match. 
- if filtered && r.filter != nil { - filterBlock, frel, ferr := r.getFilterBlock(true) - if ferr == nil { - if !filterBlock.contains(r.filter, dataBH.offset, key) { - frel.Release() - return nil, nil, ErrNotFound - } - frel.Release() - } else if !errors.IsCorrupted(ferr) { - return nil, nil, ferr - } - } - - data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) - if !data.Seek(key) { - data.Release() - if err = data.Error(); err != nil { - return - } - - // The nearest greater-than key is the first key of the next block. - if !index.Next() { - if err = index.Error(); err == nil { - err = ErrNotFound - } - return - } - - dataBH, n = decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return nil, nil, r.err - } - - data = r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) - if !data.Next() { - data.Release() - if err = data.Error(); err == nil { - err = ErrNotFound - } - return - } - } - - // Key doesn't use block buffer, no need to copy the buffer. - rkey = data.Key() - if !noValue { - if r.bpool == nil { - value = data.Value() - } else { - // Value does use block buffer, and since the buffer will be - // recycled, it need to be copied. - value = append([]byte{}, data.Value()...) - } - } - data.Release() - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such pair doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { - return r.find(key, filtered, ro, false) -} - -// FindKey finds key that is greater than or equal to the given key. -// It returns ErrNotFound if the table doesn't contain such key. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such key doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { - rkey, _, err = r.find(key, filtered, ro, true) - return -} - -// Get gets the value for the given key. It returns errors.ErrNotFound -// if the table does not contain the key. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - rkey, value, err := r.find(key, false, ro, false) - if err == nil && r.cmp.Compare(rkey, key) != 0 { - value = nil - err = ErrNotFound - } - return -} - -// OffsetOf returns approximate offset for the given key. -// -// It is safe to modify the contents of the argument after Get returns. 
-func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if index.Seek(key) { - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - offset = int64(dataBH.offset) - return - } - err = index.Error() - if err == nil { - offset = r.dataEnd - } - return -} - -// Release implements util.Releaser. -// It also close the file if it is an io.Closer. -func (r *Reader) Release() { - r.mu.Lock() - defer r.mu.Unlock() - - if closer, ok := r.reader.(io.Closer); ok { - closer.Close() - } - if r.indexBlock != nil { - r.indexBlock.Release() - r.indexBlock = nil - } - if r.filterBlock != nil { - r.filterBlock.Release() - r.filterBlock = nil - } - r.reader = nil - r.cache = nil - r.bpool = nil - r.err = ErrReaderReleased -} - -// NewReader creates a new initialized table reader for the file. -// The fi, cache and bpool is optional and can be nil. -// -// The returned table reader instance is safe for concurrent use. -func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { - if f == nil { - return nil, errors.New("leveldb/table: nil file") - } - - r := &Reader{ - fd: fd, - reader: f, - cache: cache, - bpool: bpool, - o: o, - cmp: o.GetComparer(), - verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), - } - - if size < footerLen { - r.err = r.newErrCorrupted(0, size, "table", "too small") - return r, nil - } - - footerPos := size - footerLen - var footer [footerLen]byte - if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { - return nil, err - } - if string(footer[footerLen-len(magic):footerLen]) != magic { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") - return r, nil - } - - var n int - // Decode the metaindex block handle. - r.metaBH, n = decodeBlockHandle(footer[:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") - return r, nil - } - - // Decode the index block handle. - r.indexBH, n = decodeBlockHandle(footer[n:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") - return r, nil - } - - // Read metaindex block. - metaBlock, err := r.readBlock(r.metaBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } - return nil, err - } - - // Set data end. - r.dataEnd = int64(r.metaBH.offset) - - // Read metaindex. - metaIter := r.newBlockIter(metaBlock, nil, nil, true) - for metaIter.Next() { - key := string(metaIter.Key()) - if !strings.HasPrefix(key, "filter.") { - continue - } - fn := key[7:] - if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { - r.filter = f0 - } else { - for _, f0 := range o.GetAltFilters() { - if f0.Name() == fn { - r.filter = f0 - break - } - } - } - if r.filter != nil { - filterBH, n := decodeBlockHandle(metaIter.Value()) - if n == 0 { - continue - } - r.filterBH = filterBH - // Update data end. - r.dataEnd = int64(filterBH.offset) - break - } - } - metaIter.Release() - metaBlock.Release() - - // Cache index and filter block locally, since we don't have global cache. 
- if cache == nil { - r.indexBlock, err = r.readBlock(r.indexBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } - return nil, err - } - if r.filter != nil { - r.filterBlock, err = r.readFilterBlock(r.filterBH) - if err != nil { - if !errors.IsCorrupted(err) { - return nil, err - } - - // Don't use filter then. - r.filter = nil - } - } - } - - return r, nil -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go deleted file mode 100644 index beacdc1f..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package table allows read and write sorted key/value. -package table - -import ( - "encoding/binary" -) - -/* -Table: - -Table is consist of one or more data blocks, an optional filter block -a metaindex block, an index block and a table footer. Metaindex block -is a special block used to keep parameters of the table, such as filter -block name and its block handle. Index block is a special block used to -keep record of data blocks offset and length, index block use one as -restart interval. The key used by index block are the last key of preceding -block, shorter separator of adjacent blocks or shorter successor of the -last key of the last block. Filter block is an optional block contains -sequence of filter data generated by a filter generator. - -Table data structure: - + optional - / - +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ - | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | - +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ - - Each block followed by a 5-bytes trailer contains compression type and checksum. - -Table block trailer: - - +---------------------------+-------------------+ - | compression type (1-byte) | checksum (4-byte) | - +---------------------------+-------------------+ - - The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression - type also included in the checksum. - -Table footer: - - +------------------- 40-bytes -------------------+ - / \ - +------------------------+--------------------+------+-----------------+ - | metaindex block handle / index block handle / ---- | magic (8-bytes) | - +------------------------+--------------------+------+-----------------+ - - The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". - -NOTE: All fixed-length integer are little-endian. -*/ - -/* -Block: - -Block is consist of one or more key/value entries and a block trailer. -Block entry shares key prefix with its preceding key until a restart -point reached. A block should contains at least one restart point. -First restart point are always zero. - -Block data structure: - - + restart point + restart point (depends on restart interval) - / / - +---------------+---------------+---------------+---------------+---------+ - | block entry 1 | block entry 2 | ... 
| block entry n | trailer | - +---------------+---------------+---------------+---------------+---------+ - -Key/value entry: - - +---- key len ----+ - / \ - +-------+---------+-----------+---------+--------------------+--------------+----------------+ - | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | - +-----------------+---------------------+--------------------+--------------+----------------+ - - Block entry shares key prefix with its preceding key: - Conditions: - restart_interval=2 - entry one : key=deck,value=v1 - entry two : key=dock,value=v2 - entry three: key=duck,value=v3 - The entries will be encoded as follow: - - + restart point (offset=0) + restart point (offset=16) - / / - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - \ / \ / \ / - +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ - - The block trailer will contains two restart points: - - +------------+-----------+--------+ - | 0 | 16 | 2 | - +------------+-----------+---+----+ - \ / \ - +-- restart points --+ + restart points length - -Block trailer: - - +-- 4-bytes --+ - / \ - +-----------------+-----------------+-----------------+------------------------------+ - | restart point 1 | .... | restart point n | restart points len (4-bytes) | - +-----------------+-----------------+-----------------+------------------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -/* -Filter block: - -Filter block consist of one or more filter data and a filter block trailer. -The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. - -Filter block data structure: - - + offset 1 + offset 2 + offset n + trailer offset - / / / / - +---------------+---------------+---------------+---------+ - | filter data 1 | ... | filter data n | trailer | - +---------------+---------------+---------------+---------+ - -Filter block trailer: - - +- 4-bytes -+ - / \ - +---------------+---------------+---------------+-------------------------------+------------------+ - | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) | - +-------------- +---------------+---------------+-------------------------------+------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -const ( - blockTrailerLen = 5 - footerLen = 48 - - magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" - - // The block type gives the per-block compression format. - // These constants are part of the file format and should not be changed. 
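To make the shared-prefix entry encoding described above concrete, here is a self-contained Go sketch that reproduces the deck/dock/duck example; restart handling is reduced to a per-entry flag, so this mirrors, but is not, the deleted blockWriter:

package main

import (
	"encoding/binary"
	"fmt"
)

// appendEntry encodes one block entry: shared, unshared and value
// lengths as uvarints, then the unshared key suffix and the value.
// At a restart point the shared prefix is forced to zero.
func appendEntry(dst, prev, key, value []byte, restart bool) []byte {
	shared := 0
	if !restart {
		for shared < len(prev) && shared < len(key) && prev[shared] == key[shared] {
			shared++
		}
	}
	var tmp [binary.MaxVarintLen64]byte
	dst = append(dst, tmp[:binary.PutUvarint(tmp[:], uint64(shared))]...)
	dst = append(dst, tmp[:binary.PutUvarint(tmp[:], uint64(len(key)-shared))]...)
	dst = append(dst, tmp[:binary.PutUvarint(tmp[:], uint64(len(value)))]...)
	dst = append(dst, key[shared:]...)
	return append(dst, value...)
}

func main() {
	e1 := appendEntry(nil, nil, []byte("deck"), []byte("v1"), true) // restart point
	e2 := appendEntry(nil, []byte("deck"), []byte("dock"), []byte("v2"), false)
	e3 := appendEntry(nil, []byte("dock"), []byte("duck"), []byte("v3"), true) // restart_interval=2
	fmt.Printf("%q\n%q\n%q\n", e1, e2, e3)
	// "\x00\x04\x02deckv1" "\x01\x03\x02ockv2" "\x00\x04\x02duckv3"
}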
- blockTypeNoCompression = 0 - blockTypeSnappyCompression = 1 - - // Generate new filter every 2KB of data - filterBaseLg = 11 - filterBase = 1 << filterBaseLg -) - -type blockHandle struct { - offset, length uint64 -} - -func decodeBlockHandle(src []byte) (blockHandle, int) { - offset, n := binary.Uvarint(src) - length, m := binary.Uvarint(src[n:]) - if n == 0 || m == 0 { - return blockHandle{}, 0 - } - return blockHandle{offset, length}, n + m -} - -func encodeBlockHandle(dst []byte, b blockHandle) int { - n := binary.PutUvarint(dst, b.offset) - m := binary.PutUvarint(dst[n:], b.length) - return n + m -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go deleted file mode 100644 index b96b271d..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/golang/snappy" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func sharedPrefixLen(a, b []byte) int { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for i < n && a[i] == b[i] { - i++ - } - return i -} - -type blockWriter struct { - restartInterval int - buf util.Buffer - nEntries int - prevKey []byte - restarts []uint32 - scratch []byte -} - -func (w *blockWriter) append(key, value []byte) { - nShared := 0 - if w.nEntries%w.restartInterval == 0 { - w.restarts = append(w.restarts, uint32(w.buf.Len())) - } else { - nShared = sharedPrefixLen(w.prevKey, key) - } - n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) - w.buf.Write(w.scratch[:n]) - w.buf.Write(key[nShared:]) - w.buf.Write(value) - w.prevKey = append(w.prevKey[:0], key...) - w.nEntries++ -} - -func (w *blockWriter) finish() { - // Write restarts entry. - if w.nEntries == 0 { - // Must have at least one restart entry. - w.restarts = append(w.restarts, 0) - } - w.restarts = append(w.restarts, uint32(len(w.restarts))) - for _, x := range w.restarts { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } -} - -func (w *blockWriter) reset() { - w.buf.Reset() - w.nEntries = 0 - w.restarts = w.restarts[:0] -} - -func (w *blockWriter) bytesLen() int { - restartsLen := len(w.restarts) - if restartsLen == 0 { - restartsLen = 1 - } - return w.buf.Len() + 4*restartsLen + 4 -} - -type filterWriter struct { - generator filter.FilterGenerator - buf util.Buffer - nKeys int - offsets []uint32 -} - -func (w *filterWriter) add(key []byte) { - if w.generator == nil { - return - } - w.generator.Add(key) - w.nKeys++ -} - -func (w *filterWriter) flush(offset uint64) { - if w.generator == nil { - return - } - for x := int(offset / filterBase); x > len(w.offsets); { - w.generate() - } -} - -func (w *filterWriter) finish() { - if w.generator == nil { - return - } - // Generate last keys. 
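The block handle codec just removed is simply two uvarints; a standalone round-trip sketch of the same logic (the 20-byte buffer mirrors the writer scratch space reserved for handle encoding):

package main

import (
	"encoding/binary"
	"fmt"
)

type blockHandle struct{ offset, length uint64 }

func encodeBlockHandle(dst []byte, b blockHandle) int {
	n := binary.PutUvarint(dst, b.offset)
	return n + binary.PutUvarint(dst[n:], b.length)
}

func decodeBlockHandle(src []byte) (blockHandle, int) {
	offset, n := binary.Uvarint(src)
	length, m := binary.Uvarint(src[n:])
	if n == 0 || m == 0 {
		return blockHandle{}, 0
	}
	return blockHandle{offset, length}, n + m
}

func main() {
	var buf [2 * binary.MaxVarintLen64]byte // 20 bytes, enough for any handle
	n := encodeBlockHandle(buf[:], blockHandle{offset: 4096, length: 517})
	bh, m := decodeBlockHandle(buf[:n])
	fmt.Println(n, m, bh.offset, bh.length) // 4 4 4096 517
}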
- - if w.nKeys > 0 { - w.generate() - } - w.offsets = append(w.offsets, uint32(w.buf.Len())) - for _, x := range w.offsets { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } - w.buf.WriteByte(filterBaseLg) -} - -func (w *filterWriter) generate() { - // Record offset. - w.offsets = append(w.offsets, uint32(w.buf.Len())) - // Generate filters. - if w.nKeys > 0 { - w.generator.Generate(&w.buf) - w.nKeys = 0 - } -} - -// Writer is a table writer. -type Writer struct { - writer io.Writer - err error - // Options - cmp comparer.Comparer - filter filter.Filter - compression opt.Compression - blockSize int - - dataBlock blockWriter - indexBlock blockWriter - filterBlock filterWriter - pendingBH blockHandle - offset uint64 - nEntries int - // Scratch allocated enough for 5 uvarint. Block writer should not use - // first 20-bytes since it will be used to encode block handle, which - // then passed to the block writer itself. - scratch [50]byte - comparerScratch []byte - compressionScratch []byte -} - -func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { - // Compress the buffer if necessary. - var b []byte - if compression == opt.SnappyCompression { - // Allocate scratch enough for compression and block trailer. - if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { - w.compressionScratch = make([]byte, n) - } - compressed := snappy.Encode(w.compressionScratch, buf.Bytes()) - n := len(compressed) - b = compressed[:n+blockTrailerLen] - b[n] = blockTypeSnappyCompression - } else { - tmp := buf.Alloc(blockTrailerLen) - tmp[0] = blockTypeNoCompression - b = buf.Bytes() - } - - // Calculate the checksum. - n := len(b) - 4 - checksum := util.NewCRC(b[:n]).Value() - binary.LittleEndian.PutUint32(b[n:], checksum) - - // Write the buffer to the file. - _, err = w.writer.Write(b) - if err != nil { - return - } - bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} - w.offset += uint64(len(b)) - return -} - -func (w *Writer) flushPendingBH(key []byte) { - if w.pendingBH.length == 0 { - return - } - var separator []byte - if len(key) == 0 { - separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) - } else { - separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) - } - if separator == nil { - separator = w.dataBlock.prevKey - } else { - w.comparerScratch = separator - } - n := encodeBlockHandle(w.scratch[:20], w.pendingBH) - // Append the block handle to the index block. - w.indexBlock.append(separator, w.scratch[:n]) - // Reset prev key of the data block. - w.dataBlock.prevKey = w.dataBlock.prevKey[:0] - // Clear pending block handle. - w.pendingBH = blockHandle{} -} - -func (w *Writer) finishBlock() error { - w.dataBlock.finish() - bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - return err - } - w.pendingBH = bh - // Reset the data block. - w.dataBlock.reset() - // Flush the filter block. - w.filterBlock.flush(w.offset) - return nil -} - -// Append appends key/value pair to the table. The keys passed must -// be in increasing order. -// -// It is safe to modify the contents of the arguments after Append returns. 
-func (w *Writer) Append(key, value []byte) error { - if w.err != nil { - return w.err - } - if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { - w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) - return w.err - } - - w.flushPendingBH(key) - // Append key/value pair to the data block. - w.dataBlock.append(key, value) - // Add key to the filter block. - w.filterBlock.add(key) - - // Finish the data block if block size target reached. - if w.dataBlock.bytesLen() >= w.blockSize { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.nEntries++ - return nil -} - -// BlocksLen returns number of blocks written so far. -func (w *Writer) BlocksLen() int { - n := w.indexBlock.nEntries - if w.pendingBH.length > 0 { - // Includes the pending block. - n++ - } - return n -} - -// EntriesLen returns number of entries added so far. -func (w *Writer) EntriesLen() int { - return w.nEntries -} - -// BytesLen returns number of bytes written so far. -func (w *Writer) BytesLen() int { - return int(w.offset) -} - -// Close will finalize the table. Calling Append is not possible -// after Close, but calling BlocksLen, EntriesLen and BytesLen -// is still possible. -func (w *Writer) Close() error { - if w.err != nil { - return w.err - } - - // Write the last data block. Or empty data block if there - // aren't any data blocks at all. - if w.dataBlock.nEntries > 0 || w.nEntries == 0 { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.flushPendingBH(nil) - - // Write the filter block. - var filterBH blockHandle - w.filterBlock.finish() - if buf := &w.filterBlock.buf; buf.Len() > 0 { - filterBH, w.err = w.writeBlock(buf, opt.NoCompression) - if w.err != nil { - return w.err - } - } - - // Write the metaindex block. - if filterBH.length > 0 { - key := []byte("filter." + w.filter.Name()) - n := encodeBlockHandle(w.scratch[:20], filterBH) - w.dataBlock.append(key, w.scratch[:n]) - } - w.dataBlock.finish() - metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the index block. - w.indexBlock.finish() - indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the table footer. - footer := w.scratch[:footerLen] - for i := range footer { - footer[i] = 0 - } - n := encodeBlockHandle(footer, metaindexBH) - encodeBlockHandle(footer[n:], indexBH) - copy(footer[footerLen-len(magic):], magic) - if _, err := w.writer.Write(footer); err != nil { - w.err = err - return w.err - } - w.offset += footerLen - - w.err = errors.New("leveldb/table: writer is closed") - return nil -} - -// NewWriter creates a new initialized table writer for the file. -// -// Table writer is not safe for concurrent use. -func NewWriter(f io.Writer, o *opt.Options) *Writer { - w := &Writer{ - writer: f, - cmp: o.GetComparer(), - filter: o.GetFilter(), - compression: o.GetCompression(), - blockSize: o.GetBlockSize(), - comparerScratch: make([]byte, 0), - } - // data block - w.dataBlock.restartInterval = o.GetBlockRestartInterval() - // The first 20-bytes are used for encoding block handle. 
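A hedged usage sketch for the table writer deleted above: Append requires strictly increasing keys, and Close finalizes the filter, metaindex and index blocks plus the footer. The output file name and empty Options are placeholders:

package main

import (
	"os"

	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/table"
)

func main() {
	f, err := os.Create("000001.ldb") // placeholder output file
	if err != nil {
		panic(err)
	}
	defer f.Close() // the Writer does not close the underlying io.Writer

	w := table.NewWriter(f, &opt.Options{})

	// Append enforces increasing key order; out-of-order keys poison the writer.
	for _, kv := range [][2]string{{"deck", "v1"}, {"dock", "v2"}, {"duck", "v3"}} {
		if err := w.Append([]byte(kv[0]), []byte(kv[1])); err != nil {
			panic(err)
		}
	}
	if err := w.Close(); err != nil { // writes filter, metaindex, index block and footer
		panic(err)
	}
}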
- w.dataBlock.scratch = w.scratch[20:] - // index block - w.indexBlock.restartInterval = 1 - w.indexBlock.scratch = w.scratch[20:] - // filter block - if w.filter != nil { - w.filterBlock.generator = w.filter.NewGenerator() - w.filterBlock.flush(0) - } - return w -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go deleted file mode 100644 index 0e2b519e..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - - "github.com/syndtr/goleveldb/leveldb/storage" -) - -func shorten(str string) string { - if len(str) <= 8 { - return str - } - return str[:3] + ".." + str[len(str)-3:] -} - -var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"} - -func shortenb(bytes int) string { - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%d%sB", bytes, bunits[i]) -} - -func sshortenb(bytes int) string { - if bytes == 0 { - return "~" - } - sign := "+" - if bytes < 0 { - sign = "-" - bytes *= -1 - } - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) -} - -func sint(x int) string { - if x == 0 { - return "~" - } - sign := "+" - if x < 0 { - sign = "-" - x *= -1 - } - return fmt.Sprintf("%s%d", sign, x) -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -type fdSorter []storage.FileDesc - -func (p fdSorter) Len() int { - return len(p) -} - -func (p fdSorter) Less(i, j int) bool { - return p[i].Num < p[j].Num -} - -func (p fdSorter) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func sortFds(fds []storage.FileDesc) { - sort.Sort(fdSorter(fds)) -} - -func ensureBuffer(b []byte, n int) []byte { - if cap(b) < n { - return make([]byte, n) - } - return b[:n] -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go deleted file mode 100644 index 21de2425..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -// This a copy of Go std bytes.Buffer with some modification -// and some features stripped. - -import ( - "bytes" - "io" -) - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. -type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. -} - -// Bytes returns a slice of the contents of the unread portion of the buffer; -// len(b.Bytes()) == b.Len(). If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. -func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "". 
-func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "" - } - return string(b.buf[b.off:]) -} - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). -func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Truncate discards all but the first n unread bytes from the buffer. -// It panics if n is negative or greater than the length of the buffer. -func (b *Buffer) Truncate(n int) { - switch { - case n < 0 || n > b.Len(): - panic("leveldb/util.Buffer: truncation out of range") - case n == 0: - // Reuse buffer space. - b.off = 0 - } - b.buf = b.buf[0 : b.off+n] -} - -// Reset resets the buffer so it has no content. -// b.Reset() is the same as b.Truncate(0). -func (b *Buffer) Reset() { b.Truncate(0) } - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) grow(n int) int { - m := b.Len() - // If buffer is empty, reset to recover space. - if m == 0 && b.off != 0 { - b.Truncate(0) - } - if len(b.buf)+n > cap(b.buf) { - var buf []byte - if b.buf == nil && n <= len(b.bootstrap) { - buf = b.bootstrap[0:] - } else if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) - buf = b.buf[:m] - } else { - // not enough space anywhere - buf = makeSlice(2*cap(b.buf) + n) - copy(buf, b.buf[b.off:]) - } - b.buf = buf - b.off = 0 - } - b.buf = b.buf[0 : b.off+m+n] - return b.off + m -} - -// Alloc allocs n bytes of slice from the buffer, growing the buffer as -// needed. If n is negative, Alloc will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Alloc(n int) []byte { - if n < 0 { - panic("leveldb/util.Buffer.Alloc: negative count") - } - m := b.grow(n) - return b.buf[m:] -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Grow(n int) { - if n < 0 { - panic("leveldb/util.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[0:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with bytes.ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - m := b.grow(len(p)) - return copy(b.buf[m:], p), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const MinRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. If the -// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - // If buffer is empty, reset to recover space. 
- if b.off >= len(b.buf) { - b.Truncate(0) - } - for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - newBuf := b.buf - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + MinRead) - } - copy(newBuf, b.buf[b.off:]) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 - } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - if e == io.EOF { - break - } - if e != nil { - return n, e - } - } - return n, nil // err is EOF, so return nil explicitly -} - -// makeSlice allocates a slice of size n. If the allocation fails, it panics -// with bytes.ErrTooLarge. -func makeSlice(n int) []byte { - // If the make fails, give a known error. - defer func() { - if recover() != nil { - panic(bytes.ErrTooLarge) - } - }() - return make([]byte, n) -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. -// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. -func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - if b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("leveldb/util.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Truncate(0) - return -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// bytes.ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - m := b.grow(1) - b.buf[m] = c - return nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. -func (b *Buffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - if len(p) == 0 { - return - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - return -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. -// The slice is only valid until the next call to a read or write method. -func (b *Buffer) Next(n int) []byte { - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. -func (b *Buffer) ReadByte() (c byte, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, io.EOF - } - c = b.buf[b.off] - b.off++ - return c, nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. 
-// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) - return -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - return line, err -} - -// NewBuffer creates and initializes a new Buffer using buf as its initial -// contents. It is intended to prepare a Buffer to read existing data. It -// can also be used to size the internal buffer for writing. To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go deleted file mode 100644 index 2f3db974..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "fmt" - "sync" - "sync/atomic" - "time" -) - -type buffer struct { - b []byte - miss int -} - -// BufferPool is a 'buffer pool'. -type BufferPool struct { - pool [6]chan []byte - size [5]uint32 - sizeMiss [5]uint32 - sizeHalf [5]uint32 - baseline [4]int - baseline0 int - - mu sync.RWMutex - closed bool - closeC chan struct{} - - get uint32 - put uint32 - half uint32 - less uint32 - equal uint32 - greater uint32 - miss uint32 -} - -func (p *BufferPool) poolNum(n int) int { - if n <= p.baseline0 && n > p.baseline0/2 { - return 0 - } - for i, x := range p.baseline { - if n <= x { - return i + 1 - } - } - return len(p.baseline) + 1 -} - -// Get returns buffer with length of n. -func (p *BufferPool) Get(n int) []byte { - if p == nil { - return make([]byte, n) - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return make([]byte, n) - } - - atomic.AddUint32(&p.get, 1) - - poolNum := p.poolNum(n) - pool := p.pool[poolNum] - if poolNum == 0 { - // Fast path. 
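A quick sketch of the stripped-down bytes.Buffer clone above; Alloc, the main addition over the standard library type, hands back an n-byte slice inside the buffer so callers can fill it in place (the vendored import path is shown for illustration):

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	var b util.Buffer // the zero value is an empty buffer ready to use

	b.Write([]byte("header"))

	// Alloc grows the buffer by 4 bytes and returns that region;
	// writing into it needs no second copy.
	binary.LittleEndian.PutUint32(b.Alloc(4), 0xdeadbeef)

	fmt.Println(b.Len()) // 10
	fmt.Printf("% x\n", b.Bytes())
}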
- select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - select { - case pool <- b: - default: - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - } - default: - atomic.AddUint32(&p.miss, 1) - } - - return make([]byte, n, p.baseline0) - } else { - sizePtr := &p.size[poolNum-1] - - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - sizeHalfPtr := &p.sizeHalf[poolNum-1] - if atomic.AddUint32(sizeHalfPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) - atomic.StoreUint32(sizeHalfPtr, 0) - } else { - select { - case pool <- b: - default: - } - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { - select { - case pool <- b: - default: - } - } - } - default: - atomic.AddUint32(&p.miss, 1) - } - - if size := atomic.LoadUint32(sizePtr); uint32(n) > size { - if size == 0 { - atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) - } else { - sizeMissPtr := &p.sizeMiss[poolNum-1] - if atomic.AddUint32(sizeMissPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(n)) - atomic.StoreUint32(sizeMissPtr, 0) - } - } - return make([]byte, n) - } else { - return make([]byte, n, size) - } - } -} - -// Put adds given buffer to the pool. -func (p *BufferPool) Put(b []byte) { - if p == nil { - return - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return - } - - atomic.AddUint32(&p.put, 1) - - pool := p.pool[p.poolNum(cap(b))] - select { - case pool <- b: - default: - } - -} - -func (p *BufferPool) Close() { - if p == nil { - return - } - - p.mu.Lock() - if !p.closed { - p.closed = true - p.closeC <- struct{}{} - } - p.mu.Unlock() -} - -func (p *BufferPool) String() string { - if p == nil { - return "" - } - - return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", - p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) -} - -func (p *BufferPool) drain() { - ticker := time.NewTicker(2 * time.Second) - defer ticker.Stop() - for { - select { - case <-ticker.C: - for _, ch := range p.pool { - select { - case <-ch: - default: - } - } - case <-p.closeC: - close(p.closeC) - for _, ch := range p.pool { - close(ch) - } - return - } - } -} - -// NewBufferPool creates a new initialized 'buffer pool'. -func NewBufferPool(baseline int) *BufferPool { - if baseline <= 0 { - panic("baseline can't be <= 0") - } - p := &BufferPool{ - baseline0: baseline, - baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, - closeC: make(chan struct{}, 1), - } - for i, cap := range []int{2, 2, 4, 4, 2, 1} { - p.pool[i] = make(chan []byte, cap) - } - go p.drain() - return p -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go deleted file mode 100644 index 631c9d61..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
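Likewise for the pool being removed: Get returns a buffer of length n (recycled when a suitably sized one is pooled), Put recycles it, and Close stops the background drain goroutine. A minimal sketch with an arbitrarily chosen baseline:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	p := util.NewBufferPool(32 * 1024) // baseline ~ expected block size
	defer p.Close()

	b := p.Get(4096) // len(b) == 4096; capacity depends on pool state
	copy(b, "payload")
	p.Put(b) // make it available to a later Get of similar size

	fmt.Println(p.String()) // counters: gets, puts, halves, hits, misses
}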
- -package util - -import ( - "hash/crc32" -) - -var table = crc32.MakeTable(crc32.Castagnoli) - -// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. -type CRC uint32 - -// NewCRC creates a new crc based on the given bytes. -func NewCRC(b []byte) CRC { - return CRC(0).Update(b) -} - -// Update updates the crc with the given bytes. -func (c CRC) Update(b []byte) CRC { - return CRC(crc32.Update(uint32(c), table, b)) -} - -// Value returns a masked crc. -func (c CRC) Value() uint32 { - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go deleted file mode 100644 index 7f3fa4e2..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "encoding/binary" -) - -// Hash return hash of the given data. -func Hash(data []byte, seed uint32) uint32 { - // Similar to murmur hash - const ( - m = uint32(0xc6a4a793) - r = uint32(24) - ) - var ( - h = seed ^ (uint32(len(data)) * m) - i int - ) - - for n := len(data) - len(data)%4; i < n; i += 4 { - h += binary.LittleEndian.Uint32(data[i:]) - h *= m - h ^= (h >> 16) - } - - switch len(data) - i { - default: - panic("not reached") - case 3: - h += uint32(data[i+2]) << 16 - fallthrough - case 2: - h += uint32(data[i+1]) << 8 - fallthrough - case 1: - h += uint32(data[i]) - h *= m - h ^= (h >> r) - case 0: - } - - return h -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go deleted file mode 100644 index 85159583..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -// Range is a key range. -type Range struct { - // Start of the key range, include in the range. - Start []byte - - // Limit of the key range, not include in the range. - Limit []byte -} - -// BytesPrefix returns key range that satisfy the given prefix. -// This only applicable for the standard 'bytes comparer'. -func BytesPrefix(prefix []byte) *Range { - var limit []byte - for i := len(prefix) - 1; i >= 0; i-- { - c := prefix[i] - if c < 0xff { - limit = make([]byte, i+1) - copy(limit, prefix) - limit[i] = c + 1 - break - } - } - return &Range{prefix, limit} -} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go deleted file mode 100644 index 80614afc..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package util provides utilities used throughout leveldb. -package util - -import ( - "errors" -) - -var ( - ErrReleased = errors.New("leveldb: resource already relesed") - ErrHasReleaser = errors.New("leveldb: releaser already defined") -) - -// Releaser is the interface that wraps the basic Release method. -type Releaser interface { - // Release releases associated resources. 
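The masked Castagnoli CRC above is the LevelDB on-disk convention; the same masking can be reproduced with only hash/crc32. The rationale in the comment is upstream LevelDB's stated one, noted here as background:

package main

import (
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// mask rotates the CRC and adds a constant so that a CRC stored inside
// a stream does not look like the CRC of the bytes around it.
func mask(c uint32) uint32 {
	return (c>>15 | c<<17) + 0xa282ead8
}

func main() {
	sum := crc32.Checksum([]byte("data block bytes"), castagnoli)
	fmt.Printf("raw %08x masked %08x\n", sum, mask(sum))
}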
Release should always success - // and can be called multiple times without causing error. - Release() -} - -// ReleaseSetter is the interface that wraps the basic SetReleaser method. -type ReleaseSetter interface { - // SetReleaser associates the given releaser to the resources. The - // releaser will be called once coresponding resources released. - // Calling SetReleaser with nil will clear the releaser. - // - // This will panic if a releaser already present or coresponding - // resource is already released. Releaser should be cleared first - // before assigned a new one. - SetReleaser(releaser Releaser) -} - -// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. -type BasicReleaser struct { - releaser Releaser - released bool -} - -// Released returns whether Release method already called. -func (r *BasicReleaser) Released() bool { - return r.released -} - -// Release implements Releaser.Release. -func (r *BasicReleaser) Release() { - if !r.released { - if r.releaser != nil { - r.releaser.Release() - r.releaser = nil - } - r.released = true - } -} - -// SetReleaser implements ReleaseSetter.SetReleaser. -func (r *BasicReleaser) SetReleaser(releaser Releaser) { - if r.released { - panic(ErrReleased) - } - if r.releaser != nil && releaser != nil { - panic(ErrHasReleaser) - } - r.releaser = releaser -} - -type NoopReleaser struct{} - -func (NoopReleaser) Release() {} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go deleted file mode 100644 index 73f272af..00000000 --- a/vendor/github.com/syndtr/goleveldb/leveldb/version.go +++ /dev/null @@ -1,528 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sync/atomic" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type tSet struct { - level int - table *tFile -} - -type version struct { - s *session - - levels []tFiles - - // Level that should be compacted next and its compaction score. - // Score < 1 means compaction is not strictly needed. These fields - // are initialized by computeCompaction() - cLevel int - cScore float64 - - cSeek unsafe.Pointer - - closing bool - ref int - released bool -} - -func newVersion(s *session) *version { - return &version{s: s} -} - -func (v *version) incref() { - if v.released { - panic("already released") - } - - v.ref++ - if v.ref == 1 { - // Incr file ref. - for _, tt := range v.levels { - for _, t := range tt { - v.s.addFileRef(t.fd, 1) - } - } - } -} - -func (v *version) releaseNB() { - v.ref-- - if v.ref > 0 { - return - } else if v.ref < 0 { - panic("negative version ref") - } - - for _, tt := range v.levels { - for _, t := range tt { - if v.s.addFileRef(t.fd, -1) == 0 { - v.s.tops.remove(t) - } - } - } - - v.released = true -} - -func (v *version) release() { - v.s.vmu.Lock() - v.releaseNB() - v.s.vmu.Unlock() -} - -func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) { - ukey := ikey.ukey() - - // Aux level. - if aux != nil { - for _, t := range aux { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(-1, t) { - return - } - } - } - - if lf != nil && !lf(-1) { - return - } - } - - // Walk tables level-by-level. 
- for level, tables := range v.levels { - if len(tables) == 0 { - continue - } - - if level == 0 { - // Level-0 files may overlap each other. Find all files that - // overlap ukey. - for _, t := range tables { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(level, t) { - return - } - } - } - } else { - if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { - t := tables[i] - if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - if !f(level, t) { - return - } - } - } - } - - if lf != nil && !lf(level) { - return - } - } -} - -func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { - if v.closing { - return nil, false, ErrClosed - } - - ukey := ikey.ukey() - - var ( - tset *tSet - tseek bool - - // Level-0. - zfound bool - zseq uint64 - zkt keyType - zval []byte - ) - - err = ErrNotFound - - // Since entries never hop across level, finding key/value - // in smaller level make later levels irrelevant. - v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool { - if level >= 0 && !tseek { - if tset == nil { - tset = &tSet{level, t} - } else { - tseek = true - } - } - - var ( - fikey, fval []byte - ferr error - ) - if noValue { - fikey, ferr = v.s.tops.findKey(t, ikey, ro) - } else { - fikey, fval, ferr = v.s.tops.find(t, ikey, ro) - } - - switch ferr { - case nil: - case ErrNotFound: - return true - default: - err = ferr - return false - } - - if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil { - if v.s.icmp.uCompare(ukey, fukey) == 0 { - // Level <= 0 may overlaps each-other. - if level <= 0 { - if fseq >= zseq { - zfound = true - zseq = fseq - zkt = fkt - zval = fval - } - } else { - switch fkt { - case keyTypeVal: - value = fval - err = nil - case keyTypeDel: - default: - panic("leveldb: invalid internalKey type") - } - return false - } - } - } else { - err = fkerr - return false - } - - return true - }, func(level int) bool { - if zfound { - switch zkt { - case keyTypeVal: - value = zval - err = nil - case keyTypeDel: - default: - panic("leveldb: invalid internalKey type") - } - return false - } - - return true - }) - - if tseek && tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - - return -} - -func (v *version) sampleSeek(ikey internalKey) (tcomp bool) { - var tset *tSet - - v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool { - if tset == nil { - tset = &tSet{level, t} - return true - } - if tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - return false - }, nil) - - return -} - -func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) - for level, tables := range v.levels { - if level == 0 { - // Merge all level zero files together since they may overlap. - for _, t := range tables { - its = append(its, v.s.tops.newIterator(t, slice, ro)) - } - } else if len(tables) != 0 { - its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)) - } - } - return -} - -func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v} -} - -// Spawn a new version based on this version. 
-func (v *version) spawn(r *sessionRecord) *version { - staging := v.newStaging() - staging.commit(r) - return staging.finish() -} - -func (v *version) fillRecord(r *sessionRecord) { - for level, tables := range v.levels { - for _, t := range tables { - r.addTableFile(level, t) - } - } -} - -func (v *version) tLen(level int) int { - if level < len(v.levels) { - return len(v.levels[level]) - } - return 0 -} - -func (v *version) offsetOf(ikey internalKey) (n int64, err error) { - for level, tables := range v.levels { - for _, t := range tables { - if v.s.icmp.Compare(t.imax, ikey) <= 0 { - // Entire file is before "ikey", so just add the file size - n += t.size - } else if v.s.icmp.Compare(t.imin, ikey) > 0 { - // Entire file is after "ikey", so ignore - if level > 0 { - // Files other than level 0 are sorted by meta->min, so - // no further files in this level will contain data for - // "ikey". - break - } - } else { - // "ikey" falls in the range for this table. Add the - // approximate offset of "ikey" within the table. - if m, err := v.s.tops.offsetOf(t, ikey); err == nil { - n += m - } else { - return 0, err - } - } - } - } - - return -} - -func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) { - if maxLevel > 0 { - if len(v.levels) == 0 { - return maxLevel - } - if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - for ; level < maxLevel; level++ { - if pLevel := level + 1; pLevel >= len(v.levels) { - return maxLevel - } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) { - break - } - if gpLevel := level + 2; gpLevel < len(v.levels) { - overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) { - break - } - } - } - } - } - return -} - -func (v *version) computeCompaction() { - // Precomputed best level for next compaction - bestLevel := int(-1) - bestScore := float64(-1) - - statFiles := make([]int, len(v.levels)) - statSizes := make([]string, len(v.levels)) - statScore := make([]string, len(v.levels)) - statTotSize := int64(0) - - for level, tables := range v.levels { - var score float64 - size := tables.size() - if level == 0 { - // We treat level-0 specially by bounding the number of files - // instead of number of bytes for two reasons: - // - // (1) With larger write-buffer sizes, it is nice not to do too - // many level-0 compaction. - // - // (2) The files in level-0 are merged on every read and - // therefore we wish to avoid too many files when the individual - // file size is small (perhaps because of a small write-buffer - // setting, or very high compression ratios, or lots of - // overwrites/deletions). 
- score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) - } else { - score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level)) - } - - if score > bestScore { - bestLevel = level - bestScore = score - } - - statFiles[level] = len(tables) - statSizes[level] = shortenb(int(size)) - statScore[level] = fmt.Sprintf("%.2f", score) - statTotSize += size - } - - v.cLevel = bestLevel - v.cScore = bestScore - - v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore) -} - -func (v *version) needCompaction() bool { - return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil -} - -type tablesScratch struct { - added map[int64]atRecord - deleted map[int64]struct{} -} - -type versionStaging struct { - base *version - levels []tablesScratch -} - -func (p *versionStaging) getScratch(level int) *tablesScratch { - if level >= len(p.levels) { - newLevels := make([]tablesScratch, level+1) - copy(newLevels, p.levels) - p.levels = newLevels - } - return &(p.levels[level]) -} - -func (p *versionStaging) commit(r *sessionRecord) { - // Deleted tables. - for _, r := range r.deletedTables { - scratch := p.getScratch(r.level) - if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 { - if scratch.deleted == nil { - scratch.deleted = make(map[int64]struct{}) - } - scratch.deleted[r.num] = struct{}{} - } - if scratch.added != nil { - delete(scratch.added, r.num) - } - } - - // New tables. - for _, r := range r.addedTables { - scratch := p.getScratch(r.level) - if scratch.added == nil { - scratch.added = make(map[int64]atRecord) - } - scratch.added[r.num] = r - if scratch.deleted != nil { - delete(scratch.deleted, r.num) - } - } -} - -func (p *versionStaging) finish() *version { - // Build new version. - nv := newVersion(p.base.s) - numLevel := len(p.levels) - if len(p.base.levels) > numLevel { - numLevel = len(p.base.levels) - } - nv.levels = make([]tFiles, numLevel) - for level := 0; level < numLevel; level++ { - var baseTabels tFiles - if level < len(p.base.levels) { - baseTabels = p.base.levels[level] - } - - if level < len(p.levels) { - scratch := p.levels[level] - - var nt tFiles - // Prealloc list if possible. - if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 { - nt = make(tFiles, 0, n) - } - - // Base tables. - for _, t := range baseTabels { - if _, ok := scratch.deleted[t.fd.Num]; ok { - continue - } - if _, ok := scratch.added[t.fd.Num]; ok { - continue - } - nt = append(nt, t) - } - - // New tables. - for _, r := range scratch.added { - nt = append(nt, tableFileFromRecord(r)) - } - - if len(nt) != 0 { - // Sort tables. - if level == 0 { - nt.sortByNum() - } else { - nt.sortByKey(p.base.s.icmp) - } - - nv.levels[level] = nt - } - } else { - nv.levels[level] = baseTabels - } - } - - // Trim levels. - n := len(nv.levels) - for ; n > 0 && nv.levels[n-1] == nil; n-- { - } - nv.levels = nv.levels[:n] - - // Compute compaction score for new version. - nv.computeCompaction() - - return nv -} - -type versionReleaser struct { - v *version - once bool -} - -func (vr *versionReleaser) Release() { - v := vr.v - v.s.vmu.Lock() - if !vr.once { - v.releaseNB() - vr.once = true - } - v.s.vmu.Unlock() -} diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt deleted file mode 100644 index 8765c9fb..00000000 --- a/vendor/go.uber.org/atomic/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Uber Technologies, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go deleted file mode 100644 index 1db6849f..00000000 --- a/vendor/go.uber.org/atomic/atomic.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic - -import ( - "math" - "sync/atomic" - "time" -) - -// Int32 is an atomic wrapper around an int32. -type Int32 struct{ v int32 } - -// NewInt32 creates an Int32. -func NewInt32(i int32) *Int32 { - return &Int32{i} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(n int32) int32 { - return atomic.AddInt32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(n int32) int32 { - return atomic.AddInt32(&i.v, -n) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. 
-func (i *Int32) CAS(old, new int32) bool { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(n int32) { - atomic.StoreInt32(&i.v, n) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(n int32) int32 { - return atomic.SwapInt32(&i.v, n) -} - -// Int64 is an atomic wrapper around an int64. -type Int64 struct{ v int64 } - -// NewInt64 creates an Int64. -func NewInt64(i int64) *Int64 { - return &Int64{i} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(n int64) int64 { - return atomic.AddInt64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(n int64) int64 { - return atomic.AddInt64(&i.v, -n) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) bool { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(n int64) { - atomic.StoreInt64(&i.v, n) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(n int64) int64 { - return atomic.SwapInt64(&i.v, n) -} - -// Uint32 is an atomic wrapper around an uint32. -type Uint32 struct{ v uint32 } - -// NewUint32 creates a Uint32. -func NewUint32(i uint32) *Uint32 { - return &Uint32{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(n uint32) uint32 { - return atomic.AddUint32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. -func (i *Uint32) Sub(n uint32) uint32 { - return atomic.AddUint32(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) bool { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(n uint32) { - atomic.StoreUint32(&i.v, n) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(n uint32) uint32 { - return atomic.SwapUint32(&i.v, n) -} - -// Uint64 is an atomic wrapper around a uint64. -type Uint64 struct{ v uint64 } - -// NewUint64 creates a Uint64. -func NewUint64(i uint64) *Uint64 { - return &Uint64{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(n uint64) uint64 { - return atomic.AddUint64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. 
-func (i *Uint64) Sub(n uint64) uint64 { - return atomic.AddUint64(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) bool { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(n uint64) { - atomic.StoreUint64(&i.v, n) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(n uint64) uint64 { - return atomic.SwapUint64(&i.v, n) -} - -// Bool is an atomic Boolean. -type Bool struct{ v uint32 } - -// NewBool creates a Bool. -func NewBool(initial bool) *Bool { - return &Bool{boolToInt(initial)} -} - -// Load atomically loads the Boolean. -func (b *Bool) Load() bool { - return truthy(atomic.LoadUint32(&b.v)) -} - -// CAS is an atomic compare-and-swap. -func (b *Bool) CAS(old, new bool) bool { - return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) -} - -// Store atomically stores the passed value. -func (b *Bool) Store(new bool) { - atomic.StoreUint32(&b.v, boolToInt(new)) -} - -// Swap sets the given value and returns the previous value. -func (b *Bool) Swap(new bool) bool { - return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) -} - -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() bool { - return truthy(atomic.AddUint32(&b.v, 1) - 1) -} - -func truthy(n uint32) bool { - return n&1 == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Float64 is an atomic wrapper around float64. -type Float64 struct { - v uint64 -} - -// NewFloat64 creates a Float64. -func NewFloat64(f float64) *Float64 { - return &Float64{math.Float64bits(f)} -} - -// Load atomically loads the wrapped value. -func (f *Float64) Load() float64 { - return math.Float64frombits(atomic.LoadUint64(&f.v)) -} - -// Store atomically stores the passed value. -func (f *Float64) Store(s float64) { - atomic.StoreUint64(&f.v, math.Float64bits(s)) -} - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(s float64) float64 { - for { - old := f.Load() - new := old + s - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(s float64) float64 { - return f.Add(-s) -} - -// CAS is an atomic compare-and-swap. -func (f *Float64) CAS(old, new float64) bool { - return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) -} - -// Duration is an atomic wrapper around time.Duration -// https://godoc.org/time#Duration -type Duration struct { - v Int64 -} - -// NewDuration creates a Duration. -func NewDuration(d time.Duration) *Duration { - return &Duration{v: *NewInt64(int64(d))} -} - -// Load atomically loads the wrapped value. -func (d *Duration) Load() time.Duration { - return time.Duration(d.v.Load()) -} - -// Store atomically stores the passed value. -func (d *Duration) Store(n time.Duration) { - d.v.Store(int64(n)) -} - -// Add atomically adds to the wrapped time.Duration and returns the new value. 
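Float64 has no hardware add instruction to lean on, so the deleted Add method above retries a load/compare-and-swap loop over the value's bit pattern until it wins the race. The same pattern in standalone form:

    package main

    import (
        "fmt"
        "math"
        "sync/atomic"
    )

    // addFloat64 atomically adds delta to the float64 whose bits live in
    // *bits, retrying until the compare-and-swap succeeds.
    func addFloat64(bits *uint64, delta float64) float64 {
        for {
            old := atomic.LoadUint64(bits)
            new := math.Float64bits(math.Float64frombits(old) + delta)
            if atomic.CompareAndSwapUint64(bits, old, new) {
                return math.Float64frombits(new)
            }
        }
    }

    func main() {
        bits := math.Float64bits(1.5)
        fmt.Println(addFloat64(&bits, 2.25)) // 3.75
    }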
-func (d *Duration) Add(n time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(n))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(n time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(n))) -} - -// Swap atomically swaps the wrapped time.Duration and returns the old value. -func (d *Duration) Swap(n time.Duration) time.Duration { - return time.Duration(d.v.Swap(int64(n))) -} - -// CAS is an atomic compare-and-swap. -func (d *Duration) CAS(old, new time.Duration) bool { - return d.v.CAS(int64(old), int64(new)) -} - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go deleted file mode 100644 index ede8136f..00000000 --- a/vendor/go.uber.org/atomic/string.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// String is an atomic type-safe wrapper around Value for strings. -type String struct{ v Value } - -// NewString creates a String. -func NewString(str string) *String { - s := &String{} - if str != "" { - s.Store(str) - } - return s -} - -// Load atomically loads the wrapped string. -func (s *String) Load() string { - v := s.v.Load() - if v == nil { - return "" - } - return v.(string) -} - -// Store atomically stores the passed string. -// Note: Converting the string to an interface{} to store in the Value -// requires an allocation. -func (s *String) Store(str string) { - s.v.Store(str) -} diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt deleted file mode 100644 index 858e0247..00000000 --- a/vendor/go.uber.org/multierr/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2017 Uber Technologies, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go deleted file mode 100644 index de6ce473..00000000 --- a/vendor/go.uber.org/multierr/error.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package multierr allows combining one or more errors together. -// -// Overview -// -// Errors can be combined with the use of the Combine function. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) -// -// If only two errors are being combined, the Append function may be used -// instead. -// -// err = multierr.Combine(reader.Close(), writer.Close()) -// -// This makes it possible to record resource cleanup failures from deferred -// blocks with the help of named return values. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } -// -// The underlying list of errors for a returned error object may be retrieved -// with the Errors function. -// -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:") -// } -// -// Advanced Usage -// -// Errors returned by Combine and Append MAY implement the following -// interface. 
-// -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } -// -// Note that if you need access to list of errors behind a multierr error, you -// should prefer using the Errors function. That said, if you need cheap -// read-only access to the underlying errors slice, you can attempt to cast -// the error to this interface. You MUST handle the failure case gracefully -// because errors returned by Combine and Append are not guaranteed to -// implement this interface. -// -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } -package multierr // import "go.uber.org/multierr" - -import ( - "bytes" - "fmt" - "io" - "strings" - "sync" - - "go.uber.org/atomic" -) - -var ( - // Separator for single-line error messages. - _singlelineSeparator = []byte("; ") - - _newline = []byte("\n") - - // Prefix for multi-line messages - _multilinePrefix = []byte("the following errors occurred:") - - // Prefix for the first and following lines of an item in a list of - // multi-line error messages. - // - // For example, if a single item is: - // - // foo - // bar - // - // It will become, - // - // - foo - // bar - _multilineSeparator = []byte("\n - ") - _multilineIndent = []byte(" ") -) - -// _bufferPool is a pool of bytes.Buffers. -var _bufferPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, -} - -type errorGroup interface { - Errors() []error -} - -// Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, the returned slice is empty. -// -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) -// -// If the error is not composed of other errors, the returned slice contains -// just the error that was passed in. -// -// Callers of this function are free to modify the returned slice. -func Errors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result -} - -// multiError is an error that holds one or more errors. -// -// An instance of this is guaranteed to be non-empty and flattened. That is, -// none of the errors inside multiError are other multiErrors. -// -// multiError formats to a semi-colon delimited list of error messages with -// %v and with a more readable multi-line format with %+v. -type multiError struct { - copyNeeded atomic.Bool - errors []error -} - -var _ errorGroup = (*multiError)(nil) - -// Errors returns the list of underlying errors. -// -// This slice MUST NOT be modified. 
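The contract documented above is easy to exercise. A short sketch, assuming go.uber.org/multierr is now fetched as a regular module since it is being dropped from vendor/:

    package main

    import (
        "errors"
        "fmt"

        "go.uber.org/multierr"
    )

    func main() {
        err := multierr.Append(
            errors.New("close reader"),
            errors.New("close writer"),
        )

        // Errors returns a copy; callers may modify it freely.
        for _, e := range multierr.Errors(err) {
            fmt.Println(e)
        }

        fmt.Println(multierr.Errors(nil)) // [] — a nil error yields an empty slice
    }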
-func (merr *multiError) Errors() []error { - if merr == nil { - return nil - } - return merr.errors -} - -func (merr *multiError) Error() string { - if merr == nil { - return "" - } - - buff := _bufferPool.Get().(*bytes.Buffer) - buff.Reset() - - merr.writeSingleline(buff) - - result := buff.String() - _bufferPool.Put(buff) - return result -} - -func (merr *multiError) Format(f fmt.State, c rune) { - if c == 'v' && f.Flag('+') { - merr.writeMultiline(f) - } else { - merr.writeSingleline(f) - } -} - -func (merr *multiError) writeSingleline(w io.Writer) { - first := true - for _, item := range merr.errors { - if first { - first = false - } else { - w.Write(_singlelineSeparator) - } - io.WriteString(w, item.Error()) - } -} - -func (merr *multiError) writeMultiline(w io.Writer) { - w.Write(_multilinePrefix) - for _, item := range merr.errors { - w.Write(_multilineSeparator) - writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) - } -} - -// Writes s to the writer with the given prefix added before each line after -// the first. -func writePrefixLine(w io.Writer, prefix []byte, s string) { - first := true - for len(s) > 0 { - if first { - first = false - } else { - w.Write(prefix) - } - - idx := strings.IndexByte(s, '\n') - if idx < 0 { - idx = len(s) - 1 - } - - io.WriteString(w, s[:idx+1]) - s = s[idx+1:] - } -} - -type inspectResult struct { - // Number of top-level non-nil errors - Count int - - // Total number of errors including multiErrors - Capacity int - - // Index of the first non-nil error in the list. Value is meaningless if - // Count is zero. - FirstErrorIdx int - - // Whether the list contains at least one multiError - ContainsMultiError bool -} - -// Inspects the given slice of errors so that we can efficiently allocate -// space for it. -func inspect(errors []error) (res inspectResult) { - first := true - for i, err := range errors { - if err == nil { - continue - } - - res.Count++ - if first { - first = false - res.FirstErrorIdx = i - } - - if merr, ok := err.(*multiError); ok { - res.Capacity += len(merr.errors) - res.ContainsMultiError = true - } else { - res.Capacity++ - } - } - return -} - -// fromSlice converts the given list of errors into a single error. -func fromSlice(errors []error) error { - res := inspect(errors) - switch res.Count { - case 0: - return nil - case 1: - // only one non-nil entry - return errors[res.FirstErrorIdx] - case len(errors): - if !res.ContainsMultiError { - // already flat - return &multiError{errors: errors} - } - } - - nonNilErrs := make([]error, 0, res.Capacity) - for _, err := range errors[res.FirstErrorIdx:] { - if err == nil { - continue - } - - if nested, ok := err.(*multiError); ok { - nonNilErrs = append(nonNilErrs, nested.errors...) - } else { - nonNilErrs = append(nonNilErrs, err) - } - } - - return &multiError{errors: nonNilErrs} -} - -// Combine combines the passed errors into a single error. -// -// If zero arguments were passed or if all items are nil, a nil error is -// returned. -// -// Combine(nil, nil) // == nil -// -// If only a single error was passed, it is returned as-is. -// -// Combine(err) // == err -// -// Combine skips over nil arguments so this function may be used to combine -// together errors from operations that fail independently of each other. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) -// -// If any of the passed errors is a multierr error, it will be flattened along -// with the other errors. 
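The Combine/Append usage documented above, applied to the classic record-every-Close-failure case (again assuming the module is fetched rather than vendored; the closeAll helper and failing type are illustrative, not part of the library):

    package main

    import (
        "errors"
        "fmt"
        "io"

        "go.uber.org/multierr"
    )

    // closeAll records every Close failure instead of stopping at the first.
    func closeAll(closers ...io.Closer) error {
        var err error
        for _, c := range closers {
            err = multierr.Append(err, c.Close())
        }
        return err
    }

    type failing string

    func (f failing) Close() error { return errors.New(string(f)) }

    func main() {
        err := closeAll(failing("reader"), failing("writer"))
        fmt.Printf("%v\n", err)  // semicolon-delimited single line
        fmt.Printf("%+v\n", err) // readable multi-line form
    }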
-// -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) -// -// The returned error formats into a readable multi-line error message if -// formatted with %+v. -// -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) -func Combine(errors ...error) error { - return fromSlice(errors) -} - -// Append appends the given errors together. Either value may be nil. -// -// This function is a specialization of Combine for the common case where -// there are only two errors. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The following pattern may also be used to record failure of deferred -// operations without losing information about the original error. -// -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() -func Append(left error, right error) error { - switch { - case left == nil: - return right - case right == nil: - return left - } - - if _, ok := right.(*multiError); !ok { - if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { - // Common case where the error on the left is constantly being - // appended to. - errs := append(l.errors, right) - return &multiError{errors: errs} - } else if !ok { - // Both errors are single errors. - return &multiError{errors: []error{left, right}} - } - } - - // Either right or both, left and right, are multiErrors. Rely on usual - // expensive logic. - errors := [2]error{left, right} - return fromSlice(errors[0:]) -} diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt deleted file mode 100644 index 6652bed4..00000000 --- a/vendor/go.uber.org/zap/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016-2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go deleted file mode 100644 index 5be3704a..00000000 --- a/vendor/go.uber.org/zap/array.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "time" - - "go.uber.org/zap/zapcore" -) - -// Array constructs a field with the given key and ArrayMarshaler. It provides -// a flexible, but still type-safe and efficient, way to add array-like types -// to the logging context. The struct's MarshalLogArray method is called lazily. -func Array(key string, val zapcore.ArrayMarshaler) Field { - return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} -} - -// Bools constructs a field that carries a slice of bools. -func Bools(key string, bs []bool) Field { - return Array(key, bools(bs)) -} - -// ByteStrings constructs a field that carries a slice of []byte, each of which -// must be UTF-8 encoded text. -func ByteStrings(key string, bss [][]byte) Field { - return Array(key, byteStringsArray(bss)) -} - -// Complex128s constructs a field that carries a slice of complex numbers. -func Complex128s(key string, nums []complex128) Field { - return Array(key, complex128s(nums)) -} - -// Complex64s constructs a field that carries a slice of complex numbers. -func Complex64s(key string, nums []complex64) Field { - return Array(key, complex64s(nums)) -} - -// Durations constructs a field that carries a slice of time.Durations. -func Durations(key string, ds []time.Duration) Field { - return Array(key, durations(ds)) -} - -// Float64s constructs a field that carries a slice of floats. -func Float64s(key string, nums []float64) Field { - return Array(key, float64s(nums)) -} - -// Float32s constructs a field that carries a slice of floats. -func Float32s(key string, nums []float32) Field { - return Array(key, float32s(nums)) -} - -// Ints constructs a field that carries a slice of integers. -func Ints(key string, nums []int) Field { - return Array(key, ints(nums)) -} - -// Int64s constructs a field that carries a slice of integers. -func Int64s(key string, nums []int64) Field { - return Array(key, int64s(nums)) -} - -// Int32s constructs a field that carries a slice of integers. -func Int32s(key string, nums []int32) Field { - return Array(key, int32s(nums)) -} - -// Int16s constructs a field that carries a slice of integers. -func Int16s(key string, nums []int16) Field { - return Array(key, int16s(nums)) -} - -// Int8s constructs a field that carries a slice of integers. -func Int8s(key string, nums []int8) Field { - return Array(key, int8s(nums)) -} - -// Strings constructs a field that carries a slice of strings. 
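Every typed helper above funnels into Array with a tiny marshaler type, so user-defined array-like types hook in the same way. A sketch of a custom zapcore.ArrayMarshaler, assuming the zap module is available (the hostnames type is invented for illustration):

    package main

    import (
        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    // hostnames marshals itself lazily, just like the stringArray helper above.
    type hostnames []string

    func (hs hostnames) MarshalLogArray(arr zapcore.ArrayEncoder) error {
        for i := range hs {
            arr.AppendString(hs[i])
        }
        return nil
    }

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        logger.Info("resolved",
            zap.Array("hosts", hostnames{"a.example.com", "b.example.com"}),
            zap.Ints("ports", []int{2003, 2004}),
        )
    }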
-func Strings(key string, ss []string) Field { - return Array(key, stringArray(ss)) -} - -// Times constructs a field that carries a slice of time.Times. -func Times(key string, ts []time.Time) Field { - return Array(key, times(ts)) -} - -// Uints constructs a field that carries a slice of unsigned integers. -func Uints(key string, nums []uint) Field { - return Array(key, uints(nums)) -} - -// Uint64s constructs a field that carries a slice of unsigned integers. -func Uint64s(key string, nums []uint64) Field { - return Array(key, uint64s(nums)) -} - -// Uint32s constructs a field that carries a slice of unsigned integers. -func Uint32s(key string, nums []uint32) Field { - return Array(key, uint32s(nums)) -} - -// Uint16s constructs a field that carries a slice of unsigned integers. -func Uint16s(key string, nums []uint16) Field { - return Array(key, uint16s(nums)) -} - -// Uint8s constructs a field that carries a slice of unsigned integers. -func Uint8s(key string, nums []uint8) Field { - return Array(key, uint8s(nums)) -} - -// Uintptrs constructs a field that carries a slice of pointer addresses. -func Uintptrs(key string, us []uintptr) Field { - return Array(key, uintptrs(us)) -} - -// Errors constructs a field that carries a slice of errors. -func Errors(key string, errs []error) Field { - return Array(key, errArray(errs)) -} - -type bools []bool - -func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range bs { - arr.AppendBool(bs[i]) - } - return nil -} - -type byteStringsArray [][]byte - -func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range bss { - arr.AppendByteString(bss[i]) - } - return nil -} - -type complex128s []complex128 - -func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendComplex128(nums[i]) - } - return nil -} - -type complex64s []complex64 - -func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendComplex64(nums[i]) - } - return nil -} - -type durations []time.Duration - -func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ds { - arr.AppendDuration(ds[i]) - } - return nil -} - -type float64s []float64 - -func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendFloat64(nums[i]) - } - return nil -} - -type float32s []float32 - -func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendFloat32(nums[i]) - } - return nil -} - -type ints []int - -func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt(nums[i]) - } - return nil -} - -type int64s []int64 - -func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt64(nums[i]) - } - return nil -} - -type int32s []int32 - -func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt32(nums[i]) - } - return nil -} - -type int16s []int16 - -func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt16(nums[i]) - } - return nil -} - -type int8s []int8 - -func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt8(nums[i]) - } - return nil -} - -type stringArray []string - -func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ss { - 
arr.AppendString(ss[i]) - } - return nil -} - -type times []time.Time - -func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ts { - arr.AppendTime(ts[i]) - } - return nil -} - -type uints []uint - -func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint(nums[i]) - } - return nil -} - -type uint64s []uint64 - -func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint64(nums[i]) - } - return nil -} - -type uint32s []uint32 - -func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint32(nums[i]) - } - return nil -} - -type uint16s []uint16 - -func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint16(nums[i]) - } - return nil -} - -type uint8s []uint8 - -func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint8(nums[i]) - } - return nil -} - -type uintptrs []uintptr - -func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUintptr(nums[i]) - } - return nil -} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go deleted file mode 100644 index 7592e8c6..00000000 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package buffer provides a thin wrapper around a byte slice. Unlike the -// standard library's bytes.Buffer, it supports a portion of the strconv -// package's zero-allocation formatters. -package buffer // import "go.uber.org/zap/buffer" - -import "strconv" - -const _size = 1024 // by default, create 1 KiB buffers - -// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so -// the only way to construct one is via a Pool. -type Buffer struct { - bs []byte - pool Pool -} - -// AppendByte writes a single byte to the Buffer. -func (b *Buffer) AppendByte(v byte) { - b.bs = append(b.bs, v) -} - -// AppendString writes a string to the Buffer. -func (b *Buffer) AppendString(s string) { - b.bs = append(b.bs, s...) -} - -// AppendInt appends an integer to the underlying buffer (assuming base 10). 
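The Buffer methods above lean on the strconv.Append* family, which formats directly into an existing byte slice instead of allocating intermediate strings. The underlying technique with the standard library alone:

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        // Reuse one backing array, exactly as the Buffer methods above do.
        bs := make([]byte, 0, 1024)

        bs = append(bs, "metric "...)
        bs = strconv.AppendInt(bs, 42, 10)
        bs = append(bs, ' ')
        bs = strconv.AppendFloat(bs, 3.5, 'f', -1, 64)
        bs = append(bs, ' ')
        bs = strconv.AppendBool(bs, true)

        fmt.Println(string(bs)) // metric 42 3.5 true
    }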
-func (b *Buffer) AppendInt(i int64) { - b.bs = strconv.AppendInt(b.bs, i, 10) -} - -// AppendUint appends an unsigned integer to the underlying buffer (assuming -// base 10). -func (b *Buffer) AppendUint(i uint64) { - b.bs = strconv.AppendUint(b.bs, i, 10) -} - -// AppendBool appends a bool to the underlying buffer. -func (b *Buffer) AppendBool(v bool) { - b.bs = strconv.AppendBool(b.bs, v) -} - -// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN -// or +/- Inf. -func (b *Buffer) AppendFloat(f float64, bitSize int) { - b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) -} - -// Len returns the length of the underlying byte slice. -func (b *Buffer) Len() int { - return len(b.bs) -} - -// Cap returns the capacity of the underlying byte slice. -func (b *Buffer) Cap() int { - return cap(b.bs) -} - -// Bytes returns a mutable reference to the underlying byte slice. -func (b *Buffer) Bytes() []byte { - return b.bs -} - -// String returns a string copy of the underlying byte slice. -func (b *Buffer) String() string { - return string(b.bs) -} - -// Reset resets the underlying byte slice. Subsequent writes re-use the slice's -// backing array. -func (b *Buffer) Reset() { - b.bs = b.bs[:0] -} - -// Write implements io.Writer. -func (b *Buffer) Write(bs []byte) (int, error) { - b.bs = append(b.bs, bs...) - return len(bs), nil -} - -// TrimNewline trims any final "\n" byte from the end of the buffer. -func (b *Buffer) TrimNewline() { - if i := len(b.bs) - 1; i >= 0 { - if b.bs[i] == '\n' { - b.bs = b.bs[:i] - } - } -} - -// Free returns the Buffer to its Pool. -// -// Callers must not retain references to the Buffer after calling Free. -func (b *Buffer) Free() { - b.pool.put(b) -} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go deleted file mode 100644 index 8fb3e202..00000000 --- a/vendor/go.uber.org/zap/buffer/pool.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package buffer - -import "sync" - -// A Pool is a type-safe wrapper around a sync.Pool. -type Pool struct { - p *sync.Pool -} - -// NewPool constructs a new Pool. -func NewPool() Pool { - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, _size)} - }, - }} -} - -// Get retrieves a Buffer from the pool, creating one if necessary. 
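The Pool type being deleted here is a small type-safety shim over sync.Pool; the same shape is easy to reproduce when needed. A minimal sketch (all names are illustrative, not part of any API):

    package main

    import (
        "fmt"
        "sync"
    )

    // buffer and bufPool mirror the deleted types: a typed Get that resets a
    // recycled value before handing it out, and a Put that returns it.
    type buffer struct{ bs []byte }

    var bufPool = sync.Pool{
        New: func() interface{} { return &buffer{bs: make([]byte, 0, 1024)} },
    }

    func getBuffer() *buffer {
        b := bufPool.Get().(*buffer)
        b.bs = b.bs[:0] // reset before reuse, as Pool.Get does above
        return b
    }

    func main() {
        b := getBuffer()
        b.bs = append(b.bs, "hello"...)
        fmt.Println(string(b.bs))
        bufPool.Put(b) // the equivalent of Free; b must not be used afterwards
    }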
-func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) - buf.Reset() - buf.pool = p - return buf -} - -func (p Pool) put(buf *Buffer) { - p.p.Put(buf) -} diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go deleted file mode 100644 index 6fe17d9e..00000000 --- a/vendor/go.uber.org/zap/config.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "sort" - "time" - - "go.uber.org/zap/zapcore" -) - -// SamplingConfig sets a sampling strategy for the logger. Sampling caps the -// global CPU and I/O load that logging puts on your process while attempting -// to preserve a representative subset of your logs. -// -// Values configured here are per-second. See zapcore.NewSampler for details. -type SamplingConfig struct { - Initial int `json:"initial" yaml:"initial"` - Thereafter int `json:"thereafter" yaml:"thereafter"` -} - -// Config offers a declarative way to construct a logger. It doesn't do -// anything that can't be done with New, Options, and the various -// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to -// toggle common options. -// -// Note that Config intentionally supports only the most common options. More -// unusual logging setups (logging to network connections or message queues, -// splitting output between multiple files, etc.) are possible, but require -// direct use of the zapcore package. For sample code, see the package-level -// BasicConfiguration and AdvancedConfiguration examples. -// -// For an example showing runtime log level changes, see the documentation for -// AtomicLevel. -type Config struct { - // Level is the minimum enabled logging level. Note that this is a dynamic - // level, so calling Config.Level.SetLevel will atomically change the log - // level of all loggers descended from this config. - Level AtomicLevel `json:"level" yaml:"level"` - // Development puts the logger in development mode, which changes the - // behavior of DPanicLevel and takes stacktraces more liberally. - Development bool `json:"development" yaml:"development"` - // DisableCaller stops annotating logs with the calling function's file - // name and line number. By default, all logs are annotated. - DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` - // DisableStacktrace completely disables automatic stacktrace capturing. 
By - // default, stacktraces are captured for WarnLevel and above logs in - // development and ErrorLevel and above in production. - DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` - // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. - Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` - // Encoding sets the logger's encoding. Valid values are "json" and - // "console", as well as any third-party encodings registered via - // RegisterEncoder. - Encoding string `json:"encoding" yaml:"encoding"` - // EncoderConfig sets options for the chosen encoder. See - // zapcore.EncoderConfig for details. - EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` - // OutputPaths is a list of URLs or file paths to write logging output to. - // See Open for details. - OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` - // ErrorOutputPaths is a list of URLs to write internal logger errors to. - // The default is standard error. - // - // Note that this setting only affects internal errors; for sample code that - // sends error-level logs to a different location from info- and debug-level - // logs, see the package-level AdvancedConfiguration example. - ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` - // InitialFields is a collection of fields to add to the root logger. - InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` -} - -// NewProductionEncoderConfig returns an opinionated EncoderConfig for -// production environments. -func NewProductionEncoderConfig() zapcore.EncoderConfig { - return zapcore.EncoderConfig{ - TimeKey: "ts", - LevelKey: "level", - NameKey: "logger", - CallerKey: "caller", - MessageKey: "msg", - StacktraceKey: "stacktrace", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.EpochTimeEncoder, - EncodeDuration: zapcore.SecondsDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, - } -} - -// NewProductionConfig is a reasonable production logging configuration. -// Logging is enabled at InfoLevel and above. -// -// It uses a JSON encoder, writes to standard error, and enables sampling. -// Stacktraces are automatically included on logs of ErrorLevel and above. -func NewProductionConfig() Config { - return Config{ - Level: NewAtomicLevelAt(InfoLevel), - Development: false, - Sampling: &SamplingConfig{ - Initial: 100, - Thereafter: 100, - }, - Encoding: "json", - EncoderConfig: NewProductionEncoderConfig(), - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - } -} - -// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for -// development environments. -func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { - return zapcore.EncoderConfig{ - // Keys can be anything except the empty string. - TimeKey: "T", - LevelKey: "L", - NameKey: "N", - CallerKey: "C", - MessageKey: "M", - StacktraceKey: "S", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.CapitalLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, - } -} - -// NewDevelopmentConfig is a reasonable development logging configuration. -// Logging is enabled at DebugLevel and above. -// -// It enables development mode (which makes DPanicLevel logs panic), uses a -// console encoder, writes to standard error, and disables sampling. 
-// Stacktraces are automatically included on logs of WarnLevel and above. -func NewDevelopmentConfig() Config { - return Config{ - Level: NewAtomicLevelAt(DebugLevel), - Development: true, - Encoding: "console", - EncoderConfig: NewDevelopmentEncoderConfig(), - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - } -} - -// Build constructs a logger from the Config and Options. -func (cfg Config) Build(opts ...Option) (*Logger, error) { - enc, err := cfg.buildEncoder() - if err != nil { - return nil, err - } - - sink, errSink, err := cfg.openSinks() - if err != nil { - return nil, err - } - - log := New( - zapcore.NewCore(enc, sink, cfg.Level), - cfg.buildOptions(errSink)..., - ) - if len(opts) > 0 { - log = log.WithOptions(opts...) - } - return log, nil -} - -func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { - opts := []Option{ErrorOutput(errSink)} - - if cfg.Development { - opts = append(opts, Development()) - } - - if !cfg.DisableCaller { - opts = append(opts, AddCaller()) - } - - stackLevel := ErrorLevel - if cfg.Development { - stackLevel = WarnLevel - } - if !cfg.DisableStacktrace { - opts = append(opts, AddStacktrace(stackLevel)) - } - - if cfg.Sampling != nil { - opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { - return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) - })) - } - - if len(cfg.InitialFields) > 0 { - fs := make([]Field, 0, len(cfg.InitialFields)) - keys := make([]string, 0, len(cfg.InitialFields)) - for k := range cfg.InitialFields { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - fs = append(fs, Any(k, cfg.InitialFields[k])) - } - opts = append(opts, Fields(fs...)) - } - - return opts -} - -func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { - sink, closeOut, err := Open(cfg.OutputPaths...) - if err != nil { - return nil, nil, err - } - errSink, _, err := Open(cfg.ErrorOutputPaths...) - if err != nil { - closeOut() - return nil, nil, err - } - return sink, errSink, nil -} - -func (cfg Config) buildEncoder() (zapcore.Encoder, error) { - return newEncoder(cfg.Encoding, cfg.EncoderConfig) -} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go deleted file mode 100644 index 8638dd1b..00000000 --- a/vendor/go.uber.org/zap/doc.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
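Config is meant to be tweaked declaratively and then built. A sketch starting from the production preset defined above, assuming the zap module is available:

    package main

    import "go.uber.org/zap"

    func main() {
        // Start from the production preset above, then adjust what differs.
        cfg := zap.NewProductionConfig()
        cfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel) // dynamic; SetLevel works at runtime
        cfg.DisableStacktrace = true

        logger, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()

        logger.Debug("visible because the level was lowered")
    }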
- -// Package zap provides fast, structured, leveled logging. -// -// For applications that log in the hot path, reflection-based serialization -// and string formatting are prohibitively expensive - they're CPU-intensive -// and make many small allocations. Put differently, using json.Marshal and -// fmt.Fprintf to log tons of interface{} makes your application slow. -// -// Zap takes a different approach. It includes a reflection-free, -// zero-allocation JSON encoder, and the base Logger strives to avoid -// serialization overhead and allocations wherever possible. By building the -// high-level SugaredLogger on that foundation, zap lets users choose when -// they need to count every allocation and when they'd prefer a more familiar, -// loosely typed API. -// -// Choosing a Logger -// -// In contexts where performance is nice, but not critical, use the -// SugaredLogger. It's 4-10x faster than other structured logging packages and -// supports both structured and printf-style logging. Like log15 and go-kit, -// the SugaredLogger's structured logging APIs are loosely typed and accept a -// variadic number of key-value pairs. (For more advanced use cases, they also -// accept strongly typed fields - see the SugaredLogger.With documentation for -// details.) -// sugar := zap.NewExample().Sugar() -// defer sugar.Sync() -// sugar.Infow("failed to fetch URL", -// "url", "http://example.com", -// "attempt", 3, -// "backoff", time.Second, -// ) -// sugar.Infof("failed to fetch URL: %s", "http://example.com") -// -// By default, loggers are unbuffered. However, since zap's low-level APIs -// allow buffering, calling Sync before letting your process exit is a good -// habit. -// -// In the rare contexts where every microsecond and every allocation matter, -// use the Logger. It's even faster than the SugaredLogger and allocates far -// less, but it only supports strongly-typed, structured logging. -// logger := zap.NewExample() -// defer logger.Sync() -// logger.Info("failed to fetch URL", -// zap.String("url", "http://example.com"), -// zap.Int("attempt", 3), -// zap.Duration("backoff", time.Second), -// ) -// -// Choosing between the Logger and SugaredLogger doesn't need to be an -// application-wide decision: converting between the two is simple and -// inexpensive. -// logger := zap.NewExample() -// defer logger.Sync() -// sugar := logger.Sugar() -// plain := sugar.Desugar() -// -// Configuring Zap -// -// The simplest way to build a Logger is to use zap's opinionated presets: -// NewExample, NewProduction, and NewDevelopment. These presets build a logger -// with a single function call: -// logger, err := zap.NewProduction() -// if err != nil { -// log.Fatalf("can't initialize zap logger: %v", err) -// } -// defer logger.Sync() -// -// Presets are fine for small projects, but larger projects and organizations -// naturally require a bit more customization. For most users, zap's Config -// struct strikes the right balance between flexibility and convenience. See -// the package-level BasicConfiguration example for sample code. -// -// More unusual configurations (splitting output between files, sending logs -// to a message queue, etc.) are possible, but require direct use of -// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration -// example for sample code. -// -// Extending Zap -// -// The zap package itself is a relatively thin wrapper around the interfaces -// in go.uber.org/zap/zapcore. 
Extending zap to support a new encoding (e.g., -// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an -// exception aggregation service, like Sentry or Rollbar) typically requires -// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core -// interfaces. See the zapcore documentation for details. -// -// Similarly, package authors can use the high-performance Encoder and Core -// implementations in the zapcore package to build their own loggers. -// -// Frequently Asked Questions -// -// An FAQ covering everything from installation errors to design decisions is -// available at https://github.com/uber-go/zap/blob/master/FAQ.md. -package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go deleted file mode 100644 index 2e9d3c34..00000000 --- a/vendor/go.uber.org/zap/encoder.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "errors" - "fmt" - "sync" - - "go.uber.org/zap/zapcore" -) - -var ( - errNoEncoderNameSpecified = errors.New("no encoder name specified") - - _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ - "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - return zapcore.NewConsoleEncoder(encoderConfig), nil - }, - "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - return zapcore.NewJSONEncoder(encoderConfig), nil - }, - } - _encoderMutex sync.RWMutex -) - -// RegisterEncoder registers an encoder constructor, which the Config struct -// can then reference. By default, the "json" and "console" encoders are -// registered. -// -// Attempting to register an encoder whose name is already taken returns an -// error. 
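The registry above can be extended at startup; a sketch that registers a constructor under an invented name and routes a Config through it:

    package main

    import (
        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        // "console-v2" is an invented name; "json" and "console" are taken.
        err := zap.RegisterEncoder("console-v2",
            func(c zapcore.EncoderConfig) (zapcore.Encoder, error) {
                return zapcore.NewConsoleEncoder(c), nil
            })
        if err != nil {
            panic(err) // duplicate or empty name
        }

        cfg := zap.NewDevelopmentConfig()
        cfg.Encoding = "console-v2" // resolved through the registry above
        logger, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()
        logger.Info("built with a custom-registered encoder")
    }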
-func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { - _encoderMutex.Lock() - defer _encoderMutex.Unlock() - if name == "" { - return errNoEncoderNameSpecified - } - if _, ok := _encoderNameToConstructor[name]; ok { - return fmt.Errorf("encoder already registered for name %q", name) - } - _encoderNameToConstructor[name] = constructor - return nil -} - -func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - _encoderMutex.RLock() - defer _encoderMutex.RUnlock() - if name == "" { - return nil, errNoEncoderNameSpecified - } - constructor, ok := _encoderNameToConstructor[name] - if !ok { - return nil, fmt.Errorf("no encoder registered for name %q", name) - } - return constructor(encoderConfig) -} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go deleted file mode 100644 index 65982a51..00000000 --- a/vendor/go.uber.org/zap/error.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "sync" - - "go.uber.org/zap/zapcore" -) - -var _errArrayElemPool = sync.Pool{New: func() interface{} { - return &errArrayElem{} -}} - -// Error is shorthand for the common idiom NamedError("error", err). -func Error(err error) Field { - return NamedError("error", err) -} - -// NamedError constructs a field that lazily stores err.Error() under the -// provided key. Errors which also implement fmt.Formatter (like those produced -// by github.com/pkg/errors) will also have their verbose representation stored -// under key+"Verbose". If passed a nil error, the field is a no-op. -// -// For the common case in which the key is simply "error", the Error function -// is shorter and less repetitive. -func NamedError(key string, err error) Field { - if err == nil { - return Skip() - } - return Field{Key: key, Type: zapcore.ErrorType, Interface: err} -} - -type errArray []error - -func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range errs { - if errs[i] == nil { - continue - } - // To represent each error as an object with an "error" attribute and - // potentially an "errorVerbose" attribute, we need to wrap it in a - // type that implements LogObjectMarshaler. To prevent this from - // allocating, pool the wrapper type. 
- elem := _errArrayElemPool.Get().(*errArrayElem) - elem.error = errs[i] - arr.AppendObject(elem) - elem.error = nil - _errArrayElemPool.Put(elem) - } - return nil -} - -type errArrayElem struct { - error -} - -func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { - // Re-use the error field's logic, which supports non-standard error types. - Error(e.error).AddTo(enc) - return nil -} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go deleted file mode 100644 index 5130e134..00000000 --- a/vendor/go.uber.org/zap/field.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "math" - "time" - - "go.uber.org/zap/zapcore" -) - -// Field is an alias for Field. Aliasing this type dramatically -// improves the navigability of this package's API documentation. -type Field = zapcore.Field - -// Skip constructs a no-op field, which is often useful when handling invalid -// inputs in other Field constructors. -func Skip() Field { - return Field{Type: zapcore.SkipType} -} - -// Binary constructs a field that carries an opaque binary blob. -// -// Binary data is serialized in an encoding-appropriate format. For example, -// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, -// use ByteString. -func Binary(key string, val []byte) Field { - return Field{Key: key, Type: zapcore.BinaryType, Interface: val} -} - -// Bool constructs a field that carries a bool. -func Bool(key string, val bool) Field { - var ival int64 - if val { - ival = 1 - } - return Field{Key: key, Type: zapcore.BoolType, Integer: ival} -} - -// ByteString constructs a field that carries UTF-8 encoded text as a []byte. -// To log opaque binary blobs (which aren't necessarily valid UTF-8), use -// Binary. -func ByteString(key string, val []byte) Field { - return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} -} - -// Complex128 constructs a field that carries a complex number. Unlike most -// numeric fields, this costs an allocation (to convert the complex128 to -// interface{}). -func Complex128(key string, val complex128) Field { - return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} -} - -// Complex64 constructs a field that carries a complex number. Unlike most -// numeric fields, this costs an allocation (to convert the complex64 to -// interface{}). 
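The error fields defined above compose naturally, and the errArray helper skips nil entries, as its loop shows. A short usage sketch, assuming the zap module is available:

    package main

    import (
        "errors"

        "go.uber.org/zap"
    )

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        err := errors.New("connection refused")

        logger.Warn("flush failed",
            zap.Error(err),                            // stored under the key "error"
            zap.NamedError("lastError", err),          // explicit key
            zap.Errors("attempts", []error{err, nil}), // nil entries are skipped
        )
    }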
-func Complex64(key string, val complex64) Field { - return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} -} - -// Float64 constructs a field that carries a float64. The way the -// floating-point value is represented is encoder-dependent, so marshaling is -// necessarily lazy. -func Float64(key string, val float64) Field { - return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} -} - -// Float32 constructs a field that carries a float32. The way the -// floating-point value is represented is encoder-dependent, so marshaling is -// necessarily lazy. -func Float32(key string, val float32) Field { - return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} -} - -// Int constructs a field with the given key and value. -func Int(key string, val int) Field { - return Int64(key, int64(val)) -} - -// Int64 constructs a field with the given key and value. -func Int64(key string, val int64) Field { - return Field{Key: key, Type: zapcore.Int64Type, Integer: val} -} - -// Int32 constructs a field with the given key and value. -func Int32(key string, val int32) Field { - return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} -} - -// Int16 constructs a field with the given key and value. -func Int16(key string, val int16) Field { - return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} -} - -// Int8 constructs a field with the given key and value. -func Int8(key string, val int8) Field { - return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} -} - -// String constructs a field with the given key and value. -func String(key string, val string) Field { - return Field{Key: key, Type: zapcore.StringType, String: val} -} - -// Uint constructs a field with the given key and value. -func Uint(key string, val uint) Field { - return Uint64(key, uint64(val)) -} - -// Uint64 constructs a field with the given key and value. -func Uint64(key string, val uint64) Field { - return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} -} - -// Uint32 constructs a field with the given key and value. -func Uint32(key string, val uint32) Field { - return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} -} - -// Uint16 constructs a field with the given key and value. -func Uint16(key string, val uint16) Field { - return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} -} - -// Uint8 constructs a field with the given key and value. -func Uint8(key string, val uint8) Field { - return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} -} - -// Uintptr constructs a field with the given key and value. -func Uintptr(key string, val uintptr) Field { - return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} -} - -// Reflect constructs a field with the given key and an arbitrary object. It uses -// an encoding-appropriate, reflection-based function to lazily serialize nearly -// any object into the logging context, but it's relatively slow and -// allocation-heavy. Outside tests, Any is always a better choice. -// -// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect -// includes the error message in the final log output. -func Reflect(key string, val interface{}) Field { - return Field{Key: key, Type: zapcore.ReflectType, Interface: val} -} - -// Namespace creates a named, isolated scope within the logger's context. All -// subsequent fields will be added to the new namespace. 
-// -// This helps prevent key collisions when injecting loggers into sub-components -// or third-party libraries. -func Namespace(key string) Field { - return Field{Key: key, Type: zapcore.NamespaceType} -} - -// Stringer constructs a field with the given key and the output of the value's -// String method. The Stringer's String method is called lazily. -func Stringer(key string, val fmt.Stringer) Field { - return Field{Key: key, Type: zapcore.StringerType, Interface: val} -} - -// Time constructs a Field with the given key and value. The encoder -// controls how the time is serialized. -func Time(key string, val time.Time) Field { - return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} -} - -// Stack constructs a field that stores a stacktrace of the current goroutine -// under provided key. Keep in mind that taking a stacktrace is eager and -// expensive (relatively speaking); this function both makes an allocation and -// takes about two microseconds. -func Stack(key string) Field { - // Returning the stacktrace as a string costs an allocation, but saves us - // from expanding the zapcore.Field union struct to include a byte slice. Since - // taking a stacktrace is already so expensive (~10us), the extra allocation - // is okay. - return String(key, takeStacktrace()) -} - -// Duration constructs a field with the given key and value. The encoder -// controls how the duration is serialized. -func Duration(key string, val time.Duration) Field { - return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} -} - -// Object constructs a field with the given key and ObjectMarshaler. It -// provides a flexible, but still type-safe and efficient, way to add map- or -// struct-like user-defined types to the logging context. The struct's -// MarshalLogObject method is called lazily. -func Object(key string, val zapcore.ObjectMarshaler) Field { - return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} -} - -// Any takes a key and an arbitrary value and chooses the best way to represent -// them as a field, falling back to a reflection-based approach only if -// necessary. -// -// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between -// them. To minimize surprises, []byte values are treated as binary blobs, byte -// values are treated as uint8, and runes are always treated as integers. 
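To make the dispatch described above concrete, here is a minimal, illustrative caller (not part of this diff; it uses only the vendored zap API shown in this file):

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()
	logger.Info("payload received",
		zap.Any("count", 42),          // exact match: dispatches to Int
		zap.Any("body", []byte{0xff}), // []byte is treated as a binary blob
		zap.Any("user", struct{ Name string }{"alice"}), // no typed match: falls back to Reflect
	)
}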
-func Any(key string, value interface{}) Field { - switch val := value.(type) { - case zapcore.ObjectMarshaler: - return Object(key, val) - case zapcore.ArrayMarshaler: - return Array(key, val) - case bool: - return Bool(key, val) - case []bool: - return Bools(key, val) - case complex128: - return Complex128(key, val) - case []complex128: - return Complex128s(key, val) - case complex64: - return Complex64(key, val) - case []complex64: - return Complex64s(key, val) - case float64: - return Float64(key, val) - case []float64: - return Float64s(key, val) - case float32: - return Float32(key, val) - case []float32: - return Float32s(key, val) - case int: - return Int(key, val) - case []int: - return Ints(key, val) - case int64: - return Int64(key, val) - case []int64: - return Int64s(key, val) - case int32: - return Int32(key, val) - case []int32: - return Int32s(key, val) - case int16: - return Int16(key, val) - case []int16: - return Int16s(key, val) - case int8: - return Int8(key, val) - case []int8: - return Int8s(key, val) - case string: - return String(key, val) - case []string: - return Strings(key, val) - case uint: - return Uint(key, val) - case []uint: - return Uints(key, val) - case uint64: - return Uint64(key, val) - case []uint64: - return Uint64s(key, val) - case uint32: - return Uint32(key, val) - case []uint32: - return Uint32s(key, val) - case uint16: - return Uint16(key, val) - case []uint16: - return Uint16s(key, val) - case uint8: - return Uint8(key, val) - case []byte: - return Binary(key, val) - case uintptr: - return Uintptr(key, val) - case []uintptr: - return Uintptrs(key, val) - case time.Time: - return Time(key, val) - case []time.Time: - return Times(key, val) - case time.Duration: - return Duration(key, val) - case []time.Duration: - return Durations(key, val) - case error: - return NamedError(key, val) - case []error: - return Errors(key, val) - case fmt.Stringer: - return Stringer(key, val) - default: - return Reflect(key, val) - } -} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go deleted file mode 100644 index 13128750..00000000 --- a/vendor/go.uber.org/zap/flag.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "flag" - - "go.uber.org/zap/zapcore" -) - -// LevelFlag uses the standard library's flag.Var to declare a global flag -// with the specified name, default, and usage guidance. 
The returned value is -// a pointer to the value of the flag. -// -// If you don't want to use the flag package's global state, you can use any -// non-nil *Level as a flag.Value with your own *flag.FlagSet. -func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { - lvl := defaultLevel - flag.Var(&lvl, name, usage) - return &lvl -} diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go deleted file mode 100644 index d02232e3..00000000 --- a/vendor/go.uber.org/zap/global.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "bytes" - "fmt" - "log" - "os" - "sync" - - "go.uber.org/zap/zapcore" -) - -const ( - _stdLogDefaultDepth = 2 - _loggerWriterDepth = 2 - _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + - "https://github.com/uber-go/zap/issues/new and reference this error: %v" -) - -var ( - _globalMu sync.RWMutex - _globalL = NewNop() - _globalS = _globalL.Sugar() -) - -// L returns the global Logger, which can be reconfigured with ReplaceGlobals. -// It's safe for concurrent use. -func L() *Logger { - _globalMu.RLock() - l := _globalL - _globalMu.RUnlock() - return l -} - -// S returns the global SugaredLogger, which can be reconfigured with -// ReplaceGlobals. It's safe for concurrent use. -func S() *SugaredLogger { - _globalMu.RLock() - s := _globalS - _globalMu.RUnlock() - return s -} - -// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a -// function to restore the original values. It's safe for concurrent use. -func ReplaceGlobals(logger *Logger) func() { - _globalMu.Lock() - prev := _globalL - _globalL = logger - _globalS = logger.Sugar() - _globalMu.Unlock() - return func() { ReplaceGlobals(prev) } -} - -// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at -// InfoLevel. To redirect the standard library's package-global logging -// functions, use RedirectStdLog instead. -func NewStdLog(l *Logger) *log.Logger { - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - f := logger.Info - return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) -} - -// NewStdLogAt returns *log.Logger which writes to supplied zap logger at -// required level. 
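A sketch of typical call sites for NewStdLog and NewStdLogAt (illustrative only; assumes the usual "go.uber.org/zap" and "go.uber.org/zap/zapcore" imports):

logger, _ := zap.NewProduction()
defer logger.Sync()

std := zap.NewStdLog(logger) // forwards log.Logger output at InfoLevel
std.Print("hello from the standard library")

stdAtError, err := zap.NewStdLogAt(logger, zapcore.ErrorLevel)
if err != nil {
	panic(err)
}
stdAtError.Print("same mechanism, emitted at ErrorLevel")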
-func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - logFunc, err := levelToFunc(logger, level) - if err != nil { - return nil, err - } - return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil -} - -// RedirectStdLog redirects output from the standard library's package-global -// logger to the supplied logger at InfoLevel. Since zap already handles caller -// annotations, timestamps, etc., it automatically disables the standard -// library's annotations and prefixing. -// -// It returns a function to restore the original prefix and flags and reset the -// standard library's output to os.Stderr. -func RedirectStdLog(l *Logger) func() { - f, err := redirectStdLogAt(l, InfoLevel) - if err != nil { - // Can't get here, since passing InfoLevel to redirectStdLogAt always - // works. - panic(fmt.Sprintf(_programmerErrorTemplate, err)) - } - return f -} - -// RedirectStdLogAt redirects output from the standard library's package-global -// logger to the supplied logger at the specified level. Since zap already -// handles caller annotations, timestamps, etc., it automatically disables the -// standard library's annotations and prefixing. -// -// It returns a function to restore the original prefix and flags and reset the -// standard library's output to os.Stderr. -func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { - return redirectStdLogAt(l, level) -} - -func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { - flags := log.Flags() - prefix := log.Prefix() - log.SetFlags(0) - log.SetPrefix("") - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - logFunc, err := levelToFunc(logger, level) - if err != nil { - return nil, err - } - log.SetOutput(&loggerWriter{logFunc}) - return func() { - log.SetFlags(flags) - log.SetPrefix(prefix) - log.SetOutput(os.Stderr) - }, nil -} - -func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { - switch lvl { - case DebugLevel: - return logger.Debug, nil - case InfoLevel: - return logger.Info, nil - case WarnLevel: - return logger.Warn, nil - case ErrorLevel: - return logger.Error, nil - case DPanicLevel: - return logger.DPanic, nil - case PanicLevel: - return logger.Panic, nil - case FatalLevel: - return logger.Fatal, nil - } - return nil, fmt.Errorf("unrecognized level: %q", lvl) -} - -type loggerWriter struct { - logFunc func(msg string, fields ...Field) -} - -func (l *loggerWriter) Write(p []byte) (int, error) { - p = bytes.TrimSpace(p) - l.logFunc(string(p)) - return len(p), nil -} diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go deleted file mode 100644 index 1b0ecaca..00000000 --- a/vendor/go.uber.org/zap/http_handler.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "encoding/json" - "fmt" - "net/http" - - "go.uber.org/zap/zapcore" -) - -// ServeHTTP is a simple JSON endpoint that can report on or change the current -// logging level. -// -// GET requests return a JSON description of the current logging level. PUT -// requests change the logging level and expect a payload like: -// {"level":"info"} -// -// It's perfectly safe to change the logging level while a program is running. -func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { - type errorResponse struct { - Error string `json:"error"` - } - type payload struct { - Level *zapcore.Level `json:"level"` - } - - enc := json.NewEncoder(w) - - switch r.Method { - - case http.MethodGet: - current := lvl.Level() - enc.Encode(payload{Level: &current}) - - case http.MethodPut: - var req payload - - if errmess := func() string { - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return fmt.Sprintf("Request body must be well-formed JSON: %v", err) - } - if req.Level == nil { - return "Must specify a logging level." - } - return "" - }(); errmess != "" { - w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: errmess}) - return - } - - lvl.SetLevel(*req.Level) - enc.Encode(req) - - default: - w.WriteHeader(http.StatusMethodNotAllowed) - enc.Encode(errorResponse{ - Error: "Only GET and PUT are supported.", - }) - } -} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go deleted file mode 100644 index dad583aa..00000000 --- a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package bufferpool houses zap's shared internal buffer pool. Third-party -// packages can recreate the same functionality with buffer.NewPool.
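A sketch of that public equivalent, using go.uber.org/zap/buffer directly (illustrative; render is a made-up helper):

package main

import "go.uber.org/zap/buffer"

var pool = buffer.NewPool()

func render(s string) string {
	buf := pool.Get()
	defer buf.Free() // return the buffer to the pool when done
	buf.AppendString(s)
	return buf.String()
}

func main() { _ = render("hello") }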
-package bufferpool - -import "go.uber.org/zap/buffer" - -var ( - _pool = buffer.NewPool() - // Get retrieves a buffer from the pool, creating one if necessary. - Get = _pool.Get -) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go deleted file mode 100644 index c4d5d02a..00000000 --- a/vendor/go.uber.org/zap/internal/color/color.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package color adds coloring functionality for TTY output. -package color - -import "fmt" - -// Foreground colors. -const ( - Black Color = iota + 30 - Red - Green - Yellow - Blue - Magenta - Cyan - White -) - -// Color represents a text color. -type Color uint8 - -// Add adds the coloring to the given string. -func (c Color) Add(s string) string { - return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) -} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go deleted file mode 100644 index dfc5b05f..00000000 --- a/vendor/go.uber.org/zap/internal/exit/exit.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package exit provides stubs so that unit tests can exercise code that calls -// os.Exit(1). 
-package exit - -import "os" - -var real = func() { os.Exit(1) } - -// Exit normally terminates the process by calling os.Exit(1). If the package -// is stubbed, it instead records a call in the testing spy. -func Exit() { - real() -} - -// A StubbedExit is a testing fake for os.Exit. -type StubbedExit struct { - Exited bool - prev func() -} - -// Stub substitutes a fake for the call to os.Exit(1). -func Stub() *StubbedExit { - s := &StubbedExit{prev: real} - real = s.exit - return s -} - -// WithStub runs the supplied function with Exit stubbed. It returns the stub -// used, so that users can test whether the process would have crashed. -func WithStub(f func()) *StubbedExit { - s := Stub() - defer s.Unstub() - f() - return s -} - -// Unstub restores the previous exit function. -func (se *StubbedExit) Unstub() { - real = se.prev -} - -func (se *StubbedExit) exit() { - se.Exited = true -} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go deleted file mode 100644 index 3567a9a1..00000000 --- a/vendor/go.uber.org/zap/level.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "go.uber.org/atomic" - "go.uber.org/zap/zapcore" -) - -const ( - // DebugLevel logs are typically voluminous, and are usually disabled in - // production. - DebugLevel = zapcore.DebugLevel - // InfoLevel is the default logging priority. - InfoLevel = zapcore.InfoLevel - // WarnLevel logs are more important than Info, but don't need individual - // human review. - WarnLevel = zapcore.WarnLevel - // ErrorLevel logs are high-priority. If an application is running smoothly, - // it shouldn't generate any error-level logs. - ErrorLevel = zapcore.ErrorLevel - // DPanicLevel logs are particularly important errors. In development the - // logger panics after writing the message. - DPanicLevel = zapcore.DPanicLevel - // PanicLevel logs a message, then panics. - PanicLevel = zapcore.PanicLevel - // FatalLevel logs a message, then calls os.Exit(1). - FatalLevel = zapcore.FatalLevel -) - -// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with -// an anonymous function. -// -// It's particularly useful when splitting log output between different -// outputs (e.g., standard error and standard out). For sample code, see the -// package-level AdvancedConfiguration example. 
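The split-output pattern mentioned above looks roughly like this sketch, modeled on zap's AdvancedConfiguration example (assumes "os", "go.uber.org/zap", and "go.uber.org/zap/zapcore" imports):

// Errors and above go to stderr; everything below goes to stdout.
highPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
	return lvl >= zapcore.ErrorLevel
})
lowPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
	return lvl < zapcore.ErrorLevel
})
enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
core := zapcore.NewTee(
	zapcore.NewCore(enc, zapcore.Lock(os.Stderr), highPriority),
	zapcore.NewCore(enc, zapcore.Lock(os.Stdout), lowPriority),
)
logger := zap.New(core)
defer logger.Sync()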
-type LevelEnablerFunc func(zapcore.Level) bool - -// Enabled calls the wrapped function. -func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } - -// An AtomicLevel is an atomically changeable, dynamic logging level. It lets -// you safely change the log level of a tree of loggers (the root logger and -// any children created by adding context) at runtime. -// -// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to -// alter its level. -// -// AtomicLevels must be created with the NewAtomicLevel constructor to allocate -// their internal atomic pointer. -type AtomicLevel struct { - l *atomic.Int32 -} - -// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging -// enabled. -func NewAtomicLevel() AtomicLevel { - return AtomicLevel{ - l: atomic.NewInt32(int32(InfoLevel)), - } -} - -// NewAtomicLevelAt is a convenience function that creates an AtomicLevel -// and then calls SetLevel with the given level. -func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { - a := NewAtomicLevel() - a.SetLevel(l) - return a -} - -// Enabled implements the zapcore.LevelEnabler interface, which allows the -// AtomicLevel to be used in place of traditional static levels. -func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { - return lvl.Level().Enabled(l) -} - -// Level returns the minimum enabled log level. -func (lvl AtomicLevel) Level() zapcore.Level { - return zapcore.Level(int8(lvl.l.Load())) -} - -// SetLevel alters the logging level. -func (lvl AtomicLevel) SetLevel(l zapcore.Level) { - lvl.l.Store(int32(l)) -} - -// String returns the string representation of the underlying Level. -func (lvl AtomicLevel) String() string { - return lvl.Level().String() -} - -// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text -// representations as the static zapcore.Levels ("debug", "info", "warn", -// "error", "dpanic", "panic", and "fatal"). -func (lvl *AtomicLevel) UnmarshalText(text []byte) error { - if lvl.l == nil { - lvl.l = &atomic.Int32{} - } - - var l zapcore.Level - if err := l.UnmarshalText(text); err != nil { - return err - } - - lvl.SetLevel(l) - return nil -} - -// MarshalText marshals the AtomicLevel to a byte slice. It uses the same -// text representation as the static zapcore.Levels ("debug", "info", "warn", -// "error", "dpanic", "panic", and "fatal"). -func (lvl AtomicLevel) MarshalText() (text []byte, err error) { - return lvl.Level().MarshalText() -} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go deleted file mode 100644 index dc8f6e3a..00000000 --- a/vendor/go.uber.org/zap/logger.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - "time" - - "go.uber.org/zap/zapcore" -) - -// A Logger provides fast, leveled, structured logging. All methods are safe -// for concurrent use. -// -// The Logger is designed for contexts in which every microsecond and every -// allocation matters, so its API intentionally favors performance and type -// safety over brevity. For most applications, the SugaredLogger strikes a -// better balance between performance and ergonomics. -type Logger struct { - core zapcore.Core - - development bool - name string - errorOutput zapcore.WriteSyncer - - addCaller bool - addStack zapcore.LevelEnabler - - callerSkip int -} - -// New constructs a new Logger from the provided zapcore.Core and Options. If -// the passed zapcore.Core is nil, it falls back to using a no-op -// implementation. -// -// This is the most flexible way to construct a Logger, but also the most -// verbose. For typical use cases, the highly-opinionated presets -// (NewProduction, NewDevelopment, and NewExample) or the Config struct are -// more convenient. -// -// For sample code, see the package-level AdvancedConfiguration example. -func New(core zapcore.Core, options ...Option) *Logger { - if core == nil { - return NewNop() - } - log := &Logger{ - core: core, - errorOutput: zapcore.Lock(os.Stderr), - addStack: zapcore.FatalLevel + 1, - } - return log.WithOptions(options...) -} - -// NewNop returns a no-op Logger. It never writes out logs or internal errors, -// and it never runs user-defined hooks. -// -// Using WithOptions to replace the Core or error output of a no-op Logger can -// re-enable logging. -func NewNop() *Logger { - return &Logger{ - core: zapcore.NewNopCore(), - errorOutput: zapcore.AddSync(ioutil.Discard), - addStack: zapcore.FatalLevel + 1, - } -} - -// NewProduction builds a sensible production Logger that writes InfoLevel and -// above logs to standard error as JSON. -// -// It's a shortcut for NewProductionConfig().Build(...Option). -func NewProduction(options ...Option) (*Logger, error) { - return NewProductionConfig().Build(options...) -} - -// NewDevelopment builds a development Logger that writes DebugLevel and above -// logs to standard error in a human-friendly format. -// -// It's a shortcut for NewDevelopmentConfig().Build(...Option). -func NewDevelopment(options ...Option) (*Logger, error) { - return NewDevelopmentConfig().Build(options...) -} - -// NewExample builds a Logger that's designed for use in zap's testable -// examples. It writes DebugLevel and above logs to standard out as JSON, but -// omits the timestamp and calling function to keep example output -// short and deterministic. -func NewExample(options ...Option) *Logger { - encoderCfg := zapcore.EncoderConfig{ - MessageKey: "msg", - LevelKey: "level", - NameKey: "logger", - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - } - core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) - return New(core).WithOptions(options...) -} - -// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, -// API. 
Sugaring a Logger is quite inexpensive, so it's reasonable for a -// single application to use both Loggers and SugaredLoggers, converting -// between them on the boundaries of performance-sensitive code. -func (log *Logger) Sugar() *SugaredLogger { - core := log.clone() - core.callerSkip += 2 - return &SugaredLogger{core} -} - -// Named adds a new path segment to the logger's name. Segments are joined by -// periods. By default, Loggers are unnamed. -func (log *Logger) Named(s string) *Logger { - if s == "" { - return log - } - l := log.clone() - if log.name == "" { - l.name = s - } else { - l.name = strings.Join([]string{l.name, s}, ".") - } - return l -} - -// WithOptions clones the current Logger, applies the supplied Options, and -// returns the resulting Logger. It's safe to use concurrently. -func (log *Logger) WithOptions(opts ...Option) *Logger { - c := log.clone() - for _, opt := range opts { - opt.apply(c) - } - return c -} - -// With creates a child logger and adds structured context to it. Fields added -// to the child don't affect the parent, and vice versa. -func (log *Logger) With(fields ...Field) *Logger { - if len(fields) == 0 { - return log - } - l := log.clone() - l.core = l.core.With(fields) - return l -} - -// Check returns a CheckedEntry if logging a message at the specified level -// is enabled. It's a completely optional optimization; in high-performance -// applications, Check can help avoid allocating a slice to hold fields. -func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - return log.check(lvl, msg) -} - -// Debug logs a message at DebugLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Debug(msg string, fields ...Field) { - if ce := log.check(DebugLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Info logs a message at InfoLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Info(msg string, fields ...Field) { - if ce := log.check(InfoLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Warn logs a message at WarnLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Warn(msg string, fields ...Field) { - if ce := log.check(WarnLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Error logs a message at ErrorLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Error(msg string, fields ...Field) { - if ce := log.check(ErrorLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// DPanic logs a message at DPanicLevel. The message includes any fields -// passed at the log site, as well as any fields accumulated on the logger. -// -// If the logger is in development mode, it then panics (DPanic means -// "development panic"). This is useful for catching errors that are -// recoverable, but shouldn't ever happen. -func (log *Logger) DPanic(msg string, fields ...Field) { - if ce := log.check(DPanicLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Panic logs a message at PanicLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -// -// The logger then panics, even if logging at PanicLevel is disabled. 
-func (log *Logger) Panic(msg string, fields ...Field) { - if ce := log.check(PanicLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Fatal logs a message at FatalLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -// -// The logger then calls os.Exit(1), even if logging at FatalLevel is -// disabled. -func (log *Logger) Fatal(msg string, fields ...Field) { - if ce := log.check(FatalLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Sync calls the underlying Core's Sync method, flushing any buffered log -// entries. Applications should take care to call Sync before exiting. -func (log *Logger) Sync() error { - return log.core.Sync() -} - -// Core returns the Logger's underlying zapcore.Core. -func (log *Logger) Core() zapcore.Core { - return log.core -} - -func (log *Logger) clone() *Logger { - copy := *log - return &copy -} - -func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - // check must always be called directly by a method in the Logger interface - // (e.g., Check, Info, Fatal). - const callerSkipOffset = 2 - - // Create basic checked entry thru the core; this will be non-nil if the - // log message will actually be written somewhere. - ent := zapcore.Entry{ - LoggerName: log.name, - Time: time.Now(), - Level: lvl, - Message: msg, - } - ce := log.core.Check(ent, nil) - willWrite := ce != nil - - // Set up any required terminal behavior. - switch ent.Level { - case zapcore.PanicLevel: - ce = ce.Should(ent, zapcore.WriteThenPanic) - case zapcore.FatalLevel: - ce = ce.Should(ent, zapcore.WriteThenFatal) - case zapcore.DPanicLevel: - if log.development { - ce = ce.Should(ent, zapcore.WriteThenPanic) - } - } - - // Only do further annotation if we're going to write this message; checked - // entries that exist only for terminal behavior don't benefit from - // annotation. - if !willWrite { - return ce - } - - // Thread the error output through to the CheckedEntry. - ce.ErrorOutput = log.errorOutput - if log.addCaller { - ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset)) - if !ce.Entry.Caller.Defined { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) - log.errorOutput.Sync() - } - } - if log.addStack.Enabled(ce.Entry.Level) { - ce.Entry.Stack = Stack("").String - } - - return ce -} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go deleted file mode 100644 index 7a6b0fca..00000000 --- a/vendor/go.uber.org/zap/options.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import "go.uber.org/zap/zapcore" - -// An Option configures a Logger. -type Option interface { - apply(*Logger) -} - -// optionFunc wraps a func so it satisfies the Option interface. -type optionFunc func(*Logger) - -func (f optionFunc) apply(log *Logger) { - f(log) -} - -// WrapCore wraps or replaces the Logger's underlying zapcore.Core. -func WrapCore(f func(zapcore.Core) zapcore.Core) Option { - return optionFunc(func(log *Logger) { - log.core = f(log.core) - }) -} - -// Hooks registers functions which will be called each time the Logger writes -// out an Entry. Repeated use of Hooks is additive. -// -// Hooks are useful for simple side effects, like capturing metrics for the -// number of emitted logs. More complex side effects, including anything that -// requires access to the Entry's structured fields, should be implemented as -// a zapcore.Core instead. See zapcore.RegisterHooks for details. -func Hooks(hooks ...func(zapcore.Entry) error) Option { - return optionFunc(func(log *Logger) { - log.core = zapcore.RegisterHooks(log.core, hooks...) - }) -} - -// Fields adds fields to the Logger. -func Fields(fs ...Field) Option { - return optionFunc(func(log *Logger) { - log.core = log.core.With(fs) - }) -} - -// ErrorOutput sets the destination for errors generated by the Logger. Note -// that this option only affects internal errors; for sample code that sends -// error-level logs to a different location from info- and debug-level logs, -// see the package-level AdvancedConfiguration example. -// -// The supplied WriteSyncer must be safe for concurrent use. The Open and -// zapcore.Lock functions are the simplest ways to protect files with a mutex. -func ErrorOutput(w zapcore.WriteSyncer) Option { - return optionFunc(func(log *Logger) { - log.errorOutput = w - }) -} - -// Development puts the logger in development mode, which makes DPanic-level -// logs panic instead of simply logging an error. -func Development() Option { - return optionFunc(func(log *Logger) { - log.development = true - }) -} - -// AddCaller configures the Logger to annotate each message with the filename -// and line number of zap's caller. -func AddCaller() Option { - return optionFunc(func(log *Logger) { - log.addCaller = true - }) -} - -// AddCallerSkip increases the number of callers skipped by caller annotation -// (as enabled by the AddCaller option). When building wrappers around the -// Logger and SugaredLogger, supplying this Option prevents zap from always -// reporting the wrapper code as the caller. -func AddCallerSkip(skip int) Option { - return optionFunc(func(log *Logger) { - log.callerSkip += skip - }) -} - -// AddStacktrace configures the Logger to record a stack trace for all messages at -// or above a given level. -func AddStacktrace(lvl zapcore.LevelEnabler) Option { - return optionFunc(func(log *Logger) { - log.addStack = lvl - }) -} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go deleted file mode 100644 index ff0becfe..00000000 --- a/vendor/go.uber.org/zap/sink.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "errors" - "fmt" - "io" - "net/url" - "os" - "strings" - "sync" - - "go.uber.org/zap/zapcore" -) - -const schemeFile = "file" - -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} - -// Sink defines the interface to write to and close logger destinations. -type Sink interface { - zapcore.WriteSyncer - io.Closer -} - -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - -type errSinkNotFound struct { - scheme string -} - -func (e *errSinkNotFound) Error() string { - return fmt.Sprintf("no sink found for scheme %q", e.scheme) -} - -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. 
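Registration is typically done once at init time. The in-memory sink below is a hypothetical test helper, not a type from this repository (assumes "bytes", "net/url", and "go.uber.org/zap" imports):

type memorySink struct{ *bytes.Buffer }

func (memorySink) Close() error { return nil }
func (memorySink) Sync() error  { return nil }

func init() {
	// After this, output paths like "memory://" resolve to the sink above.
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return memorySink{new(bytes.Buffer)}, nil
	}); err != nil {
		panic(err)
	}
}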
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - if scheme == "" { - return errors.New("can't register a sink factory for empty string") - } - normalized, err := normalizeScheme(scheme) - if err != nil { - return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) - } - if _, ok := _sinkFactories[normalized]; ok { - return fmt.Errorf("sink factory already registered for scheme %q", normalized) - } - _sinkFactories[normalized] = factory - return nil -} - -func newSink(rawURL string) (Sink, error) { - u, err := url.Parse(rawURL) - if err != nil { - return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) - } - if u.Scheme == "" { - u.Scheme = schemeFile - } - - _sinkMutex.RLock() - factory, ok := _sinkFactories[u.Scheme] - _sinkMutex.RUnlock() - if !ok { - return nil, &errSinkNotFound{u.Scheme} - } - return factory(u) -} - -func newFileSink(u *url.URL) (Sink, error) { - if u.User != nil { - return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) - } - if u.Fragment != "" { - return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) - } - if u.RawQuery != "" { - return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) - } - // Error messages are better if we check hostname and port separately. - if u.Port() != "" { - return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) - } - if hn := u.Hostname(); hn != "" && hn != "localhost" { - return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) - } - switch u.Path { - case "stdout": - return nopCloserSink{os.Stdout}, nil - case "stderr": - return nopCloserSink{os.Stderr}, nil - } - return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) -} - -func normalizeScheme(s string) (string, error) { - // https://tools.ietf.org/html/rfc3986#section-3.1 - s = strings.ToLower(s) - if first := s[0]; 'a' > first || 'z' < first { - return "", errors.New("must start with a letter") - } - for i := 1; i < len(s); i++ { // iterate over bytes, not runes - c := s[i] - switch { - case 'a' <= c && c <= 'z': - continue - case '0' <= c && c <= '9': - continue - case c == '.' || c == '+' || c == '-': - continue - } - return "", fmt.Errorf("may not contain %q", c) - } - return s, nil -} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go deleted file mode 100644 index 100fac21..00000000 --- a/vendor/go.uber.org/zap/stacktrace.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "runtime" - "strings" - "sync" - - "go.uber.org/zap/internal/bufferpool" -) - -const _zapPackage = "go.uber.org/zap" - -var ( - _stacktracePool = sync.Pool{ - New: func() interface{} { - return newProgramCounters(64) - }, - } - - // We add "." and "/" suffixes to the package name to ensure we only match - // the exact package and not any package with the same prefix. - _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/") - _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...) -) - -func takeStacktrace() string { - buffer := bufferpool.Get() - defer buffer.Free() - programCounters := _stacktracePool.Get().(*programCounters) - defer _stacktracePool.Put(programCounters) - - var numFrames int - for { - // Skip the call to runtime.Callers and takeStacktrace so that the - // program counters start at the caller of takeStacktrace. - numFrames = runtime.Callers(2, programCounters.pcs) - if numFrames < len(programCounters.pcs) { - break - } - // Don't put the too-short counter slice back into the pool; this lets - // the pool adjust if we consistently take deep stacktraces. - programCounters = newProgramCounters(len(programCounters.pcs) * 2) - } - - i := 0 - skipZapFrames := true // skip all consecutive zap frames at the beginning. - frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) - - // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a runtime frame which - // adds noise, since it's only either runtime.main or runtime.goexit. - for frame, more := frames.Next(); more; frame, more = frames.Next() { - if skipZapFrames && isZapFrame(frame.Function) { - continue - } else { - skipZapFrames = false - } - - if i != 0 { - buffer.AppendByte('\n') - } - i++ - buffer.AppendString(frame.Function) - buffer.AppendByte('\n') - buffer.AppendByte('\t') - buffer.AppendString(frame.File) - buffer.AppendByte(':') - buffer.AppendInt(int64(frame.Line)) - } - - return buffer.String() -} - -func isZapFrame(function string) bool { - for _, prefix := range _zapStacktracePrefixes { - if strings.HasPrefix(function, prefix) { - return true - } - } - - // We can't use a prefix match here since the location of the vendor - // directory affects the prefix. Instead we do a contains match. - for _, contains := range _zapStacktraceVendorContains { - if strings.Contains(function, contains) { - return true - } - } - - return false -} - -type programCounters struct { - pcs []uintptr -} - -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} -} - -func addPrefix(prefix string, ss ...string) []string { - withPrefix := make([]string, len(ss)) - for i, s := range ss { - withPrefix[i] = prefix + s - } - return withPrefix -} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go deleted file mode 100644 index 77ca227f..00000000 --- a/vendor/go.uber.org/zap/sugar.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc.
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - - "go.uber.org/zap/zapcore" - - "go.uber.org/multierr" -) - -const ( - _oddNumberErrMsg = "Ignored key without a value." - _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." -) - -// A SugaredLogger wraps the base Logger functionality in a slower, but less -// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar -// method. -// -// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. -// For each log level, it exposes three methods: one for loosely-typed -// structured logging, one for println-style formatting, and one for -// printf-style formatting. For example, SugaredLoggers can produce InfoLevel -// output with Infow ("info with" structured context), Info, or Infof. -type SugaredLogger struct { - base *Logger -} - -// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring -// is quite inexpensive, so it's reasonable for a single application to use -// both Loggers and SugaredLoggers, converting between them on the boundaries -// of performance-sensitive code. -func (s *SugaredLogger) Desugar() *Logger { - base := s.base.clone() - base.callerSkip -= 2 - return base -} - -// Named adds a sub-scope to the logger's name. See Logger.Named for details. -func (s *SugaredLogger) Named(name string) *SugaredLogger { - return &SugaredLogger{base: s.base.Named(name)} -} - -// With adds a variadic number of fields to the logging context. It accepts a -// mix of strongly-typed Field objects and loosely-typed key-value pairs. When -// processing pairs, the first element of the pair is used as the field key -// and the second as the field value. -// -// For example, -// sugaredLogger.With( -// "hello", "world", -// "failure", errors.New("oh no"), -// Stack(), -// "count", 42, -// "user", User{Name: "alice"}, -// ) -// is the equivalent of -// unsugared.With( -// String("hello", "world"), -// String("failure", "oh no"), -// Stack(), -// Int("count", 42), -// Object("user", User{Name: "alice"}), -// ) -// -// Note that the keys in key-value pairs should be strings. In development, -// passing a non-string key panics. In production, the logger is more -// forgiving: a separate error is logged, but the key-value pair is skipped -// and execution continues. Passing an orphaned key triggers similar behavior: -// panics in development and errors in production. 
-func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { - return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} -} - -// Debug uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Debug(args ...interface{}) { - s.log(DebugLevel, "", args, nil) -} - -// Info uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Info(args ...interface{}) { - s.log(InfoLevel, "", args, nil) -} - -// Warn uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Warn(args ...interface{}) { - s.log(WarnLevel, "", args, nil) -} - -// Error uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Error(args ...interface{}) { - s.log(ErrorLevel, "", args, nil) -} - -// DPanic uses fmt.Sprint to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) -func (s *SugaredLogger) DPanic(args ...interface{}) { - s.log(DPanicLevel, "", args, nil) -} - -// Panic uses fmt.Sprint to construct and log a message, then panics. -func (s *SugaredLogger) Panic(args ...interface{}) { - s.log(PanicLevel, "", args, nil) -} - -// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. -func (s *SugaredLogger) Fatal(args ...interface{}) { - s.log(FatalLevel, "", args, nil) -} - -// Debugf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Debugf(template string, args ...interface{}) { - s.log(DebugLevel, template, args, nil) -} - -// Infof uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Infof(template string, args ...interface{}) { - s.log(InfoLevel, template, args, nil) -} - -// Warnf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Warnf(template string, args ...interface{}) { - s.log(WarnLevel, template, args, nil) -} - -// Errorf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Errorf(template string, args ...interface{}) { - s.log(ErrorLevel, template, args, nil) -} - -// DPanicf uses fmt.Sprintf to log a templated message. In development, the -// logger then panics. (See DPanicLevel for details.) -func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { - s.log(DPanicLevel, template, args, nil) -} - -// Panicf uses fmt.Sprintf to log a templated message, then panics. -func (s *SugaredLogger) Panicf(template string, args ...interface{}) { - s.log(PanicLevel, template, args, nil) -} - -// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. -func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { - s.log(FatalLevel, template, args, nil) -} - -// Debugw logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -// -// When debug-level logging is disabled, this is much faster than -// s.With(keysAndValues).Debug(msg) -func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { - s.log(DebugLevel, msg, nil, keysAndValues) -} - -// Infow logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { - s.log(InfoLevel, msg, nil, keysAndValues) -} - -// Warnw logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { - s.log(WarnLevel, msg, nil, keysAndValues) -} - -// Errorw logs a message with some additional context. 
The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { - s.log(ErrorLevel, msg, nil, keysAndValues) -} - -// DPanicw logs a message with some additional context. In development, the -// logger then panics. (See DPanicLevel for details.) The variadic key-value -// pairs are treated as they are in With. -func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { - s.log(DPanicLevel, msg, nil, keysAndValues) -} - -// Panicw logs a message with some additional context, then panics. The -// variadic key-value pairs are treated as they are in With. -func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { - s.log(PanicLevel, msg, nil, keysAndValues) -} - -// Fatalw logs a message with some additional context, then calls os.Exit. The -// variadic key-value pairs are treated as they are in With. -func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { - s.log(FatalLevel, msg, nil, keysAndValues) -} - -// Sync flushes any buffered log entries. -func (s *SugaredLogger) Sync() error { - return s.base.Sync() -} - -func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { - // If logging at this level is completely disabled, skip the overhead of - // string formatting. - if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { - return - } - - // Format with Sprint, Sprintf, or neither. - msg := template - if msg == "" && len(fmtArgs) > 0 { - msg = fmt.Sprint(fmtArgs...) - } else if msg != "" && len(fmtArgs) > 0 { - msg = fmt.Sprintf(template, fmtArgs...) - } - - if ce := s.base.Check(lvl, msg); ce != nil { - ce.Write(s.sweetenFields(context)...) - } -} - -func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { - if len(args) == 0 { - return nil - } - - // Allocate enough space for the worst case; if users pass only structured - // fields, we shouldn't penalize them with extra allocations. - fields := make([]Field, 0, len(args)) - var invalid invalidPairs - - for i := 0; i < len(args); { - // This is a strongly-typed field. Consume it and move on. - if f, ok := args[i].(Field); ok { - fields = append(fields, f) - i++ - continue - } - - // Make sure this element isn't a dangling key. - if i == len(args)-1 { - s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) - break - } - - // Consume this value and the next, treating them as a key-value pair. If the - // key isn't a string, add this pair to the slice of invalid pairs. - key, val := args[i], args[i+1] - if keyStr, ok := key.(string); !ok { - // Subsequent errors are likely, so allocate once up front. - if cap(invalid) == 0 { - invalid = make(invalidPairs, 0, len(args)/2) - } - invalid = append(invalid, invalidPair{i, key, val}) - } else { - fields = append(fields, Any(keyStr, val)) - } - i += 2 - } - - // If we encountered any invalid key-value pairs, log an error. 
- if len(invalid) > 0 { - s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) - } - return fields -} - -type invalidPair struct { - position int - key, value interface{} -} - -func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { - enc.AddInt64("position", int64(p.position)) - Any("key", p.key).AddTo(enc) - Any("value", p.value).AddTo(enc) - return nil -} - -type invalidPairs []invalidPair - -func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { - var err error - for i := range ps { - err = multierr.Append(err, enc.AppendObject(ps[i])) - } - return err -} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go deleted file mode 100644 index c5a1f162..00000000 --- a/vendor/go.uber.org/zap/time.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import "time" - -func timeToMillis(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond) -} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go deleted file mode 100644 index 86a709ab..00000000 --- a/vendor/go.uber.org/zap/writer.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
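Aside: invalidPairs.MarshalLogArray above leans on go.uber.org/multierr to collect per-element failures without aborting the loop; a standalone sketch of that accumulation pattern (error messages invented).

package main

import (
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	var err error
	for i := 0; i < 3; i++ {
		// Append keeps every non-nil error; a nil operand is a no-op.
		err = multierr.Append(err, fmt.Errorf("element %d failed", i))
	}
	fmt.Println(multierr.Errors(err)) // the three underlying errors
}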
- -package zap - -import ( - "fmt" - "io" - "io/ioutil" - - "go.uber.org/zap/zapcore" - - "go.uber.org/multierr" -) - -// Open is a high-level wrapper that takes a variadic number of URLs, opens or -// creates each of the specified resources, and combines them into a locked -// WriteSyncer. It also returns any error encountered and a function to close -// any opened files. -// -// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a -// scheme and URLs with the "file" scheme. Third-party code may register -// factories for other schemes using RegisterSink. -// -// URLs with the "file" scheme must use absolute paths on the local -// filesystem. No user, password, port, fragments, or query parameters are -// allowed, and the hostname must be empty or "localhost". -// -// Since it's common to write logs to the local filesystem, URLs without a -// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without -// a scheme, the special paths "stdout" and "stderr" are interpreted as -// os.Stdout and os.Stderr. When specified without a scheme, relative file -// paths also work. -func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { - writers, close, err := open(paths) - if err != nil { - return nil, nil, err - } - - writer := CombineWriteSyncers(writers...) - return writer, close, nil -} - -func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { - writers := make([]zapcore.WriteSyncer, 0, len(paths)) - closers := make([]io.Closer, 0, len(paths)) - close := func() { - for _, c := range closers { - c.Close() - } - } - - var openErr error - for _, path := range paths { - sink, err := newSink(path) - if err != nil { - openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) - continue - } - writers = append(writers, sink) - closers = append(closers, sink) - } - if openErr != nil { - close() - return writers, nil, openErr - } - - return writers, close, nil -} - -// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a -// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op -// WriteSyncer. -// -// It's provided purely as a convenience; the result is no different from -// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. -func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { - if len(writers) == 0 { - return zapcore.AddSync(ioutil.Discard) - } - return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) -} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go deleted file mode 100644 index b7875966..00000000 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
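Aside: a sketch of how the Open/CombineWriteSyncers pair above gets wired into a logger; the file path is hypothetical.

package main

import (
	"log"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// "stdout" is special-cased; the schemeless path is a local file.
	ws, closeSinks, err := zap.Open("stdout", "/tmp/app.log")
	if err != nil {
		log.Fatal(err)
	}
	defer closeSinks()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws, // already a single, locked WriteSyncer covering both sinks
		zapcore.InfoLevel,
	)
	zap.New(core).Info("written to stdout and the file")
}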
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "sync" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/internal/bufferpool" -) - -var _sliceEncoderPool = sync.Pool{ - New: func() interface{} { - return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} - }, -} - -func getSliceEncoder() *sliceArrayEncoder { - return _sliceEncoderPool.Get().(*sliceArrayEncoder) -} - -func putSliceEncoder(e *sliceArrayEncoder) { - e.elems = e.elems[:0] - _sliceEncoderPool.Put(e) -} - -type consoleEncoder struct { - *jsonEncoder -} - -// NewConsoleEncoder creates an encoder whose output is designed for human - -// rather than machine - consumption. It serializes the core log entry data -// (message, level, timestamp, etc.) in a plain-text format and leaves the -// structured context as JSON. -// -// Note that although the console encoder doesn't use the keys specified in the -// encoder configuration, it will omit any element whose key is set to the empty -// string. -func NewConsoleEncoder(cfg EncoderConfig) Encoder { - return consoleEncoder{newJSONEncoder(cfg, true)} -} - -func (c consoleEncoder) Clone() Encoder { - return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} -} - -func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { - line := bufferpool.Get() - - // We don't want the entry's metadata to be quoted and escaped (if it's - // encoded as strings), which means that we can't use the JSON encoder. The - // simplest option is to use the memory encoder and fmt.Fprint. - // - // If this ever becomes a performance bottleneck, we can implement - // ArrayEncoder for our plain-text format. - arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { - c.EncodeTime(ent.Time, arr) - } - if c.LevelKey != "" && c.EncodeLevel != nil { - c.EncodeLevel(ent.Level, arr) - } - if ent.LoggerName != "" && c.NameKey != "" { - nameEncoder := c.EncodeName - - if nameEncoder == nil { - // Fall back to FullNameEncoder for backward compatibility. - nameEncoder = FullNameEncoder - } - - nameEncoder(ent.LoggerName, arr) - } - if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { - c.EncodeCaller(ent.Caller, arr) - } - for i := range arr.elems { - if i > 0 { - line.AppendByte('\t') - } - fmt.Fprint(line, arr.elems[i]) - } - putSliceEncoder(arr) - - // Add the message itself. - if c.MessageKey != "" { - c.addTabIfNecessary(line) - line.AppendString(ent.Message) - } - - // Add any structured context. - c.writeContext(line, fields) - - // If there's no stacktrace key, honor that; this allows users to force - // single-line output. 
- if ent.Stack != "" && c.StacktraceKey != "" { - line.AppendByte('\n') - line.AppendString(ent.Stack) - } - - if c.LineEnding != "" { - line.AppendString(c.LineEnding) - } else { - line.AppendString(DefaultLineEnding) - } - return line, nil -} - -func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { - context := c.jsonEncoder.Clone().(*jsonEncoder) - defer context.buf.Free() - - addFields(context, extra) - context.closeOpenNamespaces() - if context.buf.Len() == 0 { - return - } - - c.addTabIfNecessary(line) - line.AppendByte('{') - line.Write(context.buf.Bytes()) - line.AppendByte('}') -} - -func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { - if line.Len() > 0 { - line.AppendByte('\t') - } -} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go deleted file mode 100644 index a1ef8b03..00000000 --- a/vendor/go.uber.org/zap/zapcore/core.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -// Core is a minimal, fast logger interface. It's designed for library authors -// to wrap in a more user-friendly API. -type Core interface { - LevelEnabler - - // With adds structured context to the Core. - With([]Field) Core - // Check determines whether the supplied Entry should be logged (using the - // embedded LevelEnabler and possibly some extra logic). If the entry - // should be logged, the Core adds itself to the CheckedEntry and returns - // the result. - // - // Callers must use Check before calling Write. - Check(Entry, *CheckedEntry) *CheckedEntry - // Write serializes the Entry and any Fields supplied at the log site and - // writes them to their destination. - // - // If called, Write should always log the Entry and Fields; it should not - // replicate the logic of Check. - Write(Entry, []Field) error - // Sync flushes buffered logs (if any). - Sync() error -} - -type nopCore struct{} - -// NewNopCore returns a no-op Core. -func NewNopCore() Core { return nopCore{} } -func (nopCore) Enabled(Level) bool { return false } -func (n nopCore) With([]Field) Core { return n } -func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } -func (nopCore) Write(Entry, []Field) error { return nil } -func (nopCore) Sync() error { return nil } - -// NewCore creates a Core that writes logs to a WriteSyncer. 
-func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { - return &ioCore{ - LevelEnabler: enab, - enc: enc, - out: ws, - } -} - -type ioCore struct { - LevelEnabler - enc Encoder - out WriteSyncer -} - -func (c *ioCore) With(fields []Field) Core { - clone := c.clone() - addFields(clone.enc, fields) - return clone -} - -func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if c.Enabled(ent.Level) { - return ce.AddCore(ent, c) - } - return ce -} - -func (c *ioCore) Write(ent Entry, fields []Field) error { - buf, err := c.enc.EncodeEntry(ent, fields) - if err != nil { - return err - } - _, err = c.out.Write(buf.Bytes()) - buf.Free() - if err != nil { - return err - } - if ent.Level > ErrorLevel { - // Since we may be crashing the program, sync the output. Ignore Sync - // errors, pending a clean solution to issue #370. - c.Sync() - } - return nil -} - -func (c *ioCore) Sync() error { - return c.out.Sync() -} - -func (c *ioCore) clone() *ioCore { - return &ioCore{ - LevelEnabler: c.LevelEnabler, - enc: c.enc.Clone(), - out: c.out, - } -} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go deleted file mode 100644 index 31000e91..00000000 --- a/vendor/go.uber.org/zap/zapcore/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package zapcore defines and implements the low-level interfaces upon which -// zap is built. By providing alternate implementations of these interfaces, -// external packages can extend zap's capabilities. -package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go deleted file mode 100644 index f0509522..00000000 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
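Aside: a sketch of the Check-before-Write contract that the Core interface above spells out, exercised against a core built with NewCore; the entry and field values are invented.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.Lock(os.Stderr), // serialize concurrent writes
		zapcore.DebugLevel,
	)

	// Check starts from a nil *CheckedEntry; Write returns it to the pool.
	ent := zapcore.Entry{Level: zapcore.InfoLevel, Message: "direct core usage"}
	if ce := core.Check(ent, nil); ce != nil {
		ce.Write(zapcore.Field{Key: "k", Type: zapcore.StringType, String: "v"})
	}
}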
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "time" - - "go.uber.org/zap/buffer" -) - -// DefaultLineEnding defines the default line ending when writing logs. -// Alternate line endings specified in EncoderConfig can override this -// behavior. -const DefaultLineEnding = "\n" - -// A LevelEncoder serializes a Level to a primitive type. -type LevelEncoder func(Level, PrimitiveArrayEncoder) - -// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, -// InfoLevel is serialized to "info". -func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - enc.AppendString(l.String()) -} - -// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. -// For example, InfoLevel is serialized to "info" and colored blue. -func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - s, ok := _levelToLowercaseColorString[l] - if !ok { - s = _unknownLevelColor.Add(l.String()) - } - enc.AppendString(s) -} - -// CapitalLevelEncoder serializes a Level to an all-caps string. For example, -// InfoLevel is serialized to "INFO". -func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - enc.AppendString(l.CapitalString()) -} - -// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. -// For example, InfoLevel is serialized to "INFO" and colored blue. -func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - s, ok := _levelToCapitalColorString[l] - if !ok { - s = _unknownLevelColor.Add(l.CapitalString()) - } - enc.AppendString(s) -} - -// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to -// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, -// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else -// is unmarshaled to LowercaseLevelEncoder. -func (e *LevelEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "capital": - *e = CapitalLevelEncoder - case "capitalColor": - *e = CapitalColorLevelEncoder - case "color": - *e = LowercaseColorLevelEncoder - default: - *e = LowercaseLevelEncoder - } - return nil -} - -// A TimeEncoder serializes a time.Time to a primitive type. -type TimeEncoder func(time.Time, PrimitiveArrayEncoder) - -// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds -// since the Unix epoch. 
-func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - nanos := t.UnixNano() - sec := float64(nanos) / float64(time.Second) - enc.AppendFloat64(sec) -} - -// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of -// milliseconds since the Unix epoch. -func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - nanos := t.UnixNano() - millis := float64(nanos) / float64(time.Millisecond) - enc.AppendFloat64(millis) -} - -// EpochNanosTimeEncoder serializes a time.Time to an integer number of -// nanoseconds since the Unix epoch. -func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendInt64(t.UnixNano()) -} - -// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string -// with millisecond precision. -func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) -} - -// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are -// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to -// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. -func (e *TimeEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "iso8601", "ISO8601": - *e = ISO8601TimeEncoder - case "millis": - *e = EpochMillisTimeEncoder - case "nanos": - *e = EpochNanosTimeEncoder - default: - *e = EpochTimeEncoder - } - return nil -} - -// A DurationEncoder serializes a time.Duration to a primitive type. -type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) - -// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. -func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendFloat64(float64(d) / float64(time.Second)) -} - -// NanosDurationEncoder serializes a time.Duration to an integer number of -// nanoseconds elapsed. -func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendInt64(int64(d)) -} - -// StringDurationEncoder serializes a time.Duration using its built-in String -// method. -func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendString(d.String()) -} - -// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled -// to StringDurationEncoder, and anything else is unmarshaled to -// NanosDurationEncoder. -func (e *DurationEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "string": - *e = StringDurationEncoder - case "nanos": - *e = NanosDurationEncoder - default: - *e = SecondsDurationEncoder - } - return nil -} - -// A CallerEncoder serializes an EntryCaller to a primitive type. -type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) - -// FullCallerEncoder serializes a caller in /full/path/to/package/file:line -// format. -func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { - // TODO: consider using a byte-oriented API to save an allocation. - enc.AppendString(caller.String()) -} - -// ShortCallerEncoder serializes a caller in package/file:line format, trimming -// all but the final directory from the full path. -func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { - // TODO: consider using a byte-oriented API to save an allocation. - enc.AppendString(caller.TrimmedPath()) -} - -// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to -// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. 
-func (e *CallerEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *e = FullCallerEncoder - default: - *e = ShortCallerEncoder - } - return nil -} - -// A NameEncoder serializes a period-separated logger name to a primitive -// type. -type NameEncoder func(string, PrimitiveArrayEncoder) - -// FullNameEncoder serializes the logger name as-is. -func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { - enc.AppendString(loggerName) -} - -// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is -// unmarshaled to FullNameEncoder. -func (e *NameEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *e = FullNameEncoder - default: - *e = FullNameEncoder - } - return nil -} - -// An EncoderConfig allows users to configure the concrete encoders supplied by -// zapcore. -type EncoderConfig struct { - // Set the keys used for each log entry. If any key is empty, that portion - // of the entry is omitted. - MessageKey string `json:"messageKey" yaml:"messageKey"` - LevelKey string `json:"levelKey" yaml:"levelKey"` - TimeKey string `json:"timeKey" yaml:"timeKey"` - NameKey string `json:"nameKey" yaml:"nameKey"` - CallerKey string `json:"callerKey" yaml:"callerKey"` - StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` - LineEnding string `json:"lineEnding" yaml:"lineEnding"` - // Configure the primitive representations of common complex types. For - // example, some users may want all time.Times serialized as floating-point - // seconds since epoch, while others may prefer ISO8601 strings. - EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` - EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` - EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` - EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` - // Unlike the other primitive type encoders, EncodeName is optional. The - // zero value falls back to FullNameEncoder. - EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` -} - -// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a -// map- or struct-like object to the logging context. Like maps, ObjectEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ObjectEncoder interface { - // Logging-specific marshalers. - AddArray(key string, marshaler ArrayMarshaler) error - AddObject(key string, marshaler ObjectMarshaler) error - - // Built-in types. - AddBinary(key string, value []byte) // for arbitrary bytes - AddByteString(key string, value []byte) // for UTF-8 encoded bytes - AddBool(key string, value bool) - AddComplex128(key string, value complex128) - AddComplex64(key string, value complex64) - AddDuration(key string, value time.Duration) - AddFloat64(key string, value float64) - AddFloat32(key string, value float32) - AddInt(key string, value int) - AddInt64(key string, value int64) - AddInt32(key string, value int32) - AddInt16(key string, value int16) - AddInt8(key string, value int8) - AddString(key, value string) - AddTime(key string, value time.Time) - AddUint(key string, value uint) - AddUint64(key string, value uint64) - AddUint32(key string, value uint32) - AddUint16(key string, value uint16) - AddUint8(key string, value uint8) - AddUintptr(key string, value uintptr) - - // AddReflected uses reflection to serialize arbitrary objects, so it's slow - // and allocation-heavy. 
- AddReflected(key string, value interface{}) error - // OpenNamespace opens an isolated namespace where all subsequent fields will - // be added. Applications can use namespaces to prevent key collisions when - // injecting loggers into sub-components or third-party libraries. - OpenNamespace(key string) -} - -// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding -// array-like objects to the logging context. Of note, it supports mixed-type -// arrays even though they aren't typical in Go. Like slices, ArrayEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ArrayEncoder interface { - // Built-in types. - PrimitiveArrayEncoder - - // Time-related types. - AppendDuration(time.Duration) - AppendTime(time.Time) - - // Logging-specific marshalers. - AppendArray(ArrayMarshaler) error - AppendObject(ObjectMarshaler) error - - // AppendReflected uses reflection to serialize arbitrary objects, so it's - // slow and allocation-heavy. - AppendReflected(value interface{}) error -} - -// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals -// only in Go's built-in types. It's included only so that Duration- and -// TimeEncoders cannot trigger infinite recursion. -type PrimitiveArrayEncoder interface { - // Built-in types. - AppendBool(bool) - AppendByteString([]byte) // for UTF-8 encoded bytes - AppendComplex128(complex128) - AppendComplex64(complex64) - AppendFloat64(float64) - AppendFloat32(float32) - AppendInt(int) - AppendInt64(int64) - AppendInt32(int32) - AppendInt16(int16) - AppendInt8(int8) - AppendString(string) - AppendUint(uint) - AppendUint64(uint64) - AppendUint32(uint32) - AppendUint16(uint16) - AppendUint8(uint8) - AppendUintptr(uintptr) -} - -// Encoder is a format-agnostic interface for all log entry marshalers. Since -// log encoders don't need to support the same wide range of use cases as -// general-purpose marshalers, it's possible to make them faster and -// lower-allocation. -// -// Implementations of the ObjectEncoder interface's methods can, of course, -// freely modify the receiver. However, the Clone and EncodeEntry methods will -// be called concurrently and shouldn't modify the receiver. -type Encoder interface { - ObjectEncoder - - // Clone copies the encoder, ensuring that adding fields to the copy doesn't - // affect the original. - Clone() Encoder - - // EncodeEntry encodes an entry and fields, along with any accumulated - // context, into a byte buffer and returns it. - EncodeEntry(Entry, []Field) (*buffer.Buffer, error) -} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go deleted file mode 100644 index 7d9893f3..00000000 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
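Aside: a sketch wiring the EncoderConfig knobs listed above into a JSON core; the key names are arbitrary (an empty key omits that portion of the entry).

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		TimeKey:        "ts",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.CapitalLevelEncoder,
		EncodeTime:     zapcore.ISO8601TimeEncoder,
		EncodeDuration: zapcore.StringDurationEncoder,
	}
	logger := zap.New(zapcore.NewCore(
		zapcore.NewJSONEncoder(cfg),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	))
	logger.Info("encoder config demo", zap.Duration("took", 1500*time.Millisecond))
}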
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "strings" - "sync" - "time" - - "go.uber.org/zap/internal/bufferpool" - "go.uber.org/zap/internal/exit" - - "go.uber.org/multierr" -) - -var ( - _cePool = sync.Pool{New: func() interface{} { - // Pre-allocate some space for cores. - return &CheckedEntry{ - cores: make([]Core, 4), - } - }} -) - -func getCheckedEntry() *CheckedEntry { - ce := _cePool.Get().(*CheckedEntry) - ce.reset() - return ce -} - -func putCheckedEntry(ce *CheckedEntry) { - if ce == nil { - return - } - _cePool.Put(ce) -} - -// NewEntryCaller makes an EntryCaller from the return signature of -// runtime.Caller. -func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { - if !ok { - return EntryCaller{} - } - return EntryCaller{ - PC: pc, - File: file, - Line: line, - Defined: true, - } -} - -// EntryCaller represents the caller of a logging function. -type EntryCaller struct { - Defined bool - PC uintptr - File string - Line int -} - -// String returns the full path and line number of the caller. -func (ec EntryCaller) String() string { - return ec.FullPath() -} - -// FullPath returns a /full/path/to/package/file:line description of the -// caller. -func (ec EntryCaller) FullPath() string { - if !ec.Defined { - return "undefined" - } - buf := bufferpool.Get() - buf.AppendString(ec.File) - buf.AppendByte(':') - buf.AppendInt(int64(ec.Line)) - caller := buf.String() - buf.Free() - return caller -} - -// TrimmedPath returns a package/file:line description of the caller, -// preserving only the leaf directory name and file name. -func (ec EntryCaller) TrimmedPath() string { - if !ec.Defined { - return "undefined" - } - // nb. To make sure we trim the path correctly on Windows too, we - // counter-intuitively need to use '/' and *not* os.PathSeparator here, - // because the path given originates from Go stdlib, specifically - // runtime.Caller() which (as of Mar/17) returns forward slashes even on - // Windows. - // - // See https://github.com/golang/go/issues/3335 - // and https://github.com/golang/go/issues/18151 - // - // for discussion on the issue on Go side. - // - // Find the last separator. - // - idx := strings.LastIndexByte(ec.File, '/') - if idx == -1 { - return ec.FullPath() - } - // Find the penultimate separator. - idx = strings.LastIndexByte(ec.File[:idx], '/') - if idx == -1 { - return ec.FullPath() - } - buf := bufferpool.Get() - // Keep everything after the penultimate separator. - buf.AppendString(ec.File[idx+1:]) - buf.AppendByte(':') - buf.AppendInt(int64(ec.Line)) - caller := buf.String() - buf.Free() - return caller -} - -// An Entry represents a complete log message. The entry's structured context -// is already serialized, but the log level, time, message, and call site -// information are available for inspection and modification. -// -// Entries are pooled, so any functions that accept them MUST be careful not to -// retain references to them. 
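Aside: EntryCaller above is built from runtime.Caller's return signature; a sketch of the full versus trimmed path rendering (the line numbers in the comments will vary).

package main

import (
	"fmt"
	"runtime"

	"go.uber.org/zap/zapcore"
)

func main() {
	ec := zapcore.NewEntryCaller(runtime.Caller(0))
	fmt.Println(ec.FullPath())    // e.g. /full/path/to/pkg/file.go:13
	fmt.Println(ec.TrimmedPath()) // e.g. pkg/file.go:13
}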
-type Entry struct { - Level Level - Time time.Time - LoggerName string - Message string - Caller EntryCaller - Stack string -} - -// CheckWriteAction indicates what action to take after a log entry is -// processed. Actions are ordered in increasing severity. -type CheckWriteAction uint8 - -const ( - // WriteThenNoop indicates that nothing special needs to be done. It's the - // default behavior. - WriteThenNoop CheckWriteAction = iota - // WriteThenPanic causes a panic after Write. - WriteThenPanic - // WriteThenFatal causes a fatal os.Exit after Write. - WriteThenFatal -) - -// CheckedEntry is an Entry together with a collection of Cores that have -// already agreed to log it. -// -// CheckedEntry references should be created by calling AddCore or Should on a -// nil *CheckedEntry. References are returned to a pool after Write, and MUST -// NOT be retained after calling their Write method. -type CheckedEntry struct { - Entry - ErrorOutput WriteSyncer - dirty bool // best-effort detection of pool misuse - should CheckWriteAction - cores []Core -} - -func (ce *CheckedEntry) reset() { - ce.Entry = Entry{} - ce.ErrorOutput = nil - ce.dirty = false - ce.should = WriteThenNoop - for i := range ce.cores { - // don't keep references to cores - ce.cores[i] = nil - } - ce.cores = ce.cores[:0] -} - -// Write writes the entry to the stored Cores, returns any errors, and returns -// the CheckedEntry reference to a pool for immediate re-use. Finally, it -// executes any required CheckWriteAction. -func (ce *CheckedEntry) Write(fields ...Field) { - if ce == nil { - return - } - - if ce.dirty { - if ce.ErrorOutput != nil { - // Make a best effort to detect unsafe re-use of this CheckedEntry. - // If the entry is dirty, log an internal error; because the - // CheckedEntry is being used after it was returned to the pool, - // the message may be an amalgamation from multiple call sites. - fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) - ce.ErrorOutput.Sync() - } - return - } - ce.dirty = true - - var err error - for i := range ce.cores { - err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) - } - if ce.ErrorOutput != nil { - if err != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) - ce.ErrorOutput.Sync() - } - } - - should, msg := ce.should, ce.Message - putCheckedEntry(ce) - - switch should { - case WriteThenPanic: - panic(msg) - case WriteThenFatal: - exit.Exit() - } -} - -// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be -// used by Core.Check implementations, and is safe to call on nil CheckedEntry -// references. -func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { - if ce == nil { - ce = getCheckedEntry() - ce.Entry = ent - } - ce.cores = append(ce.cores, core) - return ce -} - -// Should sets this CheckedEntry's CheckWriteAction, which controls whether a -// Core will panic or fatal after writing this log entry. Like AddCore, it's -// safe to call on nil CheckedEntry references. -func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { - if ce == nil { - ce = getCheckedEntry() - ce.Entry = ent - } - ce.should = should - return ce -} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go deleted file mode 100644 index a67c7bac..00000000 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
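Aside: from the high-level API, the CheckedEntry machinery above surfaces as the Check pattern, which skips expensive field construction when an entry won't be logged; expensiveDump is a hypothetical stand-in.

package main

import "go.uber.org/zap"

// expensiveDump stands in for costly work we only want on the debug path.
func expensiveDump() string { return "...big state..." }

func main() {
	logger := zap.NewExample()
	// Check returns nil when the level is disabled, so expensiveDump only
	// runs if the entry will actually be written.
	if ce := logger.Check(zap.DebugLevel, "state dump"); ce != nil {
		ce.Write(zap.String("dump", expensiveDump()))
	}
}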
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "sync" -) - -// Encodes the given error into fields of an object. A field with the given -// name is added for the error message. -// -// If the error implements fmt.Formatter, a field with the name ${key}Verbose -// is also added with the full verbose error message. -// -// Finally, if the error implements errorGroup (from go.uber.org/multierr) or -// causer (from github.com/pkg/errors), a ${key}Causes field is added with an -// array of objects containing the errors this error was comprised of. -// -// { -// "error": err.Error(), -// "errorVerbose": fmt.Sprintf("%+v", err), -// "errorCauses": [ -// ... -// ], -// } -func encodeError(key string, err error, enc ObjectEncoder) error { - basic := err.Error() - enc.AddString(key, basic) - - switch e := err.(type) { - case errorGroup: - return enc.AddArray(key+"Causes", errArray(e.Errors())) - case fmt.Formatter: - verbose := fmt.Sprintf("%+v", e) - if verbose != basic { - // This is a rich error type, like those produced by - // github.com/pkg/errors. - enc.AddString(key+"Verbose", verbose) - } - } - return nil -} - -type errorGroup interface { - // Provides read-only access to the underlying list of errors, preferably - // without causing any allocs. - Errors() []error -} - -type causer interface { - // Provides access to the error that caused this error. - Cause() error -} - -// Note that errArry and errArrayElem are very similar to the version -// implemented in the top-level error.go file. We can't re-use this because -// that would require exporting errArray as part of the zapcore API. - -// Encodes a list of errors using the standard error encoding logic. -type errArray []error - -func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { - for i := range errs { - if errs[i] == nil { - continue - } - - el := newErrArrayElem(errs[i]) - arr.AppendObject(el) - el.Free() - } - return nil -} - -var _errArrayElemPool = sync.Pool{New: func() interface{} { - return &errArrayElem{} -}} - -// Encodes any error into a {"error": ...} re-using the same errors logic. -// -// May be passed in place of an array to build a single-element array. 
-type errArrayElem struct{ err error } - -func newErrArrayElem(err error) *errArrayElem { - e := _errArrayElemPool.Get().(*errArrayElem) - e.err = err - return e -} - -func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { - return arr.AppendObject(e) -} - -func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { - return encodeError("error", e.err, enc) -} - -func (e *errArrayElem) Free() { - e.err = nil - _errArrayElemPool.Put(e) -} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go deleted file mode 100644 index 6a5e33e2..00000000 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "bytes" - "fmt" - "math" - "reflect" - "time" -) - -// A FieldType indicates which member of the Field union struct should be used -// and how it should be serialized. -type FieldType uint8 - -const ( - // UnknownType is the default field type. Attempting to add it to an encoder will panic. - UnknownType FieldType = iota - // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. - ArrayMarshalerType - // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. - ObjectMarshalerType - // BinaryType indicates that the field carries an opaque binary blob. - BinaryType - // BoolType indicates that the field carries a bool. - BoolType - // ByteStringType indicates that the field carries UTF-8 encoded bytes. - ByteStringType - // Complex128Type indicates that the field carries a complex128. - Complex128Type - // Complex64Type indicates that the field carries a complex128. - Complex64Type - // DurationType indicates that the field carries a time.Duration. - DurationType - // Float64Type indicates that the field carries a float64. - Float64Type - // Float32Type indicates that the field carries a float32. - Float32Type - // Int64Type indicates that the field carries an int64. - Int64Type - // Int32Type indicates that the field carries an int32. - Int32Type - // Int16Type indicates that the field carries an int16. - Int16Type - // Int8Type indicates that the field carries an int8. - Int8Type - // StringType indicates that the field carries a string. - StringType - // TimeType indicates that the field carries a time.Time. - TimeType - // Uint64Type indicates that the field carries a uint64. 
- Uint64Type - // Uint32Type indicates that the field carries a uint32. - Uint32Type - // Uint16Type indicates that the field carries a uint16. - Uint16Type - // Uint8Type indicates that the field carries a uint8. - Uint8Type - // UintptrType indicates that the field carries a uintptr. - UintptrType - // ReflectType indicates that the field carries an interface{}, which should - // be serialized using reflection. - ReflectType - // NamespaceType signals the beginning of an isolated namespace. All - // subsequent fields should be added to the new namespace. - NamespaceType - // StringerType indicates that the field carries a fmt.Stringer. - StringerType - // ErrorType indicates that the field carries an error. - ErrorType - // SkipType indicates that the field is a no-op. - SkipType -) - -// A Field is a marshaling operation used to add a key-value pair to a logger's -// context. Most fields are lazily marshaled, so it's inexpensive to add fields -// to disabled debug-level log statements. -type Field struct { - Key string - Type FieldType - Integer int64 - String string - Interface interface{} -} - -// AddTo exports a field through the ObjectEncoder interface. It's primarily -// useful to library authors, and shouldn't be necessary in most applications. -func (f Field) AddTo(enc ObjectEncoder) { - var err error - - switch f.Type { - case ArrayMarshalerType: - err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) - case ObjectMarshalerType: - err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) - case BinaryType: - enc.AddBinary(f.Key, f.Interface.([]byte)) - case BoolType: - enc.AddBool(f.Key, f.Integer == 1) - case ByteStringType: - enc.AddByteString(f.Key, f.Interface.([]byte)) - case Complex128Type: - enc.AddComplex128(f.Key, f.Interface.(complex128)) - case Complex64Type: - enc.AddComplex64(f.Key, f.Interface.(complex64)) - case DurationType: - enc.AddDuration(f.Key, time.Duration(f.Integer)) - case Float64Type: - enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) - case Float32Type: - enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) - case Int64Type: - enc.AddInt64(f.Key, f.Integer) - case Int32Type: - enc.AddInt32(f.Key, int32(f.Integer)) - case Int16Type: - enc.AddInt16(f.Key, int16(f.Integer)) - case Int8Type: - enc.AddInt8(f.Key, int8(f.Integer)) - case StringType: - enc.AddString(f.Key, f.String) - case TimeType: - if f.Interface != nil { - enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) - } else { - // Fall back to UTC if location is nil. - enc.AddTime(f.Key, time.Unix(0, f.Integer)) - } - case Uint64Type: - enc.AddUint64(f.Key, uint64(f.Integer)) - case Uint32Type: - enc.AddUint32(f.Key, uint32(f.Integer)) - case Uint16Type: - enc.AddUint16(f.Key, uint16(f.Integer)) - case Uint8Type: - enc.AddUint8(f.Key, uint8(f.Integer)) - case UintptrType: - enc.AddUintptr(f.Key, uintptr(f.Integer)) - case ReflectType: - err = enc.AddReflected(f.Key, f.Interface) - case NamespaceType: - enc.OpenNamespace(f.Key) - case StringerType: - enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) - case ErrorType: - encodeError(f.Key, f.Interface.(error), enc) - case SkipType: - break - default: - panic(fmt.Sprintf("unknown field type: %v", f)) - } - - if err != nil { - enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) - } -} - -// Equals returns whether two fields are equal. For non-primitive types such as -// errors, marshalers, or reflect types, it uses reflect.DeepEqual. 
-func (f Field) Equals(other Field) bool { - if f.Type != other.Type { - return false - } - if f.Key != other.Key { - return false - } - - switch f.Type { - case BinaryType, ByteStringType: - return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) - case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: - return reflect.DeepEqual(f.Interface, other.Interface) - default: - return f == other - } -} - -func addFields(enc ObjectEncoder, fields []Field) { - for i := range fields { - fields[i].AddTo(enc) - } -} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go deleted file mode 100644 index 5db4afb3..00000000 --- a/vendor/go.uber.org/zap/zapcore/hook.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/multierr" - -type hooked struct { - Core - funcs []func(Entry) error -} - -// RegisterHooks wraps a Core and runs a collection of user-defined callback -// hooks each time a message is logged. Execution of the callbacks is blocking. -// -// This offers users an easy way to register simple callbacks (e.g., metrics -// collection) without implementing the full Core interface. -func RegisterHooks(core Core, hooks ...func(Entry) error) Core { - funcs := append([]func(Entry) error{}, hooks...) - return &hooked{ - Core: core, - funcs: funcs, - } -} - -func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - // Let the wrapped Core decide whether to log this message or not. This - // also gives the downstream a chance to register itself directly with the - // CheckedEntry. - if downstream := h.Core.Check(ent, ce); downstream != nil { - return downstream.AddCore(ent, h) - } - return ce -} - -func (h *hooked) With(fields []Field) Core { - return &hooked{ - Core: h.Core.With(fields), - funcs: h.funcs, - } -} - -func (h *hooked) Write(ent Entry, _ []Field) error { - // Since our downstream had a chance to register itself directly with the - // CheckedMessage, we don't need to call it here. - var err error - for i := range h.funcs { - err = multierr.Append(err, h.funcs[i](ent)) - } - return err -} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go deleted file mode 100644 index 2dc67d81..00000000 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
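Aside: a sketch of RegisterHooks above feeding a metrics-style counter; the counter is a stand-in for real instrumentation.

package main

import (
	"os"
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var errorCount int64

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	hooked := zapcore.RegisterHooks(base, func(ent zapcore.Entry) error {
		if ent.Level >= zapcore.ErrorLevel {
			atomic.AddInt64(&errorCount, 1) // e.g. increment a metric
		}
		return nil
	})

	zap.New(hooked).Error("boom") // logs the entry, then runs the hook
}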
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "encoding/base64" - "encoding/json" - "math" - "sync" - "time" - "unicode/utf8" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/internal/bufferpool" -) - -// For JSON-escaping; see jsonEncoder.safeAddString below. -const _hex = "0123456789abcdef" - -var _jsonPool = sync.Pool{New: func() interface{} { - return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} - -func putJSONEncoder(enc *jsonEncoder) { - if enc.reflectBuf != nil { - enc.reflectBuf.Free() - } - enc.EncoderConfig = nil - enc.buf = nil - enc.spaced = false - enc.openNamespaces = 0 - enc.reflectBuf = nil - enc.reflectEnc = nil - _jsonPool.Put(enc) -} - -type jsonEncoder struct { - *EncoderConfig - buf *buffer.Buffer - spaced bool // include spaces after colons and commas - openNamespaces int - - // for encoding generic values by reflection - reflectBuf *buffer.Buffer - reflectEnc *json.Encoder -} - -// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder -// appropriately escapes all field keys and values. -// -// Note that the encoder doesn't deduplicate keys, so it's possible to produce -// a message like -// {"foo":"bar","foo":"baz"} -// This is permitted by the JSON specification, but not encouraged. Many -// libraries will ignore duplicate key-value pairs (typically keeping the last -// pair) when unmarshaling, but users should attempt to avoid adding duplicate -// keys. 
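Aside: a quick demonstration of the duplicate-key caveat above; the output comment assumes NewExample's stock configuration.

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	logger.With(zap.String("foo", "bar")).Info("dup", zap.String("foo", "baz"))
	// {"level":"info","msg":"dup","foo":"bar","foo":"baz"}
}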
-func NewJSONEncoder(cfg EncoderConfig) Encoder { - return newJSONEncoder(cfg, false) -} - -func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { - return &jsonEncoder{ - EncoderConfig: &cfg, - buf: bufferpool.Get(), - spaced: spaced, - } -} - -func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { - enc.addKey(key) - return enc.AppendArray(arr) -} - -func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { - enc.addKey(key) - return enc.AppendObject(obj) -} - -func (enc *jsonEncoder) AddBinary(key string, val []byte) { - enc.AddString(key, base64.StdEncoding.EncodeToString(val)) -} - -func (enc *jsonEncoder) AddByteString(key string, val []byte) { - enc.addKey(key) - enc.AppendByteString(val) -} - -func (enc *jsonEncoder) AddBool(key string, val bool) { - enc.addKey(key) - enc.AppendBool(val) -} - -func (enc *jsonEncoder) AddComplex128(key string, val complex128) { - enc.addKey(key) - enc.AppendComplex128(val) -} - -func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { - enc.addKey(key) - enc.AppendDuration(val) -} - -func (enc *jsonEncoder) AddFloat64(key string, val float64) { - enc.addKey(key) - enc.AppendFloat64(val) -} - -func (enc *jsonEncoder) AddInt64(key string, val int64) { - enc.addKey(key) - enc.AppendInt64(val) -} - -func (enc *jsonEncoder) resetReflectBuf() { - if enc.reflectBuf == nil { - enc.reflectBuf = bufferpool.Get() - enc.reflectEnc = json.NewEncoder(enc.reflectBuf) - } else { - enc.reflectBuf.Reset() - } -} - -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(obj) - if err != nil { - return err - } - enc.reflectBuf.TrimNewline() - enc.addKey(key) - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) - return err -} - -func (enc *jsonEncoder) OpenNamespace(key string) { - enc.addKey(key) - enc.buf.AppendByte('{') - enc.openNamespaces++ -} - -func (enc *jsonEncoder) AddString(key, val string) { - enc.addKey(key) - enc.AppendString(val) -} - -func (enc *jsonEncoder) AddTime(key string, val time.Time) { - enc.addKey(key) - enc.AppendTime(val) -} - -func (enc *jsonEncoder) AddUint64(key string, val uint64) { - enc.addKey(key) - enc.AppendUint64(val) -} - -func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('[') - err := arr.MarshalLogArray(enc) - enc.buf.AppendByte(']') - return err -} - -func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('{') - err := obj.MarshalLogObject(enc) - enc.buf.AppendByte('}') - return err -} - -func (enc *jsonEncoder) AppendBool(val bool) { - enc.addElementSeparator() - enc.buf.AppendBool(val) -} - -func (enc *jsonEncoder) AppendByteString(val []byte) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddByteString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendComplex128(val complex128) { - enc.addElementSeparator() - // Cast to a platform-independent, fixed-size type. - r, i := float64(real(val)), float64(imag(val)) - enc.buf.AppendByte('"') - // Because we're always in a quoted string, we can use strconv without - // special-casing NaN and +/-Inf. 
- enc.buf.AppendFloat(r, 64) - enc.buf.AppendByte('+') - enc.buf.AppendFloat(i, 64) - enc.buf.AppendByte('i') - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendDuration(val time.Duration) { - cur := enc.buf.Len() - enc.EncodeDuration(val, enc) - if cur == enc.buf.Len() { - // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep - // JSON valid. - enc.AppendInt64(int64(val)) - } -} - -func (enc *jsonEncoder) AppendInt64(val int64) { - enc.addElementSeparator() - enc.buf.AppendInt(val) -} - -func (enc *jsonEncoder) AppendReflected(val interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(val) - if err != nil { - return err - } - enc.reflectBuf.TrimNewline() - enc.addElementSeparator() - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) - return err -} - -func (enc *jsonEncoder) AppendString(val string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendTime(val time.Time) { - cur := enc.buf.Len() - enc.EncodeTime(val, enc) - if cur == enc.buf.Len() { - // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep - // output JSON valid. - enc.AppendInt64(val.UnixNano()) - } -} - -func (enc *jsonEncoder) AppendUint64(val uint64) { - enc.addElementSeparator() - enc.buf.AppendUint(val) -} - -func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } -func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } -func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } -func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } -func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } - -func (enc *jsonEncoder) Clone() Encoder { - clone := enc.clone() - clone.buf.Write(enc.buf.Bytes()) - return clone -} - -func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() - clone.EncoderConfig = enc.EncoderConfig - clone.spaced = enc.spaced - 
clone.openNamespaces = enc.openNamespaces - clone.buf = bufferpool.Get() - return clone -} - -func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { - final := enc.clone() - final.buf.AppendByte('{') - - if final.LevelKey != "" { - final.addKey(final.LevelKey) - cur := final.buf.Len() - final.EncodeLevel(ent.Level, final) - if cur == final.buf.Len() { - // User-supplied EncodeLevel was a no-op. Fall back to strings to keep - // output JSON valid. - final.AppendString(ent.Level.String()) - } - } - if final.TimeKey != "" { - final.AddTime(final.TimeKey, ent.Time) - } - if ent.LoggerName != "" && final.NameKey != "" { - final.addKey(final.NameKey) - cur := final.buf.Len() - nameEncoder := final.EncodeName - - // if no name encoder provided, fall back to FullNameEncoder for backwards - // compatibility - if nameEncoder == nil { - nameEncoder = FullNameEncoder - } - - nameEncoder(ent.LoggerName, final) - if cur == final.buf.Len() { - // User-supplied EncodeName was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.LoggerName) - } - } - if ent.Caller.Defined && final.CallerKey != "" { - final.addKey(final.CallerKey) - cur := final.buf.Len() - final.EncodeCaller(ent.Caller, final) - if cur == final.buf.Len() { - // User-supplied EncodeCaller was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.Caller.String()) - } - } - if final.MessageKey != "" { - final.addKey(enc.MessageKey) - final.AppendString(ent.Message) - } - if enc.buf.Len() > 0 { - final.addElementSeparator() - final.buf.Write(enc.buf.Bytes()) - } - addFields(final, fields) - final.closeOpenNamespaces() - if ent.Stack != "" && final.StacktraceKey != "" { - final.AddString(final.StacktraceKey, ent.Stack) - } - final.buf.AppendByte('}') - if final.LineEnding != "" { - final.buf.AppendString(final.LineEnding) - } else { - final.buf.AppendString(DefaultLineEnding) - } - - ret := final.buf - putJSONEncoder(final) - return ret, nil -} - -func (enc *jsonEncoder) truncate() { - enc.buf.Reset() -} - -func (enc *jsonEncoder) closeOpenNamespaces() { - for i := 0; i < enc.openNamespaces; i++ { - enc.buf.AppendByte('}') - } -} - -func (enc *jsonEncoder) addKey(key string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(key) - enc.buf.AppendByte('"') - enc.buf.AppendByte(':') - if enc.spaced { - enc.buf.AppendByte(' ') - } -} - -func (enc *jsonEncoder) addElementSeparator() { - last := enc.buf.Len() - 1 - if last < 0 { - return - } - switch enc.buf.Bytes()[last] { - case '{', '[', ':', ',', ' ': - return - default: - enc.buf.AppendByte(',') - if enc.spaced { - enc.buf.AppendByte(' ') - } - } -} - -func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { - enc.addElementSeparator() - switch { - case math.IsNaN(val): - enc.buf.AppendString(`"NaN"`) - case math.IsInf(val, 1): - enc.buf.AppendString(`"+Inf"`) - case math.IsInf(val, -1): - enc.buf.AppendString(`"-Inf"`) - default: - enc.buf.AppendFloat(val, bitSize) - } -} - -// safeAddString JSON-escapes a string and appends it to the internal buffer. -// Unlike the standard library's encoder, it doesn't attempt to protect the -// user from browser vulnerabilities or JSONP-related problems. 
-func (enc *jsonEncoder) safeAddString(s string) {
-	for i := 0; i < len(s); {
-		if enc.tryAddRuneSelf(s[i]) {
-			i++
-			continue
-		}
-		r, size := utf8.DecodeRuneInString(s[i:])
-		if enc.tryAddRuneError(r, size) {
-			i++
-			continue
-		}
-		enc.buf.AppendString(s[i : i+size])
-		i += size
-	}
-}
-
-// safeAddByteString is a no-alloc equivalent of safeAddString(string(s)) for s []byte.
-func (enc *jsonEncoder) safeAddByteString(s []byte) {
-	for i := 0; i < len(s); {
-		if enc.tryAddRuneSelf(s[i]) {
-			i++
-			continue
-		}
-		r, size := utf8.DecodeRune(s[i:])
-		if enc.tryAddRuneError(r, size) {
-			i++
-			continue
-		}
-		enc.buf.Write(s[i : i+size])
-		i += size
-	}
-}
-
-// tryAddRuneSelf appends b if it is a valid UTF-8 character represented in a single byte.
-func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
-	if b >= utf8.RuneSelf {
-		return false
-	}
-	if 0x20 <= b && b != '\\' && b != '"' {
-		enc.buf.AppendByte(b)
-		return true
-	}
-	switch b {
-	case '\\', '"':
-		enc.buf.AppendByte('\\')
-		enc.buf.AppendByte(b)
-	case '\n':
-		enc.buf.AppendByte('\\')
-		enc.buf.AppendByte('n')
-	case '\r':
-		enc.buf.AppendByte('\\')
-		enc.buf.AppendByte('r')
-	case '\t':
-		enc.buf.AppendByte('\\')
-		enc.buf.AppendByte('t')
-	default:
-		// Encode bytes < 0x20, except for the escape sequences above.
-		enc.buf.AppendString(`\u00`)
-		enc.buf.AppendByte(_hex[b>>4])
-		enc.buf.AppendByte(_hex[b&0xF])
-	}
-	return true
-}
-
-func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
-	if r == utf8.RuneError && size == 1 {
-		enc.buf.AppendString(`\ufffd`)
-		return true
-	}
-	return false
-}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
deleted file mode 100644
index e575c9f4..00000000
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-)
-
-var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
-
-// A Level is a logging priority. Higher levels are more important.
-type Level int8
-
-const (
-	// DebugLevel logs are typically voluminous, and are usually disabled in
-	// production.
-	DebugLevel Level = iota - 1
-	// InfoLevel is the default logging priority.
-	InfoLevel
-	// WarnLevel logs are more important than Info, but don't need individual
-	// human review.
-	WarnLevel
-	// ErrorLevel logs are high-priority.
If an application is running smoothly, - // it shouldn't generate any error-level logs. - ErrorLevel - // DPanicLevel logs are particularly important errors. In development the - // logger panics after writing the message. - DPanicLevel - // PanicLevel logs a message, then panics. - PanicLevel - // FatalLevel logs a message, then calls os.Exit(1). - FatalLevel - - _minLevel = DebugLevel - _maxLevel = FatalLevel -) - -// String returns a lower-case ASCII representation of the log level. -func (l Level) String() string { - switch l { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warn" - case ErrorLevel: - return "error" - case DPanicLevel: - return "dpanic" - case PanicLevel: - return "panic" - case FatalLevel: - return "fatal" - default: - return fmt.Sprintf("Level(%d)", l) - } -} - -// CapitalString returns an all-caps ASCII representation of the log level. -func (l Level) CapitalString() string { - // Printing levels in all-caps is common enough that we should export this - // functionality. - switch l { - case DebugLevel: - return "DEBUG" - case InfoLevel: - return "INFO" - case WarnLevel: - return "WARN" - case ErrorLevel: - return "ERROR" - case DPanicLevel: - return "DPANIC" - case PanicLevel: - return "PANIC" - case FatalLevel: - return "FATAL" - default: - return fmt.Sprintf("LEVEL(%d)", l) - } -} - -// MarshalText marshals the Level to text. Note that the text representation -// drops the -Level suffix (see example). -func (l Level) MarshalText() ([]byte, error) { - return []byte(l.String()), nil -} - -// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText -// expects the text representation of a Level to drop the -Level suffix (see -// example). -// -// In particular, this makes it easy to configure logging levels using YAML, -// TOML, or JSON files. -func (l *Level) UnmarshalText(text []byte) error { - if l == nil { - return errUnmarshalNilLevel - } - if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { - return fmt.Errorf("unrecognized level: %q", text) - } - return nil -} - -func (l *Level) unmarshalText(text []byte) bool { - switch string(text) { - case "debug", "DEBUG": - *l = DebugLevel - case "info", "INFO", "": // make the zero value useful - *l = InfoLevel - case "warn", "WARN": - *l = WarnLevel - case "error", "ERROR": - *l = ErrorLevel - case "dpanic", "DPANIC": - *l = DPanicLevel - case "panic", "PANIC": - *l = PanicLevel - case "fatal", "FATAL": - *l = FatalLevel - default: - return false - } - return true -} - -// Set sets the level for the flag.Value interface. -func (l *Level) Set(s string) error { - return l.UnmarshalText([]byte(s)) -} - -// Get gets the level for the flag.Getter interface. -func (l *Level) Get() interface{} { - return *l -} - -// Enabled returns true if the given level is at or above this level. -func (l Level) Enabled(lvl Level) bool { - return lvl >= l -} - -// LevelEnabler decides whether a given logging level is enabled when logging a -// message. -// -// Enablers are intended to be used to implement deterministic filters; -// concerns like sampling are better implemented as a Core. -// -// Each concrete Level value implements a static LevelEnabler which returns -// true for itself and all higher logging levels. For example WarnLevel.Enabled() -// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and -// FatalLevel, but return false for InfoLevel and DebugLevel. 
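As a reference for anyone replacing these levels elsewhere: the Enabled contract described above (a level enables itself and everything above it) and the text round-trip are easy to exercise. A minimal sketch against the upstream module:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	var lvl zapcore.Level
	// UnmarshalText accepts lower- and upper-case names; "" maps to InfoLevel.
	if err := lvl.UnmarshalText([]byte("warn")); err != nil {
		panic(err)
	}
	fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true: Error is at or above Warn
	fmt.Println(lvl.Enabled(zapcore.InfoLevel))  // false: Info is below Warn
}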
-type LevelEnabler interface { - Enabled(Level) bool -} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go deleted file mode 100644 index 7af8dadc..00000000 --- a/vendor/go.uber.org/zap/zapcore/level_strings.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/zap/internal/color" - -var ( - _levelToColor = map[Level]color.Color{ - DebugLevel: color.Magenta, - InfoLevel: color.Blue, - WarnLevel: color.Yellow, - ErrorLevel: color.Red, - DPanicLevel: color.Red, - PanicLevel: color.Red, - FatalLevel: color.Red, - } - _unknownLevelColor = color.Red - - _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) - _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) -) - -func init() { - for level, color := range _levelToColor { - _levelToLowercaseColorString[level] = color.Add(level.String()) - _levelToCapitalColorString[level] = color.Add(level.CapitalString()) - } -} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go deleted file mode 100644 index 2627a653..00000000 --- a/vendor/go.uber.org/zap/zapcore/marshaler.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package zapcore - -// ObjectMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -type ObjectMarshaler interface { - MarshalLogObject(ObjectEncoder) error -} - -// ObjectMarshalerFunc is a type adapter that turns a function into an -// ObjectMarshaler. -type ObjectMarshalerFunc func(ObjectEncoder) error - -// MarshalLogObject calls the underlying function. -func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { - return f(enc) -} - -// ArrayMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -type ArrayMarshaler interface { - MarshalLogArray(ArrayEncoder) error -} - -// ArrayMarshalerFunc is a type adapter that turns a function into an -// ArrayMarshaler. -type ArrayMarshalerFunc func(ArrayEncoder) error - -// MarshalLogArray calls the underlying function. -func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { - return f(enc) -} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go deleted file mode 100644 index 6ef85b09..00000000 --- a/vendor/go.uber.org/zap/zapcore/memory_encoder.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "time" - -// MapObjectEncoder is an ObjectEncoder backed by a simple -// map[string]interface{}. It's not fast enough for production use, but it's -// helpful in tests. -type MapObjectEncoder struct { - // Fields contains the entire encoded log context. - Fields map[string]interface{} - // cur is a pointer to the namespace we're currently writing to. - cur map[string]interface{} -} - -// NewMapObjectEncoder creates a new map-backed ObjectEncoder. -func NewMapObjectEncoder() *MapObjectEncoder { - m := make(map[string]interface{}) - return &MapObjectEncoder{ - Fields: m, - cur: m, - } -} - -// AddArray implements ObjectEncoder. -func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { - arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} - err := v.MarshalLogArray(arr) - m.cur[key] = arr.elems - return err -} - -// AddObject implements ObjectEncoder. 
-func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { - newMap := NewMapObjectEncoder() - m.cur[k] = newMap.Fields - return v.MarshalLogObject(newMap) -} - -// AddBinary implements ObjectEncoder. -func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } - -// AddByteString implements ObjectEncoder. -func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } - -// AddBool implements ObjectEncoder. -func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } - -// AddDuration implements ObjectEncoder. -func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } - -// AddComplex128 implements ObjectEncoder. -func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } - -// AddComplex64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } - -// AddFloat64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } - -// AddFloat32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } - -// AddInt implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } - -// AddInt64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } - -// AddInt32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } - -// AddInt16 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } - -// AddInt8 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } - -// AddString implements ObjectEncoder. -func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } - -// AddTime implements ObjectEncoder. -func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } - -// AddUint implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } - -// AddUint64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } - -// AddUint32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } - -// AddUint16 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } - -// AddUint8 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } - -// AddUintptr implements ObjectEncoder. -func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } - -// AddReflected implements ObjectEncoder. -func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { - m.cur[k] = v - return nil -} - -// OpenNamespace implements ObjectEncoder. -func (m *MapObjectEncoder) OpenNamespace(k string) { - ns := make(map[string]interface{}) - m.cur[k] = ns - m.cur = ns -} - -// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like -// the MapObjectEncoder, it's not designed for production use. 
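The map-backed encoder above is the usual way to unit-test ObjectMarshaler implementations without standing up a full logger. A minimal sketch against the upstream module (the user type is hypothetical):

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

// user is a hypothetical type implementing zapcore.ObjectMarshaler.
type user struct{ name string }

func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.name)
	return nil
}

func main() {
	enc := zapcore.NewMapObjectEncoder()
	if err := enc.AddObject("user", user{name: "alice"}); err != nil {
		panic(err)
	}
	fmt.Println(enc.Fields["user"]) // map[name:alice]
}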
-type sliceArrayEncoder struct { - elems []interface{} -} - -func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { - enc := &sliceArrayEncoder{} - err := v.MarshalLogArray(enc) - s.elems = append(s.elems, enc.elems) - return err -} - -func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { - m := NewMapObjectEncoder() - err := v.MarshalLogObject(m) - s.elems = append(s.elems, m.Fields) - return err -} - -func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { - s.elems = append(s.elems, v) - return nil -} - -func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go deleted file mode 100644 index e3164186..00000000 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "time" - - "go.uber.org/atomic" -) - -const ( - _numLevels = _maxLevel - _minLevel + 1 - _countersPerLevel = 4096 -) - -type counter struct { - resetAt atomic.Int64 - counter atomic.Uint64 -} - -type counters [_numLevels][_countersPerLevel]counter - -func newCounters() *counters { - return &counters{} -} - -func (cs *counters) get(lvl Level, key string) *counter { - i := lvl - _minLevel - j := fnv32a(key) % _countersPerLevel - return &cs[i][j] -} - -// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc -func fnv32a(s string) uint32 { - const ( - offset32 = 2166136261 - prime32 = 16777619 - ) - hash := uint32(offset32) - for i := 0; i < len(s); i++ { - hash ^= uint32(s[i]) - hash *= prime32 - } - return hash -} - -func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { - tn := t.UnixNano() - resetAfter := c.resetAt.Load() - if resetAfter > tn { - return c.counter.Inc() - } - - c.counter.Store(1) - - newResetAfter := tn + tick.Nanoseconds() - if !c.resetAt.CAS(resetAfter, newResetAfter) { - // We raced with another goroutine trying to reset, and it also reset - // the counter to 1, so we need to reincrement the counter. - return c.counter.Inc() - } - - return 1 -} - -type sampler struct { - Core - - counts *counters - tick time.Duration - first, thereafter uint64 -} - -// NewSampler creates a Core that samples incoming entries, which caps the CPU -// and I/O load of logging while attempting to preserve a representative subset -// of your logs. -// -// Zap samples by logging the first N entries with a given level and message -// each tick. If more Entries with the same level and message are seen during -// the same interval, every Mth message is logged and the rest are dropped. -// -// Keep in mind that zap's sampling implementation is optimized for speed over -// absolute precision; under load, each tick may be slightly over- or -// under-sampled. -func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { - return &sampler{ - Core: core, - tick: tick, - counts: newCounters(), - first: uint64(first), - thereafter: uint64(thereafter), - } -} - -func (s *sampler) With(fields []Field) Core { - return &sampler{ - Core: s.Core.With(fields), - tick: s.tick, - counts: s.counts, - first: s.first, - thereafter: s.thereafter, - } -} - -func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if !s.Enabled(ent.Level) { - return ce - } - - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - return ce - } - return s.Core.Check(ent, ce) -} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go deleted file mode 100644 index 07a32eef..00000000 --- a/vendor/go.uber.org/zap/zapcore/tee.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/multierr" - -type multiCore []Core - -// NewTee creates a Core that duplicates log entries into two or more -// underlying Cores. -// -// Calling it with a single Core returns the input unchanged, and calling -// it with no input returns a no-op Core. -func NewTee(cores ...Core) Core { - switch len(cores) { - case 0: - return NewNopCore() - case 1: - return cores[0] - default: - return multiCore(cores) - } -} - -func (mc multiCore) With(fields []Field) Core { - clone := make(multiCore, len(mc)) - for i := range mc { - clone[i] = mc[i].With(fields) - } - return clone -} - -func (mc multiCore) Enabled(lvl Level) bool { - for i := range mc { - if mc[i].Enabled(lvl) { - return true - } - } - return false -} - -func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - for i := range mc { - ce = mc[i].Check(ent, ce) - } - return ce -} - -func (mc multiCore) Write(ent Entry, fields []Field) error { - var err error - for i := range mc { - err = multierr.Append(err, mc[i].Write(ent, fields)) - } - return err -} - -func (mc multiCore) Sync() error { - var err error - for i := range mc { - err = multierr.Append(err, mc[i].Sync()) - } - return err -} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go deleted file mode 100644 index 209e25fe..00000000 --- a/vendor/go.uber.org/zap/zapcore/write_syncer.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "io" - "sync" - - "go.uber.org/multierr" -) - -// A WriteSyncer is an io.Writer that can also flush any buffered data. Note -// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. -type WriteSyncer interface { - io.Writer - Sync() error -} - -// AddSync converts an io.Writer to a WriteSyncer. It attempts to be -// intelligent: if the concrete type of the io.Writer implements WriteSyncer, -// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. -func AddSync(w io.Writer) WriteSyncer { - switch w := w.(type) { - case WriteSyncer: - return w - default: - return writerWrapper{w} - } -} - -type lockedWriteSyncer struct { - sync.Mutex - ws WriteSyncer -} - -// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In -// particular, *os.Files must be locked before use. -func Lock(ws WriteSyncer) WriteSyncer { - if _, ok := ws.(*lockedWriteSyncer); ok { - // no need to layer on another lock - return ws - } - return &lockedWriteSyncer{ws: ws} -} - -func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { - s.Lock() - n, err := s.ws.Write(bs) - s.Unlock() - return n, err -} - -func (s *lockedWriteSyncer) Sync() error { - s.Lock() - err := s.ws.Sync() - s.Unlock() - return err -} - -type writerWrapper struct { - io.Writer -} - -func (w writerWrapper) Sync() error { - return nil -} - -type multiWriteSyncer []WriteSyncer - -// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes -// and sync calls, much like io.MultiWriter. -func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { - if len(ws) == 1 { - return ws[0] - } - // Copy to protect against https://github.com/golang/go/issues/7809 - return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) -} - -// See https://golang.org/src/io/multi.go -// When not all underlying syncers write the same number of bytes, -// the smallest number is returned even though Write() is called on -// all of them. -func (ws multiWriteSyncer) Write(p []byte) (int, error) { - var writeErr error - nWritten := 0 - for _, w := range ws { - n, err := w.Write(p) - writeErr = multierr.Append(writeErr, err) - if nWritten == 0 && n != 0 { - nWritten = n - } else if n < nWritten { - nWritten = n - } - } - return nWritten, writeErr -} - -func (ws multiWriteSyncer) Sync() error { - var err error - for _, w := range ws { - err = multierr.Append(err, w.Sync()) - } - return err -}
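That closes out the removed zapcore sources. For reference, the write-syncer helpers deleted above compose as follows; a minimal sketch against the upstream module:

package main

import (
	"bytes"
	"os"

	"go.uber.org/zap/zapcore"
)

func main() {
	var buf bytes.Buffer
	// Duplicate every write to stderr and an in-memory buffer; Lock wraps the
	// result in a mutex so it is safe for concurrent use.
	ws := zapcore.Lock(zapcore.NewMultiWriteSyncer(
		zapcore.AddSync(os.Stderr), // *os.File already provides a real Sync
		zapcore.AddSync(&buf),      // a plain io.Writer gets a no-op Sync
	))
	if _, err := ws.Write([]byte("hello\n")); err != nil {
		panic(err)
	}
	_ = ws.Sync()
}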