diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 0000000..bebf4a5 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,43 @@ +{ + "ImportPath": "github.com/jbenet/datastore.go", + "GoVersion": "go1.3.1", + "Packages": [ + "./..." + ], + "Deps": [ + { + "ImportPath": "code.google.com/p/go-uuid/uuid", + "Comment": "null-12", + "Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9" + }, + { + "ImportPath": "code.google.com/p/snappy-go/snappy", + "Comment": "null-15", + "Rev": "12e4b4183793ac4b061921e7980845e750679fd0" + }, + { + "ImportPath": "github.com/codahale/blake2", + "Rev": "3fa823583afba430e8fc7cdbcc670dbf90bfacc4" + }, + { + "ImportPath": "github.com/hashicorp/golang-lru", + "Rev": "4dfff096c4973178c8f35cf6dd1a732a0a139370" + }, + { + "ImportPath": "github.com/mattbaird/elastigo/api", + "Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d" + }, + { + "ImportPath": "github.com/mattbaird/elastigo/core", + "Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb", + "Rev": "9bca75c48d6c31becfbb127702b425e7226052e3" + }, + { + "ImportPath": "gopkg.in/check.v1", + "Rev": "91ae5f88a67b14891cfd43895b01164f6c120420" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 0000000..4cdaa53 --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore new file mode 100644 index 0000000..f037d68 --- /dev/null +++ b/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE new file mode 100644 index 0000000..ab6b011 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
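Note: the files below vendor the code.google.com/p/go-uuid/uuid package. For orientation, here is a minimal, illustrative sketch of how a consumer of this API might use it. The sketch is not part of the vendored tree; the example program is hypothetical, but every function it calls (New, Parse, Version, Variant, NewUUID, NewSHA1, NameSpace_DNS) is defined in the sources that follow.

	package main

	import (
		"fmt"

		"code.google.com/p/go-uuid/uuid"
	)

	func main() {
		// New is a convenience for NewRandom().String(): a random Version 4 UUID.
		s := uuid.New()

		// Parse accepts both the plain and the urn:uuid: forms, returning nil on failure.
		u := uuid.Parse(s)
		if u == nil {
			panic("generated UUID failed to parse")
		}
		if v, ok := u.Version(); ok {
			fmt.Println(v, u.Variant()) // VERSION_4 RFC4122
		}

		// Time-based (Version 1) and name-based (Version 5, SHA-1) UUIDs.
		fmt.Println(uuid.NewUUID())
		fmt.Println(uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.org")))
	}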
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go
new file mode 100644
index 0000000..50a0f2d
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain.
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+	uuid := NewUUID()
+	if uuid != nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid. It is equivalent to
+//
+//	NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid. It is equivalent to
+//
+//	NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. It returns false if
+// uuid is not a Version 2 UUID.
+func (uuid UUID) Domain() (Domain, bool) {
+	if v, _ := uuid.Version(); v != 2 {
+		return 0, false
+	}
+	return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID. It returns false if uuid is
+// not a Version 2 UUID.
+func (uuid UUID) Id() (uint32, bool) {
+	if v, _ := uuid.Version(); v != 2 {
+		return 0, false
+	}
+	return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go
new file mode 100644
index 0000000..d8bd013
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
+package uuid
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go
new file mode 100644
index 0000000..cdd4192
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+	NameSpace_DNS  = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_URL  = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_OID  = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+	NIL            = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space)
+	h.Write(data)
+	s := h.Sum(nil)
+	uuid := make([]byte, 16)
+	copy(uuid, s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data. It is equivalent to
+//
+//	NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data. It is equivalent to
+//
+//	NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go
new file mode 100644
index 0000000..dd0a8ac
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "net"
+
+var (
+	interfaces []net.Interface // cached list of interfaces
+	ifname     string          // name of interface being used
+	nodeID     []byte          // hardware address for version 1 UUIDs
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+	return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+	if interfaces == nil {
+		var err error
+		interfaces, err = net.Interfaces()
+		if err != nil && name != "" {
+			return false
+		}
+	}
+
+	for _, ifs := range interfaces {
+		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+			if setNodeID(ifs.HardwareAddr) {
+				ifname = ifs.Name
+				return true
+			}
+		}
+	}
+
+	// We found no interfaces with a valid hardware address. If name
+	// does not specify a specific interface, generate a random Node ID
+	// (section 4.1.6 of RFC 4122).
+	if name == "" {
+		if nodeID == nil {
+			nodeID = make([]byte, 6)
+		}
+		randomBits(nodeID)
+		return true
+	}
+	return false
+}
+
+// NodeID returns a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	if nodeID == nil {
+		SetNodeInterface("")
+	}
+	nid := make([]byte, 6)
+	copy(nid, nodeID)
+	return nid
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if setNodeID(id) {
+		ifname = "user"
+		return true
+	}
+	return false
+}
+
+func setNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	if nodeID == nil {
+		nodeID = make([]byte, 6)
+	}
+	copy(nodeID, id)
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	if len(uuid) != 16 {
+		return nil
+	}
+	node := make([]byte, 6)
+	copy(node, uuid[10:])
+	return node
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go
new file mode 100644
index 0000000..b9369c2
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100-nanosecond intervals since
+// 15 Oct 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100-nanosecond intervals between epochs
+)
+
+var (
+	mu        sync.Mutex
+	lasttime  uint64 // last time we returned
+	clock_seq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100-nanosecond intervals since 15 Oct
+// 1582) and adjusts the clock sequence as needed. An error is returned if the
+// current time cannot be determined.
+func GetTime() (Time, error) {
+	defer mu.Unlock()
+	mu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence.
+	if now <= lasttime {
+		clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID (see section 4.2.1.1 of
+// RFC 4122).
+func ClockSequence() int {
+	defer mu.Unlock()
+	mu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq.
+// Setting to -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer mu.Unlock()
+	mu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	old_seq := clock_seq
+	clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if old_seq != clock_seq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100-nanosecond intervals since 15 Oct 1582 encoded
+// in uuid. It returns false if uuid is not valid. The time is only well
+// defined for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go
new file mode 100644
index 0000000..de40b10
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues maps a byte to its value as a hexadecimal digit, or to 255 if it is
+// not a hexadecimal digit.
+var xvalues = []byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+	b1 := xvalues[x[0]]
+	b2 := xvalues[x[1]]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go
new file mode 100644
index 0000000..2920fae
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go
@@ -0,0 +1,163 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+	return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+	if len(s) == 36+9 {
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return nil
+		}
+		s = s[9:]
+	} else if len(s) != 36 {
+		return nil
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return nil
+	}
+	uuid := make([]byte, 16)
+	for i, x := range []int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(s[x:])
+		if !ok {
+			return nil
+		}
+		uuid[i] = v
+	}
+	return uuid
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+	return bytes.Equal(uuid1, uuid2)
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
+// or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	if uuid == nil || len(uuid) != 16 {
+		return ""
+	}
+	b := []byte(uuid)
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
+		b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	if uuid == nil || len(uuid) != 16 {
+		return ""
+	}
+	b := []byte(uuid)
+	return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
+		b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+	if len(uuid) != 16 {
+		return Invalid
+	}
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go
new file mode 100644
index 0000000..417ebeb
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go
@@ -0,0 +1,390 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+	"time"
+)
+
+type test struct {
+	in      string
+	version Version
+	variant Variant
+	isuuid  bool
+}
+
+var tests = []test{
+	{"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
+	{"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
+	{"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
+	{"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
+	{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
+	{"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
+	{"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
+	{"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
+	{"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
+	{"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
+	{"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
+	{"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
+	{"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
+	{"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
+	{"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
+
+	{"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
+	{"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
+	{"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
+	{"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
+
+	{"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
+	{"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
+}
+
+var constants = []struct {
+	c    interface{}
+	name string
+}{
+	{Person, "Person"},
+	{Group, "Group"},
+	{Org, "Org"},
+	{Invalid, "Invalid"},
+	{RFC4122, "RFC4122"},
+	{Reserved, "Reserved"},
+	{Microsoft, "Microsoft"},
+	{Future, "Future"},
+	{Domain(17), "Domain17"},
+	{Variant(42), "BadVariant42"},
+}
+
+func testTest(t *testing.T, in string, tt test) {
+	uuid := Parse(in)
+	if ok := (uuid != nil); ok != tt.isuuid {
+		t.Errorf("Parse(%s) got %v expected %v\n", in, ok, tt.isuuid)
+	}
+	if uuid == nil {
+		return
+	}
+
+	if v := uuid.Variant(); v != tt.variant {
+		t.Errorf("Variant(%s) got %d expected %d\n", in, v, tt.variant)
+	}
+	if v, _ := uuid.Version(); v != tt.version {
+		t.Errorf("Version(%s) got %d expected %d\n", in, v, tt.version)
+	}
+}
+
+func TestUUID(t *testing.T) {
+	for _, tt := range tests {
+		testTest(t, tt.in, tt)
+		testTest(t, strings.ToUpper(tt.in), tt)
+	}
+}
+
+func TestConstants(t *testing.T) {
+	for x, tt := range constants {
+		v, ok := tt.c.(fmt.Stringer)
+		if !ok {
+			t.Errorf("%x: %v: not a stringer", x, v)
+		} else if s := v.String(); s != tt.name {
+			v, _ := tt.c.(int)
+			t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name)
+		}
+	}
+}
+
+func TestRandomUUID(t *testing.T) {
+	m := make(map[string]bool)
+	for x := 1; x < 32; x++ {
+		uuid := NewRandom()
+		s := uuid.String()
+		if m[s] {
+			t.Errorf("NewRandom returned duplicate UUID %s\n", s)
+		}
+		m[s] = true
+		if v, _ := uuid.Version(); v != 4 {
+			t.Errorf("Random UUID of version %s\n", v)
+		}
+		if uuid.Variant() != RFC4122 {
+			t.Errorf("Random UUID is variant %d\n", uuid.Variant())
+		}
+	}
+}
+
+func TestNew(t *testing.T) {
+	m := make(map[string]bool)
+	for x := 1; x < 32; x++ {
+		s := New()
+		if m[s] {
+			t.Errorf("New returned duplicate UUID %s\n", s)
+		}
+		m[s] = true
+		uuid := Parse(s)
+		if uuid == nil {
+			t.Errorf("New returned %q which does not decode\n", s)
+			continue
+		}
+		if v, _ := uuid.Version(); v != 4 {
+			t.Errorf("Random UUID of version %s\n", v)
+		}
+		if uuid.Variant() != RFC4122 {
+			t.Errorf("Random UUID is variant %d\n", uuid.Variant())
+		}
+	}
+}
+
+func clockSeq(t *testing.T, uuid UUID) int {
+	seq, ok := uuid.ClockSequence()
+	if !ok {
+		t.Fatalf("%s: invalid clock sequence\n", uuid)
+	}
+	return seq
+}
+
+func TestClockSeq(t *testing.T) {
+	// Fake time.Now for this test to return a monotonically advancing time; restore it at end.
+	defer func(orig func() time.Time) { timeNow = orig }(timeNow)
+	monTime := time.Now()
+	timeNow = func() time.Time {
+		monTime = monTime.Add(1 * time.Second)
+		return monTime
+	}
+
+	SetClockSequence(-1)
+	uuid1 := NewUUID()
+	uuid2 := NewUUID()
+
+	if clockSeq(t, uuid1) != clockSeq(t, uuid2) {
+		t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2))
+	}
+
+	SetClockSequence(-1)
+	uuid2 = NewUUID()
+
+	// On the very off chance we generated the same clock sequence twice,
+	// try again.
+	if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
+		SetClockSequence(-1)
+		uuid2 = NewUUID()
+	}
+	if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
+		t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1))
+	}
+
+	SetClockSequence(0x1234)
+	uuid1 = NewUUID()
+	if seq := clockSeq(t, uuid1); seq != 0x1234 {
+		t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq)
+	}
+}
+
+func TestCoding(t *testing.T) {
+	text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
+	urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
+	data := UUID{
+		0x7d, 0x44, 0x48, 0x40,
+		0x9d, 0xc0,
+		0x11, 0xd1,
+		0xb2, 0x45,
+		0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
+	}
+	if v := data.String(); v != text {
+		t.Errorf("%x: encoded to %s, expected %s\n", data, v, text)
+	}
+	if v := data.URN(); v != urn {
+		t.Errorf("%x: urn is %s, expected %s\n", data, v, urn)
+	}
+
+	uuid := Parse(text)
+	if !Equal(uuid, data) {
+		t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data)
+	}
+}
+
+func TestVersion1(t *testing.T) {
+	uuid1 := NewUUID()
+	uuid2 := NewUUID()
+
+	if Equal(uuid1, uuid2) {
+		t.Errorf("%s: duplicate uuid\n", uuid1)
+	}
+	if v, _ := uuid1.Version(); v != 1 {
+		t.Errorf("%s: version %s expected 1\n", uuid1, v)
+	}
+	if v, _ := uuid2.Version(); v != 1 {
+		t.Errorf("%s: version %s expected 1\n", uuid2, v)
+	}
+	n1 := uuid1.NodeID()
+	n2 := uuid2.NodeID()
+	if !bytes.Equal(n1, n2) {
+		t.Errorf("Different nodes %x != %x\n", n1, n2)
+	}
+	t1, ok := uuid1.Time()
+	if !ok {
+		t.Errorf("%s: invalid time\n", uuid1)
+	}
+	t2, ok := uuid2.Time()
+	if !ok {
+		t.Errorf("%s: invalid time\n", uuid2)
+	}
+	q1, ok := uuid1.ClockSequence()
+	if !ok {
+		t.Errorf("%s: invalid clock sequence\n", uuid1)
+	}
+	q2, ok := uuid2.ClockSequence()
+	if !ok {
+		t.Errorf("%s: invalid clock sequence", uuid2)
+	}
+
+	switch {
+	case t1 == t2 && q1 == q2:
+		t.Errorf("time stopped\n")
+	case t1 > t2 && q1 == q2:
+		t.Errorf("time reversed\n")
+	case t1 < t2 && q1 != q2:
+		t.Errorf("clock sequence changed unexpectedly\n")
+	}
+}
+
+func TestNodeAndTime(t *testing.T) {
+	// Time is February 5, 1998 12:30:23.136364800 AM GMT
+
+	uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
+	node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
+
+	ts, ok := uuid.Time()
+	if ok {
+		c := time.Unix(ts.UnixTime())
+		want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
+		if !c.Equal(want) {
+			t.Errorf("Got time %v, want %v", c, want)
+		}
+	} else {
+		t.Errorf("%s: bad time\n", uuid)
+	}
+	if !bytes.Equal(node, uuid.NodeID()) {
+		t.Errorf("Expected node %v got %v\n", node, uuid.NodeID())
+	}
+}
+
+func TestMD5(t *testing.T) {
+	uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String()
+	want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
+	if uuid != want {
+		t.Errorf("MD5: got %q expected %q\n", uuid, want)
+	}
+}
+
+func TestSHA1(t *testing.T) {
+	uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String()
+	want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
+	if uuid != want {
+		t.Errorf("SHA1: got %q expected %q\n", uuid, want)
+	}
+}
+
+func TestNodeID(t *testing.T) {
+	nid := []byte{1, 2, 3, 4, 5, 6}
+	SetNodeInterface("")
+	s := NodeInterface()
+	if s == "" || s == "user" {
+		t.Errorf("NodeInterface %q after SetNodeInterface\n", s)
+	}
+	node1 := NodeID()
+	if node1 == nil {
+		t.Errorf("NodeID nil after SetNodeInterface %q\n", s)
+	}
+	SetNodeID(nid)
+	s = NodeInterface()
+	if s != "user" {
+		t.Errorf("Expected NodeInterface %q got %q\n", "user", s)
+	}
+	node2 := NodeID()
+	if node2 == nil {
+		t.Errorf("NodeID nil after SetNodeID %q\n", s)
+	}
+	if bytes.Equal(node1, node2) {
+		t.Errorf("NodeID %x not changed after SetNodeID\n", node2)
+	} else if !bytes.Equal(nid, node2) {
+		t.Errorf("NodeID is %x, expected %x\n", node2, nid)
+	}
+}
+
+func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) {
+	if uuid == nil {
+		t.Errorf("%s failed\n", name)
+		return
+	}
+	if v, _ := uuid.Version(); v != 2 {
+		t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v)
+		return
+	}
+	if v, ok := uuid.Domain(); !ok || v != domain {
+		if !ok {
+			t.Errorf("%s: %s: Domain failed\n", name, uuid)
+		} else {
+			t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v)
+		}
+	}
+	if v, ok := uuid.Id(); !ok || v != id {
+		if !ok {
+			t.Errorf("%s: %s: Id failed\n", name, uuid)
+		} else {
+			t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v)
+		}
+	}
+}
+
+func TestDCE(t *testing.T) {
+	testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678)
+	testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid()))
+	testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid()))
+}
+
+type badRand struct{}
+
+func (r badRand) Read(buf []byte) (int, error) {
+	for i := range buf {
+		buf[i] = byte(i)
+	}
+	return len(buf), nil
+}
+
+func TestBadRand(t *testing.T) {
+	SetRand(badRand{})
+	uuid1 := New()
+	uuid2 := New()
+	if uuid1 != uuid2 {
+		t.Errorf("expected duplicates, got %q and %q\n", uuid1, uuid2)
+	}
+	SetRand(nil)
+	uuid1 = New()
+	uuid2 = New()
+	if uuid1 == uuid2 {
+		t.Errorf("unexpected duplicates, got %q\n", uuid1)
+	}
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go
new file mode 100644
index 0000000..6358004
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+	if nodeID == nil {
+		SetNodeInterface("")
+	}
+
+	now, err := GetTime()
+	if err != nil {
+		return nil
+	}
+
+	uuid := make([]byte, 16)
+
+	time_low := uint32(now & 0xffffffff)
+	time_mid := uint16((now >> 32) & 0xffff)
+	time_hi := uint16((now >> 48) & 0x0fff)
+	time_hi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], time_low)
+	binary.BigEndian.PutUint16(uuid[4:], time_mid)
+	binary.BigEndian.PutUint16(uuid[6:], time_hi)
+	binary.BigEndian.PutUint16(uuid[8:], clock_seq)
+	copy(uuid[10:], nodeID)
+
+	return uuid
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go
new file mode 100644
index 0000000..b3d4a36
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//	Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//	hit by a meteorite is estimated to be one chance in 17 billion, which
+//	means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+//	equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//	year and having one duplicate.
+func NewRandom() UUID {
+	uuid := make([]byte, 16)
+	randomBits([]byte(uuid))
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go
new file mode 100644
index 0000000..d93c1b9
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go
@@ -0,0 +1,124 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+// ErrCorrupt reports that the input is invalid.
+var ErrCorrupt = errors.New("snappy: corrupt input")
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n == 0 {
+		return 0, 0, ErrCorrupt
+	}
+	if uint64(int(v)) != v {
+		return 0, 0, errors.New("snappy: decoded block is too large")
+	}
+	return int(v), n, nil
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if len(dst) < dLen { + dst = make([]byte, dLen) + } + + var d, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint(src[s] >> 2) + switch { + case x < 60: + s += 1 + case x == 60: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-1]) + case x == 61: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-2]) | uint(src[s-1])<<8 + case x == 62: + s += 4 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 + case x == 63: + s += 5 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 + } + length = int(x + 1) + if length <= 0 { + return nil, errors.New("snappy: unsupported literal length") + } + if length > len(dst)-d || length > len(src)-s { + return nil, ErrCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) + + case tagCopy2: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(src[s-2]) | int(src[s-1])<<8 + + case tagCopy4: + return nil, errors.New("snappy: unsupported COPY_4 tag") + } + + end := d + length + if offset > d || end > len(dst) { + return nil, ErrCorrupt + } + for ; d < end; d++ { + dst[d] = dst[d-offset] + } + } + if d != dLen { + return nil, ErrCorrupt + } + return dst[:d], nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go new file mode 100644 index 0000000..b2371db --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go @@ -0,0 +1,174 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" +) + +// We limit how far copy back-references can go, the same as the C++ code. +const maxOffset = 1 << 15 + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + case n < 1<<16: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + case n < 1<<24: + dst[0] = 62<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + i = 4 + case int64(n) < 1<<32: + dst[0] = 63<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + dst[4] = uint8(n >> 24) + i = 5 + default: + panic("snappy: source buffer is too long") + } + if copy(dst[i:], lit) != len(lit) { + panic("snappy: destination buffer is too short") + } + return i + len(lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. 
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	for length > 0 {
+		x := length - 4
+		if 0 <= x && x < 1<<3 && offset < 1<<11 {
+			dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
+			dst[i+1] = uint8(offset)
+			i += 2
+			break
+		}
+
+		x = length
+		if x > 1<<6 {
+			x = 1 << 6
+		}
+		dst[i+0] = uint8(x-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= x
+	}
+	return i
+}
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Encode(dst, src []byte) ([]byte, error) {
+	if n := MaxEncodedLen(len(src)); len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	// Return early if src is short.
+	if len(src) <= 4 {
+		if len(src) != 0 {
+			d += emitLiteral(dst[d:], src)
+		}
+		return dst[:d], nil
+	}
+
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	const maxTableSize = 1 << 14
+	shift, tableSize := uint(32-8), 1<<8
+	for tableSize < maxTableSize && tableSize < len(src) {
+		shift--
+		tableSize *= 2
+	}
+	var table [maxTableSize]int
+
+	// Iterate over the source bytes.
+	var (
+		s   int // The iterator position.
+		t   int // The last position with the same hash as s.
+		lit int // The start position of any pending literal bytes.
+	)
+	for s+3 < len(src) {
+		// Update the hash table.
+		b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
+		h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
+		p := &table[(h*0x1e35a7bd)>>shift]
+		// We need to store values in [-1, inf) in table. To save
+		// some initialization time, (re)use the table's zero value
+		// and shift the values against this zero: add 1 on writes,
+		// subtract 1 on reads.
+		t, *p = *p-1, s+1
+		// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
+		if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
+			s++
+			continue
+		}
+		// Otherwise, we have a match. First, emit any pending literal bytes.
+		if lit != s {
+			d += emitLiteral(dst[d:], src[lit:s])
+		}
+		// Extend the match to be as long as possible.
+		s0 := s
+		s, t = s+4, t+4
+		for s < len(src) && src[s] == src[t] {
+			s++
+			t++
+		}
+		// Emit the copied bytes.
+		d += emitCopy(dst[d:], s-t, s-s0)
+		lit = s
+	}
+
+	// Emit any final pending literal bytes and return.
+	if lit != len(src) {
+		d += emitLiteral(dst[d:], src[lit:])
+	}
+	return dst[:d], nil
+}
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+func MaxEncodedLen(srcLen int) int {
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data.
If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + return 32 + srcLen + srcLen/6 +} diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go new file mode 100644 index 0000000..2f1b790 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go @@ -0,0 +1,38 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at http://code.google.com/p/snappy/ +package snappy + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer supported. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go new file mode 100644 index 0000000..7ba8392 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go @@ -0,0 +1,261 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package snappy + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path/filepath" + "strings" + "testing" +) + +var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + +func roundtrip(b, ebuf, dbuf []byte) error { + e, err := Encode(ebuf, b) + if err != nil { + return fmt.Errorf("encoding error: %v", err) + } + d, err := Decode(dbuf, e) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if !bytes.Equal(b, d) { + return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rand.Seed(27354294) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i, _ := range b { + b[i] = uint8(rand.Uint32()) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i, _ := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func benchDecode(b *testing.B, src []byte) { + encoded, err := Encode(nil, src) + if err != nil { + b.Fatal(err) + } + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Decode(src, encoded) + } +} + +func benchEncode(b *testing.B, src []byte) { + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + dst := make([]byte, MaxEncodedLen(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Encode(dst, src) + } +} + +func readFile(b *testing.B, filename string) []byte { + src, err := ioutil.ReadFile(filename) + if err != nil { + b.Fatalf("failed reading %s: %s", filename, err) + } + if len(src) == 0 { + b.Fatalf("%s has zero length", filename) + } + return src +} + +// expand returns a slice of length n containing repeated copies of src. +func expand(src []byte, n int) []byte { + dst := make([]byte, n) + for x := dst; len(x) > 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. 
+ data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +// testFiles' values are copied directly from +// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc. +// The label field is unused in snappy-go. +var testFiles = []struct { + label string + filename string +}{ + {"html", "html"}, + {"urls", "urls.10K"}, + {"jpg", "house.jpg"}, + {"pdf", "mapreduce-osdi-1.pdf"}, + {"html4", "html_x_4"}, + {"cp", "cp.html"}, + {"c", "fields.c"}, + {"lsp", "grammar.lsp"}, + {"xls", "kennedy.xls"}, + {"txt1", "alice29.txt"}, + {"txt2", "asyoulik.txt"}, + {"txt3", "lcet10.txt"}, + {"txt4", "plrabn12.txt"}, + {"bin", "ptt5"}, + {"sum", "sum"}, + {"man", "xargs.1"}, + {"pb", "geo.protodata"}, + {"gaviota", "kppkn.gtb"}, +} + +// The test data files are present at this canonical URL. +const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/" + +func downloadTestdata(basename string) (errRet error) { + filename := filepath.Join("testdata", basename) + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + resp, err := http.Get(baseURL + basename) + if err != nil { + return fmt.Errorf("failed to download %s: %s", baseURL+basename, err) + } + defer resp.Body.Close() + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to write %s: %s", filename, err) + } + return nil +} + +func benchFile(b *testing.B, n int, decode bool) { + filename := filepath.Join("testdata", testFiles[n].filename) + if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 { + if !*download { + b.Fatal("test data not found; skipping benchmark without the -download flag") + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) { + b.Fatalf("failed to create testdata: %s", err) + } + for _, tf := range testFiles { + if err := downloadTestdata(tf.filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + } + } + data := readFile(b, filename) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) } +func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) } +func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) } +func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) } +func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) } +func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } +func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) } +func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) } +func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) } +func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) } +func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) } +func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) } diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/.gitignore b/Godeps/_workspace/src/github.com/codahale/blake2/.gitignore new file mode 100644 index 0000000..e104a7d --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/.gitignore @@ -0,0 +1,2 @@ +*.test +*.out diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/.travis.yml b/Godeps/_workspace/src/github.com/codahale/blake2/.travis.yml new file mode 100644 index 0000000..13f75f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.2 + - tip +notifications: + # See http://about.travis-ci.org/docs/user/build-configuration/ to learn more + # about configuring notification recipients and more. 
+ email: + recipients: + - coda.hale@gmail.com diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/LICENSE b/Godeps/_workspace/src/github.com/codahale/blake2/LICENSE new file mode 100644 index 0000000..f9835c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Coda Hale + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/README.md b/Godeps/_workspace/src/github.com/codahale/blake2/README.md new file mode 100644 index 0000000..7f66c55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/README.md @@ -0,0 +1,9 @@ +blake2 +====== + +[![Build Status](https://travis-ci.org/codahale/blake2.png?branch=master)](https://travis-ci.org/codahale/blake2) + +A Go wrapper of the [BLAKE2](https://github.com/BLAKE2/BLAKE2) hash library, +using the public domain, SSE-optimized C implementation. + +For documentation, check [godoc](http://godoc.org/github.com/codahale/blake2). 
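Note: the blake2 package vendored below exposes BLAKE2 through Go's standard hash.Hash interface; NewBlake2B is the constructor exercised by benchmarks_test.go, which follows. A minimal, illustrative usage sketch only (the example program is hypothetical; the constructor name and hash.Hash signature come from the benchmark code below):

	package main

	import (
		"fmt"

		"github.com/codahale/blake2"
	)

	func main() {
		// NewBlake2B returns a hash.Hash computing the BLAKE2b digest.
		h := blake2.NewBlake2B()
		h.Write([]byte("hello, world"))
		fmt.Printf("%x\n", h.Sum(nil))
	}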
diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/benchmarks_test.go b/Godeps/_workspace/src/github.com/codahale/blake2/benchmarks_test.go new file mode 100644 index 0000000..be4e867 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/benchmarks_test.go @@ -0,0 +1,42 @@ +package blake2 + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "hash" + "testing" +) + +func benchmarkHash(b *testing.B, hash func() hash.Hash) { + b.SetBytes(1024 * 1024) + data := make([]byte, 1024) + for i := 0; i < b.N; i++ { + h := hash() + for j := 0; j < 1024; j++ { + h.Write(data) + } + h.Sum(nil) + } +} + +func BenchmarkBlake2B(b *testing.B) { + benchmarkHash(b, NewBlake2B) +} + +func BenchmarkMD5(b *testing.B) { + benchmarkHash(b, md5.New) +} + +func BenchmarkSHA1(b *testing.B) { + benchmarkHash(b, sha1.New) +} + +func BenchmarkSHA256(b *testing.B) { + benchmarkHash(b, sha256.New) +} + +func BenchmarkSHA512(b *testing.B) { + benchmarkHash(b, sha512.New) +} diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2-config.h b/Godeps/_workspace/src/github.com/codahale/blake2/blake2-config.h new file mode 100644 index 0000000..70d61f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2-config.h @@ -0,0 +1,72 @@ +/* + BLAKE2 reference source code package - optimized C implementations + + Written in 2012 by Samuel Neves + + To the extent possible under law, the author(s) have dedicated all copyright + and related and neighboring rights to this software to the public domain + worldwide. This software is distributed without any warranty. + + You should have received a copy of the CC0 Public Domain Dedication along with + this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. +*/ +#pragma once +#ifndef __BLAKE2_CONFIG_H__ +#define __BLAKE2_CONFIG_H__ + +// These don't work everywhere +#if defined(__SSE2__) +#define HAVE_SSE2 +#endif + +#if defined(__SSSE3__) +#define HAVE_SSSE3 +#endif + +#if defined(__SSE4_1__) +#define HAVE_SSE41 +#endif + +#if defined(__AVX__) +#define HAVE_AVX +#endif + +#if defined(__XOP__) +#define HAVE_XOP +#endif + + +#ifdef HAVE_AVX2 +#ifndef HAVE_AVX +#define HAVE_AVX +#endif +#endif + +#ifdef HAVE_XOP +#ifndef HAVE_AVX +#define HAVE_AVX +#endif +#endif + +#ifdef HAVE_AVX +#ifndef HAVE_SSE41 +#define HAVE_SSE41 +#endif +#endif + +#ifdef HAVE_SSE41 +#ifndef HAVE_SSSE3 +#define HAVE_SSSE3 +#endif +#endif + +#ifdef HAVE_SSSE3 +#define HAVE_SSE2 +#endif + +#if !defined(HAVE_SSE2) +#error "This code requires at least SSE2." +#endif + +#endif + diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2-impl.h b/Godeps/_workspace/src/github.com/codahale/blake2/blake2-impl.h new file mode 100644 index 0000000..c988a94 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2-impl.h @@ -0,0 +1,133 @@ +/* + BLAKE2 reference source code package - optimized C implementations + + Written in 2012 by Samuel Neves + + To the extent possible under law, the author(s) have dedicated all copyright + and related and neighboring rights to this software to the public domain + worldwide. This software is distributed without any warranty. + + You should have received a copy of the CC0 Public Domain Dedication along with + this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
+*/ +#pragma once +#ifndef __BLAKE2_IMPL_H__ +#define __BLAKE2_IMPL_H__ + +#include <stdint.h> + +static inline uint32_t load32( const void *src ) +{ +#if defined(NATIVE_LITTLE_ENDIAN) + return *( uint32_t * )( src ); +#else + const uint8_t *p = ( uint8_t * )src; + uint32_t w = *p++; + w |= ( uint32_t )( *p++ ) << 8; + w |= ( uint32_t )( *p++ ) << 16; + w |= ( uint32_t )( *p++ ) << 24; + return w; +#endif +} + +static inline uint64_t load64( const void *src ) +{ +#if defined(NATIVE_LITTLE_ENDIAN) + return *( uint64_t * )( src ); +#else + const uint8_t *p = ( uint8_t * )src; + uint64_t w = *p++; + w |= ( uint64_t )( *p++ ) << 8; + w |= ( uint64_t )( *p++ ) << 16; + w |= ( uint64_t )( *p++ ) << 24; + w |= ( uint64_t )( *p++ ) << 32; + w |= ( uint64_t )( *p++ ) << 40; + w |= ( uint64_t )( *p++ ) << 48; + w |= ( uint64_t )( *p++ ) << 56; + return w; +#endif +} + +static inline void store32( void *dst, uint32_t w ) +{ +#if defined(NATIVE_LITTLE_ENDIAN) + *( uint32_t * )( dst ) = w; +#else + uint8_t *p = ( uint8_t * )dst; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; +#endif +} + +static inline void store64( void *dst, uint64_t w ) +{ +#if defined(NATIVE_LITTLE_ENDIAN) + *( uint64_t * )( dst ) = w; +#else + uint8_t *p = ( uint8_t * )dst; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; +#endif +} + +static inline uint64_t load48( const void *src ) +{ + const uint8_t *p = ( const uint8_t * )src; + uint64_t w = *p++; + w |= ( uint64_t )( *p++ ) << 8; + w |= ( uint64_t )( *p++ ) << 16; + w |= ( uint64_t )( *p++ ) << 24; + w |= ( uint64_t )( *p++ ) << 32; + w |= ( uint64_t )( *p++ ) << 40; + return w; +} + +static inline void store48( void *dst, uint64_t w ) +{ + uint8_t *p = ( uint8_t * )dst; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; w >>= 8; + *p++ = ( uint8_t )w; +} + +static inline uint32_t rotl32( const uint32_t w, const unsigned c ) +{ + return ( w << c ) | ( w >> ( 32 - c ) ); +} + +static inline uint64_t rotl64( const uint64_t w, const unsigned c ) +{ + return ( w << c ) | ( w >> ( 64 - c ) ); +} + +static inline uint32_t rotr32( const uint32_t w, const unsigned c ) +{ + return ( w >> c ) | ( w << ( 32 - c ) ); +} + +static inline uint64_t rotr64( const uint64_t w, const unsigned c ) +{ + return ( w >> c ) | ( w << ( 64 - c ) ); +} + +/* prevents compiler optimizing out memset() */ +static inline void secure_zero_memory( void *v, size_t n ) +{ + volatile uint8_t *p = ( volatile uint8_t * )v; + + while( n-- ) *p++ = 0; +} + +#endif + diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2.h b/Godeps/_workspace/src/github.com/codahale/blake2/blake2.h new file mode 100644 index 0000000..013a12c --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2.h @@ -0,0 +1,157 @@ +/* + BLAKE2 reference source code package - optimized C implementations + + Written in 2012 by Samuel Neves + + To the extent possible under law, the author(s) have dedicated all copyright + and related and neighboring rights to this software to the public domain + worldwide. This software is distributed without any warranty. + + You should have received a copy of the CC0 Public Domain Dedication along with + this software.
If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. +*/ +#pragma once +#ifndef __BLAKE2_H__ +#define __BLAKE2_H__ + +#include <stddef.h> +#include <stdint.h> + +#if defined(_MSC_VER) +#define ALIGN(x) __declspec(align(x)) +#else +#define ALIGN(x) __attribute__ ((__aligned__(x))) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + + enum blake2s_constant + { + BLAKE2S_BLOCKBYTES = 64, + BLAKE2S_OUTBYTES = 32, + BLAKE2S_KEYBYTES = 32, + BLAKE2S_SALTBYTES = 8, + BLAKE2S_PERSONALBYTES = 8 + }; + + enum blake2b_constant + { + BLAKE2B_BLOCKBYTES = 128, + BLAKE2B_OUTBYTES = 64, + BLAKE2B_KEYBYTES = 64, + BLAKE2B_SALTBYTES = 16, + BLAKE2B_PERSONALBYTES = 16 + }; + +#pragma pack(push, 1) + typedef struct __blake2s_param + { + uint8_t digest_length; // 1 + uint8_t key_length; // 2 + uint8_t fanout; // 3 + uint8_t depth; // 4 + uint32_t leaf_length; // 8 + uint8_t node_offset[6];// 14 + uint8_t node_depth; // 15 + uint8_t inner_length; // 16 + // uint8_t reserved[0]; + uint8_t salt[BLAKE2S_SALTBYTES]; // 24 + uint8_t personal[BLAKE2S_PERSONALBYTES]; // 32 + } blake2s_param; + + ALIGN( 64 ) typedef struct __blake2s_state + { + uint32_t h[8]; + uint32_t t[2]; + uint32_t f[2]; + uint8_t buf[2 * BLAKE2S_BLOCKBYTES]; + size_t buflen; + uint8_t last_node; + } blake2s_state; + + typedef struct __blake2b_param + { + uint8_t digest_length; // 1 + uint8_t key_length; // 2 + uint8_t fanout; // 3 + uint8_t depth; // 4 + uint32_t leaf_length; // 8 + uint64_t node_offset; // 16 + uint8_t node_depth; // 17 + uint8_t inner_length; // 18 + uint8_t reserved[14]; // 32 + uint8_t salt[BLAKE2B_SALTBYTES]; // 48 + uint8_t personal[BLAKE2B_PERSONALBYTES]; // 64 + } blake2b_param; + + ALIGN( 64 ) typedef struct __blake2b_state + { + uint64_t h[8]; + uint64_t t[2]; + uint64_t f[2]; + uint8_t buf[2 * BLAKE2B_BLOCKBYTES]; + size_t buflen; + uint8_t last_node; + } blake2b_state; + + ALIGN( 64 ) typedef struct __blake2sp_state + { + blake2s_state S[8][1]; + blake2s_state R[1]; + uint8_t buf[8 * BLAKE2S_BLOCKBYTES]; + size_t buflen; + } blake2sp_state; + + ALIGN( 64 ) typedef struct __blake2bp_state + { + blake2b_state S[4][1]; + blake2b_state R[1]; + uint8_t buf[4 * BLAKE2B_BLOCKBYTES]; + size_t buflen; + } blake2bp_state; +#pragma pack(pop) + + // Streaming API + int blake2s_init( blake2s_state *S, const uint8_t outlen ); + int blake2s_init_key( blake2s_state *S, const uint8_t outlen, const void *key, const uint8_t keylen ); + int blake2s_init_param( blake2s_state *S, const blake2s_param *P ); + int blake2s_update( blake2s_state *S, const uint8_t *in, uint64_t inlen ); + int blake2s_final( blake2s_state *S, uint8_t *out, uint8_t outlen ); + + int blake2b_init( blake2b_state *S, const uint8_t outlen ); + int blake2b_init_key( blake2b_state *S, const uint8_t outlen, const void *key, const uint8_t keylen ); + int blake2b_init_parametrized( blake2b_state *S, const blake2b_param *P, const void *key ); + int blake2b_init_param( blake2b_state *S, const blake2b_param *P ); + int blake2b_update( blake2b_state *S, const uint8_t *in, uint64_t inlen ); + int blake2b_final( blake2b_state *S, uint8_t *out, uint8_t outlen ); + + int blake2sp_init( blake2sp_state *S, const uint8_t outlen ); + int blake2sp_init_key( blake2sp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen ); + int blake2sp_update( blake2sp_state *S, const uint8_t *in, uint64_t inlen ); + int blake2sp_final( blake2sp_state *S, uint8_t *out, uint8_t outlen ); + + int blake2bp_init( blake2bp_state *S, const uint8_t outlen ); + int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key,
const uint8_t keylen ); + int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen ); + int blake2bp_final( blake2bp_state *S, uint8_t *out, uint8_t outlen ); + + // Simple API + int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen ); + int blake2b( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen ); + + int blake2sp( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen ); + int blake2bp( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen ); + + static inline int blake2( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen ) + { + return blake2b( out, in, key, outlen, inlen, keylen ); + } + +#if defined(__cplusplus) +} +#endif + +#endif + diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-load-sse2.h b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-load-sse2.h new file mode 100644 index 0000000..1ba153c --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-load-sse2.h @@ -0,0 +1,68 @@ +/* + BLAKE2 reference source code package - optimized C implementations + + Written in 2012 by Samuel Neves + + To the extent possible under law, the author(s) have dedicated all copyright + and related and neighboring rights to this software to the public domain + worldwide. This software is distributed without any warranty. + + You should have received a copy of the CC0 Public Domain Dedication along with + this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. +*/ +#pragma once +#ifndef __BLAKE2B_LOAD_SSE2_H__ +#define __BLAKE2B_LOAD_SSE2_H__ + +#define LOAD_MSG_0_1(b0, b1) b0 = _mm_set_epi64x(m2, m0); b1 = _mm_set_epi64x(m6, m4) +#define LOAD_MSG_0_2(b0, b1) b0 = _mm_set_epi64x(m3, m1); b1 = _mm_set_epi64x(m7, m5) +#define LOAD_MSG_0_3(b0, b1) b0 = _mm_set_epi64x(m10, m8); b1 = _mm_set_epi64x(m14, m12) +#define LOAD_MSG_0_4(b0, b1) b0 = _mm_set_epi64x(m11, m9); b1 = _mm_set_epi64x(m15, m13) +#define LOAD_MSG_1_1(b0, b1) b0 = _mm_set_epi64x(m4, m14); b1 = _mm_set_epi64x(m13, m9) +#define LOAD_MSG_1_2(b0, b1) b0 = _mm_set_epi64x(m8, m10); b1 = _mm_set_epi64x(m6, m15) +#define LOAD_MSG_1_3(b0, b1) b0 = _mm_set_epi64x(m0, m1); b1 = _mm_set_epi64x(m5, m11) +#define LOAD_MSG_1_4(b0, b1) b0 = _mm_set_epi64x(m2, m12); b1 = _mm_set_epi64x(m3, m7) +#define LOAD_MSG_2_1(b0, b1) b0 = _mm_set_epi64x(m12, m11); b1 = _mm_set_epi64x(m15, m5) +#define LOAD_MSG_2_2(b0, b1) b0 = _mm_set_epi64x(m0, m8); b1 = _mm_set_epi64x(m13, m2) +#define LOAD_MSG_2_3(b0, b1) b0 = _mm_set_epi64x(m3, m10); b1 = _mm_set_epi64x(m9, m7) +#define LOAD_MSG_2_4(b0, b1) b0 = _mm_set_epi64x(m6, m14); b1 = _mm_set_epi64x(m4, m1) +#define LOAD_MSG_3_1(b0, b1) b0 = _mm_set_epi64x(m3, m7); b1 = _mm_set_epi64x(m11, m13) +#define LOAD_MSG_3_2(b0, b1) b0 = _mm_set_epi64x(m1, m9); b1 = _mm_set_epi64x(m14, m12) +#define LOAD_MSG_3_3(b0, b1) b0 = _mm_set_epi64x(m5, m2); b1 = _mm_set_epi64x(m15, m4) +#define LOAD_MSG_3_4(b0, b1) b0 = _mm_set_epi64x(m10, m6); b1 = _mm_set_epi64x(m8, m0) +#define LOAD_MSG_4_1(b0, b1) b0 = _mm_set_epi64x(m5, m9); b1 = _mm_set_epi64x(m10, m2) +#define LOAD_MSG_4_2(b0, b1) b0 = _mm_set_epi64x(m7, m0); b1 = _mm_set_epi64x(m15, m4) +#define LOAD_MSG_4_3(b0, b1) b0 = _mm_set_epi64x(m11, m14); b1 = _mm_set_epi64x(m3, m6) +#define LOAD_MSG_4_4(b0, b1) b0 = _mm_set_epi64x(m12, m1); b1 = _mm_set_epi64x(m13, m8)
+#define LOAD_MSG_5_1(b0, b1) b0 = _mm_set_epi64x(m6, m2); b1 = _mm_set_epi64x(m8, m0) +#define LOAD_MSG_5_2(b0, b1) b0 = _mm_set_epi64x(m10, m12); b1 = _mm_set_epi64x(m3, m11) +#define LOAD_MSG_5_3(b0, b1) b0 = _mm_set_epi64x(m7, m4); b1 = _mm_set_epi64x(m1, m15) +#define LOAD_MSG_5_4(b0, b1) b0 = _mm_set_epi64x(m5, m13); b1 = _mm_set_epi64x(m9, m14) +#define LOAD_MSG_6_1(b0, b1) b0 = _mm_set_epi64x(m1, m12); b1 = _mm_set_epi64x(m4, m14) +#define LOAD_MSG_6_2(b0, b1) b0 = _mm_set_epi64x(m15, m5); b1 = _mm_set_epi64x(m10, m13) +#define LOAD_MSG_6_3(b0, b1) b0 = _mm_set_epi64x(m6, m0); b1 = _mm_set_epi64x(m8, m9) +#define LOAD_MSG_6_4(b0, b1) b0 = _mm_set_epi64x(m3, m7); b1 = _mm_set_epi64x(m11, m2) +#define LOAD_MSG_7_1(b0, b1) b0 = _mm_set_epi64x(m7, m13); b1 = _mm_set_epi64x(m3, m12) +#define LOAD_MSG_7_2(b0, b1) b0 = _mm_set_epi64x(m14, m11); b1 = _mm_set_epi64x(m9, m1) +#define LOAD_MSG_7_3(b0, b1) b0 = _mm_set_epi64x(m15, m5); b1 = _mm_set_epi64x(m2, m8) +#define LOAD_MSG_7_4(b0, b1) b0 = _mm_set_epi64x(m4, m0); b1 = _mm_set_epi64x(m10, m6) +#define LOAD_MSG_8_1(b0, b1) b0 = _mm_set_epi64x(m14, m6); b1 = _mm_set_epi64x(m0, m11) +#define LOAD_MSG_8_2(b0, b1) b0 = _mm_set_epi64x(m9, m15); b1 = _mm_set_epi64x(m8, m3) +#define LOAD_MSG_8_3(b0, b1) b0 = _mm_set_epi64x(m13, m12); b1 = _mm_set_epi64x(m10, m1) +#define LOAD_MSG_8_4(b0, b1) b0 = _mm_set_epi64x(m7, m2); b1 = _mm_set_epi64x(m5, m4) +#define LOAD_MSG_9_1(b0, b1) b0 = _mm_set_epi64x(m8, m10); b1 = _mm_set_epi64x(m1, m7) +#define LOAD_MSG_9_2(b0, b1) b0 = _mm_set_epi64x(m4, m2); b1 = _mm_set_epi64x(m5, m6) +#define LOAD_MSG_9_3(b0, b1) b0 = _mm_set_epi64x(m9, m15); b1 = _mm_set_epi64x(m13, m3) +#define LOAD_MSG_9_4(b0, b1) b0 = _mm_set_epi64x(m14, m11); b1 = _mm_set_epi64x(m0, m12) +#define LOAD_MSG_10_1(b0, b1) b0 = _mm_set_epi64x(m2, m0); b1 = _mm_set_epi64x(m6, m4) +#define LOAD_MSG_10_2(b0, b1) b0 = _mm_set_epi64x(m3, m1); b1 = _mm_set_epi64x(m7, m5) +#define LOAD_MSG_10_3(b0, b1) b0 = _mm_set_epi64x(m10, m8); b1 = _mm_set_epi64x(m14, m12) +#define LOAD_MSG_10_4(b0, b1) b0 = _mm_set_epi64x(m11, m9); b1 = _mm_set_epi64x(m15, m13) +#define LOAD_MSG_11_1(b0, b1) b0 = _mm_set_epi64x(m4, m14); b1 = _mm_set_epi64x(m13, m9) +#define LOAD_MSG_11_2(b0, b1) b0 = _mm_set_epi64x(m8, m10); b1 = _mm_set_epi64x(m6, m15) +#define LOAD_MSG_11_3(b0, b1) b0 = _mm_set_epi64x(m0, m1); b1 = _mm_set_epi64x(m5, m11) +#define LOAD_MSG_11_4(b0, b1) b0 = _mm_set_epi64x(m2, m12); b1 = _mm_set_epi64x(m3, m7) + + +#endif + diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-load-sse41.h b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-load-sse41.h new file mode 100644 index 0000000..f6c1bc8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-load-sse41.h @@ -0,0 +1,402 @@ +/* + BLAKE2 reference source code package - optimized C implementations + + Written in 2012 by Samuel Neves + + To the extent possible under law, the author(s) have dedicated all copyright + and related and neighboring rights to this software to the public domain + worldwide. This software is distributed without any warranty. + + You should have received a copy of the CC0 Public Domain Dedication along with + this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
+*/ +#pragma once +#ifndef __BLAKE2B_LOAD_SSE41_H__ +#define __BLAKE2B_LOAD_SSE41_H__ + +#define LOAD_MSG_0_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m0, m1); \ +b1 = _mm_unpacklo_epi64(m2, m3); \ +} while(0) + + +#define LOAD_MSG_0_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m0, m1); \ +b1 = _mm_unpackhi_epi64(m2, m3); \ +} while(0) + + +#define LOAD_MSG_0_3(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m4, m5); \ +b1 = _mm_unpacklo_epi64(m6, m7); \ +} while(0) + + +#define LOAD_MSG_0_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m4, m5); \ +b1 = _mm_unpackhi_epi64(m6, m7); \ +} while(0) + + +#define LOAD_MSG_1_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m7, m2); \ +b1 = _mm_unpackhi_epi64(m4, m6); \ +} while(0) + + +#define LOAD_MSG_1_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m5, m4); \ +b1 = _mm_alignr_epi8(m3, m7, 8); \ +} while(0) + + +#define LOAD_MSG_1_3(b0, b1) \ +do \ +{ \ +b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); \ +b1 = _mm_unpackhi_epi64(m5, m2); \ +} while(0) + + +#define LOAD_MSG_1_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m6, m1); \ +b1 = _mm_unpackhi_epi64(m3, m1); \ +} while(0) + + +#define LOAD_MSG_2_1(b0, b1) \ +do \ +{ \ +b0 = _mm_alignr_epi8(m6, m5, 8); \ +b1 = _mm_unpackhi_epi64(m2, m7); \ +} while(0) + + +#define LOAD_MSG_2_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m4, m0); \ +b1 = _mm_blend_epi16(m1, m6, 0xF0); \ +} while(0) + + +#define LOAD_MSG_2_3(b0, b1) \ +do \ +{ \ +b0 = _mm_blend_epi16(m5, m1, 0xF0); \ +b1 = _mm_unpackhi_epi64(m3, m4); \ +} while(0) + + +#define LOAD_MSG_2_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m7, m3); \ +b1 = _mm_alignr_epi8(m2, m0, 8); \ +} while(0) + + +#define LOAD_MSG_3_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m3, m1); \ +b1 = _mm_unpackhi_epi64(m6, m5); \ +} while(0) + + +#define LOAD_MSG_3_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m4, m0); \ +b1 = _mm_unpacklo_epi64(m6, m7); \ +} while(0) + + +#define LOAD_MSG_3_3(b0, b1) \ +do \ +{ \ +b0 = _mm_blend_epi16(m1, m2, 0xF0); \ +b1 = _mm_blend_epi16(m2, m7, 0xF0); \ +} while(0) + + +#define LOAD_MSG_3_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m3, m5); \ +b1 = _mm_unpacklo_epi64(m0, m4); \ +} while(0) + + +#define LOAD_MSG_4_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m4, m2); \ +b1 = _mm_unpacklo_epi64(m1, m5); \ +} while(0) + + +#define LOAD_MSG_4_2(b0, b1) \ +do \ +{ \ +b0 = _mm_blend_epi16(m0, m3, 0xF0); \ +b1 = _mm_blend_epi16(m2, m7, 0xF0); \ +} while(0) + + +#define LOAD_MSG_4_3(b0, b1) \ +do \ +{ \ +b0 = _mm_blend_epi16(m7, m5, 0xF0); \ +b1 = _mm_blend_epi16(m3, m1, 0xF0); \ +} while(0) + + +#define LOAD_MSG_4_4(b0, b1) \ +do \ +{ \ +b0 = _mm_alignr_epi8(m6, m0, 8); \ +b1 = _mm_blend_epi16(m4, m6, 0xF0); \ +} while(0) + + +#define LOAD_MSG_5_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m1, m3); \ +b1 = _mm_unpacklo_epi64(m0, m4); \ +} while(0) + + +#define LOAD_MSG_5_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m6, m5); \ +b1 = _mm_unpackhi_epi64(m5, m1); \ +} while(0) + + +#define LOAD_MSG_5_3(b0, b1) \ +do \ +{ \ +b0 = _mm_blend_epi16(m2, m3, 0xF0); \ +b1 = _mm_unpackhi_epi64(m7, m0); \ +} while(0) + + +#define LOAD_MSG_5_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m6, m2); \ +b1 = _mm_blend_epi16(m7, m4, 0xF0); \ +} while(0) + + +#define LOAD_MSG_6_1(b0, b1) \ +do \ +{ \ +b0 = _mm_blend_epi16(m6, m0, 0xF0); \ +b1 = _mm_unpacklo_epi64(m7, m2); \ +} while(0) + + +#define LOAD_MSG_6_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m2, m7); \ +b1 = _mm_alignr_epi8(m5, m6, 8); \ +} while(0) + + 
+#define LOAD_MSG_6_3(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m0, m3); \ +b1 = _mm_shuffle_epi32(m4, _MM_SHUFFLE(1,0,3,2)); \ +} while(0) + + +#define LOAD_MSG_6_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m3, m1); \ +b1 = _mm_blend_epi16(m1, m5, 0xF0); \ +} while(0) + + +#define LOAD_MSG_7_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m6, m3); \ +b1 = _mm_blend_epi16(m6, m1, 0xF0); \ +} while(0) + + +#define LOAD_MSG_7_2(b0, b1) \ +do \ +{ \ +b0 = _mm_alignr_epi8(m7, m5, 8); \ +b1 = _mm_unpackhi_epi64(m0, m4); \ +} while(0) + + +#define LOAD_MSG_7_3(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m2, m7); \ +b1 = _mm_unpacklo_epi64(m4, m1); \ +} while(0) + + +#define LOAD_MSG_7_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m0, m2); \ +b1 = _mm_unpacklo_epi64(m3, m5); \ +} while(0) + + +#define LOAD_MSG_8_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m3, m7); \ +b1 = _mm_alignr_epi8(m0, m5, 8); \ +} while(0) + + +#define LOAD_MSG_8_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m7, m4); \ +b1 = _mm_alignr_epi8(m4, m1, 8); \ +} while(0) + + +#define LOAD_MSG_8_3(b0, b1) \ +do \ +{ \ +b0 = m6; \ +b1 = _mm_alignr_epi8(m5, m0, 8); \ +} while(0) + + +#define LOAD_MSG_8_4(b0, b1) \ +do \ +{ \ +b0 = _mm_blend_epi16(m1, m3, 0xF0); \ +b1 = m2; \ +} while(0) + + +#define LOAD_MSG_9_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m5, m4); \ +b1 = _mm_unpackhi_epi64(m3, m0); \ +} while(0) + + +#define LOAD_MSG_9_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m1, m2); \ +b1 = _mm_blend_epi16(m3, m2, 0xF0); \ +} while(0) + + +#define LOAD_MSG_9_3(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m7, m4); \ +b1 = _mm_unpackhi_epi64(m1, m6); \ +} while(0) + + +#define LOAD_MSG_9_4(b0, b1) \ +do \ +{ \ +b0 = _mm_alignr_epi8(m7, m5, 8); \ +b1 = _mm_unpacklo_epi64(m6, m0); \ +} while(0) + + +#define LOAD_MSG_10_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m0, m1); \ +b1 = _mm_unpacklo_epi64(m2, m3); \ +} while(0) + + +#define LOAD_MSG_10_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m0, m1); \ +b1 = _mm_unpackhi_epi64(m2, m3); \ +} while(0) + + +#define LOAD_MSG_10_3(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m4, m5); \ +b1 = _mm_unpacklo_epi64(m6, m7); \ +} while(0) + + +#define LOAD_MSG_10_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpackhi_epi64(m4, m5); \ +b1 = _mm_unpackhi_epi64(m6, m7); \ +} while(0) + + +#define LOAD_MSG_11_1(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m7, m2); \ +b1 = _mm_unpackhi_epi64(m4, m6); \ +} while(0) + + +#define LOAD_MSG_11_2(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m5, m4); \ +b1 = _mm_alignr_epi8(m3, m7, 8); \ +} while(0) + + +#define LOAD_MSG_11_3(b0, b1) \ +do \ +{ \ +b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); \ +b1 = _mm_unpackhi_epi64(m5, m2); \ +} while(0) + + +#define LOAD_MSG_11_4(b0, b1) \ +do \ +{ \ +b0 = _mm_unpacklo_epi64(m6, m1); \ +b1 = _mm_unpackhi_epi64(m3, m1); \ +} while(0) + + +#endif + diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-round.h b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-round.h new file mode 100644 index 0000000..5dfc1fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b-round.h @@ -0,0 +1,160 @@ +/* + BLAKE2 reference source code package - optimized C implementations + + Written in 2012 by Samuel Neves + + To the extent possible under law, the author(s) have dedicated all copyright + and related and neighboring rights to this software to the public domain + worldwide. This software is distributed without any warranty. 
+ + You should have received a copy of the CC0 Public Domain Dedication along with + this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. +*/ +#pragma once +#ifndef __BLAKE2B_ROUND_H__ +#define __BLAKE2B_ROUND_H__ + +#define LOAD(p) _mm_load_si128( (__m128i *)(p) ) +#define STORE(p,r) _mm_store_si128((__m128i *)(p), r) + +#define LOADU(p) _mm_loadu_si128( (__m128i *)(p) ) +#define STOREU(p,r) _mm_storeu_si128((__m128i *)(p), r) + +#define TOF(reg) _mm_castsi128_ps((reg)) +#define TOI(reg) _mm_castps_si128((reg)) + +#define LIKELY(x) __builtin_expect((x),1) + + +/* Microarchitecture-specific macros */ +#ifndef HAVE_XOP +#ifdef HAVE_SSSE3 +#define _mm_roti_epi64(x, c) \ + (-(c) == 32) ? _mm_shuffle_epi32((x), _MM_SHUFFLE(2,3,0,1)) \ + : (-(c) == 24) ? _mm_shuffle_epi8((x), r24) \ + : (-(c) == 16) ? _mm_shuffle_epi8((x), r16) \ + : (-(c) == 63) ? _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_add_epi64((x), (x))) \ + : _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_slli_epi64((x), 64-(-(c)))) +#else +#define _mm_roti_epi64(r, c) _mm_xor_si128(_mm_srli_epi64( (r), -(c) ),_mm_slli_epi64( (r), 64-(-c) )) +#endif +#else +/* ... */ +#endif + + + +#define G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \ + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); \ + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); \ + \ + row4l = _mm_xor_si128(row4l, row1l); \ + row4h = _mm_xor_si128(row4h, row1h); \ + \ + row4l = _mm_roti_epi64(row4l, -32); \ + row4h = _mm_roti_epi64(row4h, -32); \ + \ + row3l = _mm_add_epi64(row3l, row4l); \ + row3h = _mm_add_epi64(row3h, row4h); \ + \ + row2l = _mm_xor_si128(row2l, row3l); \ + row2h = _mm_xor_si128(row2h, row3h); \ + \ + row2l = _mm_roti_epi64(row2l, -24); \ + row2h = _mm_roti_epi64(row2h, -24); \ + +#define G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \ + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); \ + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); \ + \ + row4l = _mm_xor_si128(row4l, row1l); \ + row4h = _mm_xor_si128(row4h, row1h); \ + \ + row4l = _mm_roti_epi64(row4l, -16); \ + row4h = _mm_roti_epi64(row4h, -16); \ + \ + row3l = _mm_add_epi64(row3l, row4l); \ + row3h = _mm_add_epi64(row3h, row4h); \ + \ + row2l = _mm_xor_si128(row2l, row3l); \ + row2h = _mm_xor_si128(row2h, row3h); \ + \ + row2l = _mm_roti_epi64(row2l, -63); \ + row2h = _mm_roti_epi64(row2h, -63); \ + +#if defined(HAVE_SSSE3) +#define DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \ + t0 = _mm_alignr_epi8(row2h, row2l, 8); \ + t1 = _mm_alignr_epi8(row2l, row2h, 8); \ + row2l = t0; \ + row2h = t1; \ + \ + t0 = row3l; \ + row3l = row3h; \ + row3h = t0; \ + \ + t0 = _mm_alignr_epi8(row4h, row4l, 8); \ + t1 = _mm_alignr_epi8(row4l, row4h, 8); \ + row4l = t1; \ + row4h = t0; + +#define UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \ + t0 = _mm_alignr_epi8(row2l, row2h, 8); \ + t1 = _mm_alignr_epi8(row2h, row2l, 8); \ + row2l = t0; \ + row2h = t1; \ + \ + t0 = row3l; \ + row3l = row3h; \ + row3h = t0; \ + \ + t0 = _mm_alignr_epi8(row4l, row4h, 8); \ + t1 = _mm_alignr_epi8(row4h, row4l, 8); \ + row4l = t1; \ + row4h = t0; +#else + +#define DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \ + t0 = row4l;\ + t1 = row2l;\ + row4l = row3l;\ + row3l = row3h;\ + row3h = row4l;\ + row4l = _mm_unpackhi_epi64(row4h, _mm_unpacklo_epi64(t0, t0)); \ + row4h = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(row4h, row4h)); \ + row2l = _mm_unpackhi_epi64(row2l, _mm_unpacklo_epi64(row2h, row2h)); \ + row2h = _mm_unpackhi_epi64(row2h, _mm_unpacklo_epi64(t1,
t1)) + +#define UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \ + t0 = row3l;\ + row3l = row3h;\ + row3h = t0;\ + t0 = row2l;\ + t1 = row4l;\ + row2l = _mm_unpackhi_epi64(row2h, _mm_unpacklo_epi64(row2l, row2l)); \ + row2h = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(row2h, row2h)); \ + row4l = _mm_unpackhi_epi64(row4l, _mm_unpacklo_epi64(row4h, row4h)); \ + row4h = _mm_unpackhi_epi64(row4h, _mm_unpacklo_epi64(t1, t1)) + +#endif + +#if defined(HAVE_SSE41) +#include "blake2b-load-sse41.h" +#else +#include "blake2b-load-sse2.h" +#endif + +#define ROUND(r) \ + LOAD_MSG_ ##r ##_1(b0, b1); \ + G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ + LOAD_MSG_ ##r ##_2(b0, b1); \ + G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ + DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ + LOAD_MSG_ ##r ##_3(b0, b1); \ + G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ + LOAD_MSG_ ##r ##_4(b0, b1); \ + G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ + UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); + +#endif + diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2b.c b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b.c new file mode 100644 index 0000000..0d8c48a --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b.c @@ -0,0 +1,454 @@ +/* + BLAKE2 reference source code package - optimized C implementations + + Written in 2012 by Samuel Neves + + To the extent possible under law, the author(s) have dedicated all copyright + and related and neighboring rights to this software to the public domain + worldwide. This software is distributed without any warranty. + + You should have received a copy of the CC0 Public Domain Dedication along with + this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
+*/ + +#include <stdint.h> +#include <string.h> +#include <stdio.h> + +#include "blake2.h" +#include "blake2-impl.h" + +#include "blake2-config.h" + + +#include <emmintrin.h> +#if defined(HAVE_SSSE3) +#include <tmmintrin.h> +#endif +#if defined(HAVE_SSE41) +#include <smmintrin.h> +#endif +#if defined(HAVE_AVX) +#include <immintrin.h> +#endif +#if defined(HAVE_XOP) +#include <x86intrin.h> +#endif + +#include "blake2b-round.h" + +ALIGN( 64 ) static const uint64_t blake2b_IV[8] = +{ + 0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, + 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL, + 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, + 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL +}; + +static const uint8_t blake2b_sigma[12][16] = +{ + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } , + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } , + { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } , + { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } , + { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } , + { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } , + { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } , + { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } , + { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } , + { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } , + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } , + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } +}; + + +/* Some helper functions, not necessarily useful */ +static inline int blake2b_set_lastnode( blake2b_state *S ) +{ + S->f[1] = ~0ULL; + return 0; +} + +static inline int blake2b_clear_lastnode( blake2b_state *S ) +{ + S->f[1] = 0ULL; + return 0; +} + +static inline int blake2b_set_lastblock( blake2b_state *S ) +{ + if( S->last_node ) blake2b_set_lastnode( S ); + + S->f[0] = ~0ULL; + return 0; +} + +static inline int blake2b_clear_lastblock( blake2b_state *S ) +{ + if( S->last_node ) blake2b_clear_lastnode( S ); + + S->f[0] = 0ULL; + return 0; +} + + +static inline int blake2b_increment_counter( blake2b_state *S, const uint64_t inc ) +{ +#if __x86_64__ + // ADD/ADC chain + __uint128_t t = ( ( __uint128_t )S->t[1] << 64 ) | S->t[0]; + t += inc; + S->t[0] = ( uint64_t )( t >> 0 ); + S->t[1] = ( uint64_t )( t >> 64 ); +#else + S->t[0] += inc; + S->t[1] += ( S->t[0] < inc ); +#endif + return 0; +} + + +// Parameter-related functions +static inline int blake2b_param_set_digest_length( blake2b_param *P, const uint8_t digest_length ) +{ + P->digest_length = digest_length; + return 0; +} + +static inline int blake2b_param_set_fanout( blake2b_param *P, const uint8_t fanout ) +{ + P->fanout = fanout; + return 0; +} + +static inline int blake2b_param_set_max_depth( blake2b_param *P, const uint8_t depth ) +{ + P->depth = depth; + return 0; +} + +static inline int blake2b_param_set_leaf_length( blake2b_param *P, const uint32_t leaf_length ) +{ + P->leaf_length = leaf_length; + return 0; +} + +static inline int blake2b_param_set_node_offset( blake2b_param *P, const uint64_t node_offset ) +{ + P->node_offset = node_offset; + return 0; +} + +static inline int blake2b_param_set_node_depth( blake2b_param *P, const uint8_t node_depth ) +{ + P->node_depth = node_depth; + return 0; +} + +static inline int blake2b_param_set_inner_length( blake2b_param *P, const uint8_t inner_length ) +{ + P->inner_length = inner_length; + return 0; +} + +static inline int blake2b_param_set_salt( blake2b_param *P, const uint8_t salt[BLAKE2B_SALTBYTES] ) +{ + memcpy( P->salt, salt, BLAKE2B_SALTBYTES ); + return 0; +} + +static inline int blake2b_param_set_personal( blake2b_param *P, const
uint8_t personal[BLAKE2B_PERSONALBYTES] ) +{ + memcpy( P->personal, personal, BLAKE2B_PERSONALBYTES ); + return 0; +} + +static inline int blake2b_init0( blake2b_state *S ) +{ + memset( S, 0, sizeof( blake2b_state ) ); + + int i; + for( i = 0; i < 8; ++i ) S->h[i] = blake2b_IV[i]; + + return 0; +} + +/* init xors IV with input parameter block */ +int blake2b_init_param( blake2b_state *S, const blake2b_param *P ) +{ + uint8_t *p, *h, *v; + //blake2b_init0( S ); + v = ( uint8_t * )( blake2b_IV ); + h = ( uint8_t * )( S->h ); + p = ( uint8_t * )( P ); + /* IV XOR ParamBlock */ + memset( S, 0, sizeof( blake2b_state ) ); + + int i; + for( i = 0; i < BLAKE2B_OUTBYTES; ++i ) h[i] = v[i] ^ p[i]; + + return 0; +} + + +/* Some sort of default parameter block initialization, for sequential blake2b */ +int blake2b_init( blake2b_state *S, const uint8_t outlen ) +{ + if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1; + + const blake2b_param P = + { + outlen, + 0, + 1, + 1, + 0, + 0, + 0, + 0, + {0}, + {0}, + {0} + }; + return blake2b_init_param( S, &P ); +} + +int blake2b_init_key( blake2b_state *S, const uint8_t outlen, const void *key, const uint8_t keylen ) +{ + if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1; + + if ( ( !keylen ) || keylen > BLAKE2B_KEYBYTES ) return -1; + + const blake2b_param P = + { + outlen, + keylen, + 1, + 1, + 0, + 0, + 0, + 0, + {0}, + {0}, + {0} + }; + + if( blake2b_init_param( S, &P ) < 0 ) + return 0; + + { + uint8_t block[BLAKE2B_BLOCKBYTES]; + memset( block, 0, BLAKE2B_BLOCKBYTES ); + memcpy( block, key, keylen ); + blake2b_update( S, block, BLAKE2B_BLOCKBYTES ); + secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ + } + return 0; +} + +int blake2b_init_parametrized( blake2b_state *S, const blake2b_param *P, const void *key ) +{ + if ( ( !P->digest_length ) || ( P->digest_length > BLAKE2B_OUTBYTES ) ) return -1; + + if ( P->key_length > BLAKE2B_KEYBYTES ) return -1; + + if( blake2b_init_param( S, P ) < 0 ) + return 0; + + if (P->key_length > 0) + { + uint8_t block[BLAKE2B_BLOCKBYTES]; + memset( block, 0, BLAKE2B_BLOCKBYTES ); + memcpy( block, key, P->key_length ); + blake2b_update( S, block, BLAKE2B_BLOCKBYTES ); + secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ + } + return 0; +} + + +static inline int blake2b_compress( blake2b_state *S, const uint8_t block[BLAKE2B_BLOCKBYTES] ) +{ + __m128i row1l, row1h; + __m128i row2l, row2h; + __m128i row3l, row3h; + __m128i row4l, row4h; + __m128i b0, b1; + __m128i t0, t1; +#if defined(HAVE_SSSE3) && !defined(HAVE_XOP) + const __m128i r16 = _mm_setr_epi8( 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9 ); + const __m128i r24 = _mm_setr_epi8( 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10 ); +#endif +#if defined(HAVE_SSE41) + const __m128i m0 = LOADU( block + 00 ); + const __m128i m1 = LOADU( block + 16 ); + const __m128i m2 = LOADU( block + 32 ); + const __m128i m3 = LOADU( block + 48 ); + const __m128i m4 = LOADU( block + 64 ); + const __m128i m5 = LOADU( block + 80 ); + const __m128i m6 = LOADU( block + 96 ); + const __m128i m7 = LOADU( block + 112 ); +#else + const uint64_t m0 = ( ( uint64_t * )block )[ 0]; + const uint64_t m1 = ( ( uint64_t * )block )[ 1]; + const uint64_t m2 = ( ( uint64_t * )block )[ 2]; + const uint64_t m3 = ( ( uint64_t * )block )[ 3]; + const uint64_t m4 = ( ( uint64_t * )block )[ 4]; + const uint64_t m5 = ( ( uint64_t * )block )[ 5]; + const uint64_t m6 = ( ( uint64_t * )block )[ 6]; + const uint64_t m7 = ( ( 
uint64_t * )block )[ 7]; + const uint64_t m8 = ( ( uint64_t * )block )[ 8]; + const uint64_t m9 = ( ( uint64_t * )block )[ 9]; + const uint64_t m10 = ( ( uint64_t * )block )[10]; + const uint64_t m11 = ( ( uint64_t * )block )[11]; + const uint64_t m12 = ( ( uint64_t * )block )[12]; + const uint64_t m13 = ( ( uint64_t * )block )[13]; + const uint64_t m14 = ( ( uint64_t * )block )[14]; + const uint64_t m15 = ( ( uint64_t * )block )[15]; +#endif + row1l = LOAD( &S->h[0] ); + row1h = LOAD( &S->h[2] ); + row2l = LOAD( &S->h[4] ); + row2h = LOAD( &S->h[6] ); + row3l = LOAD( &blake2b_IV[0] ); + row3h = LOAD( &blake2b_IV[2] ); + row4l = _mm_xor_si128( LOAD( &blake2b_IV[4] ), LOAD( &S->t[0] ) ); + row4h = _mm_xor_si128( LOAD( &blake2b_IV[6] ), LOAD( &S->f[0] ) ); + ROUND( 0 ); + ROUND( 1 ); + ROUND( 2 ); + ROUND( 3 ); + ROUND( 4 ); + ROUND( 5 ); + ROUND( 6 ); + ROUND( 7 ); + ROUND( 8 ); + ROUND( 9 ); + ROUND( 10 ); + ROUND( 11 ); + row1l = _mm_xor_si128( row3l, row1l ); + row1h = _mm_xor_si128( row3h, row1h ); + STORE( &S->h[0], _mm_xor_si128( LOAD( &S->h[0] ), row1l ) ); + STORE( &S->h[2], _mm_xor_si128( LOAD( &S->h[2] ), row1h ) ); + row2l = _mm_xor_si128( row4l, row2l ); + row2h = _mm_xor_si128( row4h, row2h ); + STORE( &S->h[4], _mm_xor_si128( LOAD( &S->h[4] ), row2l ) ); + STORE( &S->h[6], _mm_xor_si128( LOAD( &S->h[6] ), row2h ) ); + return 0; +} + + +int blake2b_update( blake2b_state *S, const uint8_t *in, uint64_t inlen ) +{ + while( inlen > 0 ) + { + size_t left = S->buflen; + size_t fill = 2 * BLAKE2B_BLOCKBYTES - left; + + if( inlen > fill ) + { + memcpy( S->buf + left, in, fill ); // Fill buffer + S->buflen += fill; + blake2b_increment_counter( S, BLAKE2B_BLOCKBYTES ); + blake2b_compress( S, S->buf ); // Compress + memcpy( S->buf, S->buf + BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES ); // Shift buffer left + S->buflen -= BLAKE2B_BLOCKBYTES; + in += fill; + inlen -= fill; + } + else // inlen <= fill + { + memcpy( S->buf + left, in, inlen ); + S->buflen += inlen; // Be lazy, do not compress + in += inlen; + inlen -= inlen; + } + } + + return 0; +} + + +int blake2b_final( blake2b_state *S, uint8_t *out, uint8_t outlen ) +{ + if( S->buflen > BLAKE2B_BLOCKBYTES ) + { + blake2b_increment_counter( S, BLAKE2B_BLOCKBYTES ); + blake2b_compress( S, S->buf ); + S->buflen -= BLAKE2B_BLOCKBYTES; + memcpy( S->buf, S->buf + BLAKE2B_BLOCKBYTES, S->buflen ); + } + + blake2b_increment_counter( S, S->buflen ); + blake2b_set_lastblock( S ); + memset( S->buf + S->buflen, 0, 2 * BLAKE2B_BLOCKBYTES - S->buflen ); /* Padding */ + blake2b_compress( S, S->buf ); + memcpy( out, &S->h[0], outlen ); + return 0; +} + + +int blake2b( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen ) +{ + blake2b_state S[1]; + + /* Verify parameters */ + if ( NULL == in ) return -1; + + if ( NULL == out ) return -1; + + if( NULL == key ) keylen = 0; + + if( keylen ) + { + if( blake2b_init_key( S, outlen, key, keylen ) < 0 ) return -1; + } + else + { + if( blake2b_init( S, outlen ) < 0 ) return -1; + } + + blake2b_update( S, ( uint8_t * )in, inlen ); + blake2b_final( S, out, outlen ); + return 0; +} + +#if defined(SUPERCOP) +int crypto_hash( unsigned char *out, unsigned char *in, unsigned long long inlen ) +{ + return blake2b( out, in, NULL, BLAKE2B_OUTBYTES, inlen, 0 ); +} +#endif + +#if defined(BLAKE2B_SELFTEST) +#include <string.h> +#include "blake2-kat.h" +int main( int argc, char **argv ) +{ + uint8_t key[BLAKE2B_KEYBYTES]; + uint8_t buf[KAT_LENGTH]; + + for( size_t i = 0; i <
BLAKE2B_KEYBYTES; ++i ) + key[i] = ( uint8_t )i; + + for( size_t i = 0; i < KAT_LENGTH; ++i ) + buf[i] = ( uint8_t )i; + + for( size_t i = 0; i < KAT_LENGTH; ++i ) + { + uint8_t hash[BLAKE2B_OUTBYTES]; + blake2b( hash, buf, key, BLAKE2B_OUTBYTES, i, BLAKE2B_KEYBYTES ); + + if( 0 != memcmp( hash, blake2b_keyed_kat[i], BLAKE2B_OUTBYTES ) ) + { + puts( "error" ); + return -1; + } + } + + puts( "ok" ); + return 0; +} +#endif + diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2b.go b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b.go new file mode 100644 index 0000000..9b0dc40 --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b.go @@ -0,0 +1,162 @@ +// Package blake2 provides a Go wrapper around an optimized, public domain +// implementation of BLAKE2. +// The cryptographic hash function BLAKE2 is an improved version of the SHA-3 +// finalist BLAKE. Like BLAKE or SHA-3, BLAKE2 offers the highest security, yet +// is fast as MD5 on 64-bit platforms and requires at least 33% less RAM than +// SHA-2 or SHA-3 on low-end systems. +package blake2 + +import ( + // #cgo CFLAGS: -O3 + // #include "blake2.h" + "C" + "hash" + "unsafe" +) + +type digest struct { + state *C.blake2b_state + key []byte + param C.blake2b_param + isLastNode bool +} + +const ( + SaltSize = C.BLAKE2B_SALTBYTES + PersonalSize = C.BLAKE2B_PERSONALBYTES +) + +// Tree contains parameters for tree hashing. Each node in the tree +// can be hashed concurrently, and incremental changes can be done in +// a Merkle tree fashion. +type Tree struct { + // Fanout: how many children each tree node has. 0 for unlimited. + // 1 means sequential mode. + Fanout uint8 + // Maximal depth of the tree. Beyond this height, nodes are just + // added to the root of the tree. 255 for unlimited. 1 means + // sequential mode. + MaxDepth uint8 + // Leaf maximal byte length, how much data each leaf summarizes. 0 + // for unlimited or sequential mode. + LeafSize uint32 + // Depth of this node. 0 for leaves or sequential mode. + NodeDepth uint8 + // Offset of this node within this level of the tree. 0 for the + // first, leftmost, leaf, or sequential mode. + NodeOffset uint64 + // Inner hash byte length, in the range [0, 64]. 0 for sequential + // mode. + InnerHashSize uint8 + + // IsLastNode indicates this node is the last, rightmost, node of + // a level of the tree. + IsLastNode bool +} + +// Config contains parameters for the hash function that affect its +// output. +type Config struct { + // Digest byte length, in the range [1, 64]. If 0, default size of 64 bytes is used. + Size uint8 + // Key is up to 64 arbitrary bytes, for keyed hashing mode. Can be nil. + Key []byte + // Salt is up to 16 arbitrary bytes, used to randomize the hash. Can be nil. + Salt []byte + // Personal is up to 16 arbitrary bytes, used to make the hash + // function unique for each application. Can be nil. + Personal []byte + + // Parameters for tree hashing. Set to nil to use default + // sequential mode. + Tree *Tree +} + +// New returns a new custom BLAKE2b hash. +// +// If config is nil, uses a 64-byte digest size. 
+func New(config *Config) hash.Hash { + d := &digest{ + param: C.blake2b_param{ + digest_length: 64, + fanout: 1, + depth: 1, + }, + } + if config != nil { + if config.Size != 0 { + d.param.digest_length = C.uint8_t(config.Size) + } + if len(config.Key) > 0 { + // let the C library worry about the exact limit; we just + // worry about fitting into the variable + if len(config.Key) > 255 { + panic("blake2b key too long") + } + d.param.key_length = C.uint8_t(len(config.Key)) + d.key = config.Key + } + salt := (*[C.BLAKE2B_SALTBYTES]byte)(unsafe.Pointer(&d.param.salt[0])) + copy(salt[:], config.Salt) + personal := (*[C.BLAKE2B_SALTBYTES]byte)(unsafe.Pointer(&d.param.personal[0])) + copy(personal[:], config.Personal) + + if config.Tree != nil { + d.param.fanout = C.uint8_t(config.Tree.Fanout) + d.param.depth = C.uint8_t(config.Tree.MaxDepth) + d.param.leaf_length = C.uint32_t(config.Tree.LeafSize) + d.param.node_offset = C.uint64_t(config.Tree.NodeOffset) + d.param.node_depth = C.uint8_t(config.Tree.NodeDepth) + d.param.inner_length = C.uint8_t(config.Tree.InnerHashSize) + + d.isLastNode = config.Tree.IsLastNode + } + } + d.Reset() + return d +} + +// NewBlake2B returns a new 512-bit BLAKE2B hash. +func NewBlake2B() hash.Hash { + return New(&Config{Size: 64}) +} + +// NewKeyedBlake2B returns a new 512-bit BLAKE2B hash with the given secret key. +func NewKeyedBlake2B(key []byte) hash.Hash { + return New(&Config{Size: 64, Key: key}) +} + +func (*digest) BlockSize() int { + return 128 +} + +func (d *digest) Size() int { + return int(d.param.digest_length) +} + +func (d *digest) Reset() { + d.state = new(C.blake2b_state) + var key unsafe.Pointer + if d.param.key_length > 0 { + key = unsafe.Pointer(&d.key[0]) + } + if C.blake2b_init_parametrized(d.state, &d.param, key) < 0 { + panic("blake2: unable to reset") + } + if d.isLastNode { + d.state.last_node = C.uint8_t(1) + } +} + +func (d *digest) Sum(buf []byte) []byte { + digest := make([]byte, d.Size()) + C.blake2b_final(d.state, (*C.uint8_t)(&digest[0]), C.uint8_t(d.Size())) + return append(buf, digest...) 
+} + +func (d *digest) Write(buf []byte) (int, error) { + if len(buf) > 0 { + C.blake2b_update(d.state, (*C.uint8_t)(&buf[0]), C.uint64_t(len(buf))) + } + return len(buf), nil +} diff --git a/Godeps/_workspace/src/github.com/codahale/blake2/blake2b_test.go b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b_test.go new file mode 100644 index 0000000..4f10c8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/codahale/blake2/blake2b_test.go @@ -0,0 +1,636 @@ +package blake2 + +import ( + "fmt" + "testing" +) + +var unkeyed2B = []string{ + "786A02F742015903C6C6FD852552D272912F4740E15847618A86E217F71F5419D25E1031AFEE585313896444934EB04B903A685B1448B755D56F701AFE9BE2CE", + "2FA3F686DF876995167E7C2E5D74C4C7B6E48F8068FE0E44208344D480F7904C36963E44115FE3EB2A3AC8694C28BCB4F5A0F3276F2E79487D8219057A506E4B", + "1C08798DC641ABA9DEE435E22519A4729A09B2BFE0FF00EF2DCD8ED6F8A07D15EAF4AEE52BBF18AB5608A6190F70B90486C8A7D4873710B1115D3DEBBB4327B5", + "40A374727302D9A4769C17B5F409FF32F58AA24FF122D7603E4FDA1509E919D4107A52C57570A6D94E50967AEA573B11F86F473F537565C66F7039830A85D186", + "77DDF4B14425EB3D053C1E84E3469D92C4CD910ED20F92035E0C99D8A7A86CECAF69F9663C20A7AA230BC82F60D22FB4A00B09D3EB8FC65EF547FE63C8D3DDCE", + "CBAA0BA7D482B1F301109AE41051991A3289BC1198005AF226C5E4F103B66579F461361044C8BA3439FF12C515FB29C52161B7EB9C2837B76A5DC33F7CB2E2E8", + "F95D45CF69AF5C2023BDB505821E62E85D7CAEDF7BEDA12C0248775B0C88205EEB35AF3A90816F6608CE7DD44EC28DB1140614E1DDEBF3AA9CD1843E0FAD2C36", + "8F945BA700F2530E5C2A7DF7D5DCE0F83F9EFC78C073FE71AE1F88204A4FD1CF70A073F5D1F942ED623AA16E90A871246C90C45B621B3401A5DDBD9DF6264165", + "E998E0DC03EC30EB99BB6BFAAF6618ACC620320D7220B3AF2B23D112D8E9CB1262F3C0D60D183B1EE7F096D12DAE42C958418600214D04F5ED6F5E718BE35566", + "6A9A090C61B3410AEDE7EC9138146CEB2C69662F460C3DA53C6515C1EB31F41CA3D280E567882F95CF664A94147D78F42CFC714A40D22EF19470E053493508A2", + "29102511D749DB3CC9B4E335FA1F5E8FACA8421D558F6A3F3321D50D044A248BA595CFC3EFD3D2ADC97334DA732413F5CBF4751C362BA1D53862AC1E8DABEEE8", + "C97A4779D47E6F77729B5917D0138ABB35980AB641BD73A8859EB1AC98C05362ED7D608F2E9587D6BA9E271D343125D40D933A8ED04EC1FE75EC407C7A53C34E", + "10F0DC91B9F845FB95FAD6860E6CE1ADFA002C7FC327116D44D047CD7D5870D772BB12B5FAC00E02B08AC2A0174D0446C36AB35F14CA31894CD61C78C849B48A", + "DEA9101CAC62B8F6A3C650F90EEA5BFAE2653A4EAFD63A6D1F0F132DB9E4F2B1B662432EC85B17BCAC41E775637881F6AAB38DD66DCBD080F0990A7A6E9854FE", + "441FFAA08CD79DFF4AFC9B9E5B5620EEC086730C25F661B1D6FBFBD1CEC3148DD72258C65641F2FCA5EB155FADBCABB13C6E21DC11FAF72C2A281B7D56145F19", + "444B240FE3ED86D0E2EF4CE7D851EDDE22155582AA0914797B726CD058B6F45932E0E129516876527B1DD88FC66D7119F4AB3BED93A61A0E2D2D2AEAC336D958", + "BFBABBEF45554CCFA0DC83752A19CC35D5920956B301D558D772282BC867009168E9E98606BB5BA73A385DE5749228C925A85019B71F72FE29B3CD37CA52EFE6", + "9C4D0C3E1CDBBF485BEC86F41CEC7C98373F0E09F392849AAA229EBFBF397B22085529CB7EF39F9C7C2222A514182B1EFFAA178CC3687B1B2B6CBCB6FDEB96F8", + "477176B3BFCBADD7657C23C24625E4D0D674D1868F006006398AF97AA41877C8E70D3D14C3BBC9BBCDCEA801BD0E1599AF1F3EEC67405170F4E26C964A57A8B7", + "A78C490EDA3173BB3F10DEE52F110FB1C08E0302230B85DDD7C11257D92DE148785EF00C039C0BB8EB9808A35B2D8C080F572859714C9D4069C5BCAF090E898E", + "58D023397BEB5B4145CB2255B07D74290B36D9FD1E594AFBD8EEA47C205B2EFBFE6F46190FAF95AF504AB072E36F6C85D767A321BFD7F22687A4ABBF494A689C", + "4001EC74D5A46FD29C2C3CDBE5D1B9F20E51A941BE98D2A4E1E2FBF866A672121DB6F81A514CFD10E7358D571BDBA48E4CE708B9D124894BC0B5ED554935F73A", + 
"CCD1B22DAB6511225D2401EA2D8625D206A12473CC732B615E5640CEFFF0A4ADF971B0E827A619E0A80F5DB9CCD0962329010D07E34A2064E731C520817B2183", + "B4A0A9E3574EDB9E1E72AA31E39CC5F30DBF943F8CABC408449654A39131E66D718A18819143E3EA96B4A1895988A1C0056CF2B6E04F9AC19D657383C2910C44", + "447BECAB16630608D39F4F058B16F7AF95B85A76AA0FA7CEA2B80755FB76E9C804F2CA78F02643C915FBF2FCE5E19DE86000DE03B18861815A83126071F8A37B", + "54E6DAB9977380A5665822DB93374EDA528D9BEB626F9B94027071CB26675E112B4A7FEC941EE60A81E4D2EA3FF7BC52CFC45DFBFE735A1C646B2CF6D6A49B62", + "3EA62625949E3646704D7E3C906F82F6C028F540F5F72A794B0C57BF97B7649BFEB90B01D3CA3E829DE21B3826E6F87014D3C77350CB5A15FF5D468A81BEC160", + "213CFE145C54A33691569980E5938C8883A46D84D149C8FF1A67CD287B4D49C6DA69D3A035443DB085983D0EFE63706BD5B6F15A7DA459E8D50A19093DB55E80", + "5716C4A38F38DB104E494A0A27CBE89A26A6BB6F499EC01C8C01AA7CB88497E75148CD6EEE12A7168B6F78AB74E4BE749251A1A74C38C86D6129177E2889E0B6", + "030460A98BDF9FF17CD96404F28FC304F2B7C04EAADE53677FD28F788CA22186B8BC80DD21D17F8549C711AFF0E514E19D4E15F5990252A03E082F28DC2052F6", + "19E7F1CCEE88A10672333E390CF22013A8C734C6CB9EAB41F17C3C8032A2E4ACA0569EA36F0860C7A1AF28FA476840D66011168859334A9E4EF9CC2E61A0E29E", + "29F8B8C78C80F2FCB4BDF7825ED90A70D625FF785D262677E250C04F3720C888D03F8045E4EDF3F5285BD39D928A10A7D0A5DF00B8484AC2868142A1E8BEA351", + "5C52920A7263E39D57920CA0CB752AC6D79A04FEF8A7A216A1ECB7115CE06D89FD7D735BD6F4272555DBA22C2D1C96E6352322C62C5630FDE0F4777A76C3DE2C", + "83B098F262251BF660064A9D3511CE7687A09E6DFBB878299C30E93DFB43A9314DB9A600337DB26EBEEDAF2256A96DABE9B29E7573AD11C3523D874DDE5BE7ED", + "9447D98AA5C9331352F43D3E56D0A9A9F9581865998E2885CC56DD0A0BD5A7B50595BD10F7529BCD31F37DC16A1465D594079667DA2A3FCB70401498837CEDEB", + "867732F2FEEB23893097561AC710A4BFF453BE9CFBEDBA8BA324F9D312A82D732E1B83B829FDCD177B882CA0C1BF544B223BE529924A246A63CF059BFDC50A1B", + "F15AB26D4CDFCF56E196BB6BA170A8FCCC414DE9285AFD98A3D3CF2FB88FCBC0F19832AC433A5B2CC2392A4CE34332987D8D2C2BEF6C3466138DB0C6E42FA47B", + "2813516D68ED4A08B39D648AA6AACD81E9D655ECD5F0C13556C60FDF0D333EA38464B36C02BACCD746E9575E96C63014F074AE34A0A25B320F0FBEDD6ACF7665", + "D3259AFCA8A48962FA892E145ACF547F26923AE8D4924C8A531581526B04B44C7AF83C643EF5A0BC282D36F3FB04C84E28B351F40C74B69DC7840BC717B6F15F", + "F14B061AE359FA31B989E30332BFE8DE8CC8CDB568E14BE214A2223B84CAAB7419549ECFCC96CE2ACEC119485D87D157D3A8734FC426597D64F36570CEAF224D", + "55E70B01D1FBF8B23B57FB62E26C2CE54F13F8FA2464E6EB98D16A6117026D8B90819012496D4071EBE2E59557ECE3519A7AA45802F9615374877332B73490B3", + "25261EB296971D6E4A71B2928E64839C67D422872BF9F3C31993615222DE9F8F0B2C4BE8548559B4B354E736416E3218D4E8A1E219A4A6D43E1A9A521D0E75FC", + "08307F347C41294E34BB54CB42B1522D22F824F7B6E5DB50FDA096798E181A8F026FA27B4AE45D52A62CAF9D5198E24A4913C6671775B2D723C1239BFBF016D7", + "1E5C62E7E9BFA1B118747A2DE08B3CA10112AF96A46E4B22C3FC06F9BFEE4EB5C49E057A4A4886234324572576BB9B5ECFDE0D99B0DE4F98EC16E4D1B85FA947", + "C74A77395FB8BC126447454838E561E962853DC7EB49A1E3CB67C3D0851F3E39517BE8C350AC910903D49CD2BFDF545C99316D0346170B739F0ADD5D533C2CFC", + "0DD57B423CC01EB2861391EB886A0D17079B933FC76EB3FC08A19F8A74952CB68F6BCDC644F77370966E4D13E80560BCF082EF0479D48FBBAB4DF03B53A4E178", + "4D8DC3923EDCCDFCE70072398B8A3DA5C31FCB3EE3B645C85F717CBAEB4B673A19394425A585BFB464D92F1597D0B754D163F97CED343B25DB5A70EF48EBB34F", + "F0A50553E4DFB0C4E3E3D3BA82034857E3B1E50918F5B8A7D698E10D242B0FB544AF6C92D0C3AAF9932220416117B4E78ECB8A8F430E13B82A5915290A5819C5", + 
"B15543F3F736086627CC5365E7E8988C2EF155C0FD4F428961B00D1526F04D6D6A658B4B8ED32C5D8621E7F4F8E8A933D9ECC9DD1B8333CBE28CFC37D9719E1C", + "7B4FA158E415FEF023247264CBBE15D16D91A44424A8DB707EB1E2033C30E9E1E7C8C0864595D2CB8C580EB47E9D16ABBD7E44E824F7CEDB7DEF57130E52CFE9", + "60424FF23234C34DC9687AD502869372CC31A59380186BC2361C835D972F49666EB1AC69629DE646F03F9B4DB9E2ACE093FBFDF8F20AB5F98541978BE8EF549F", + "7406018CE704D84F5EB9C79FEA97DA345699468A350EE0B2D0F3A4BF2070304EA862D72A51C57D3064947286F531E0EAF7563702262E6C724ABF5ED8C8398D17", + "14EF5C6D647B3BD1E6E32006C231199810DE5C4DC88E70240273B0EA18E651A3EB4F5CA3114B8A56716969C7CDA27E0C8DB832AD5E89A2DC6CB0ADBE7D93ABD1", + "38CF6C24E3E08BCF1F6CF3D1B1F65B905239A3118033249E448113EC632EA6DC346FEEB2571C38BD9A7398B2221280328002B23E1A45ADAFFE66D93F6564EAA2", + "6CD7208A4BC7E7E56201BBBA02A0F489CD384ABE40AFD4222F158B3D986EE72A54C50FB64FD4ED2530EDA2C8AF2928A0DA6D4F830AE1C9DB469DFD970F12A56F", + "659858F0B5C9EDAB5B94FD732F6E6B17C51CC096104F09BEB3AFC3AA467C2ECF885C4C6541EFFA9023D3B5738AE5A14D867E15DB06FE1F9D1127B77E1AABB516", + "26CCA0126F5D1A813C62E5C71001C046F9C92095704550BE5873A495A999AD010A4F79491F24F286500ADCE1A137BC2084E4949F5B7294CEFE51ECAFF8E95CBA", + "4147C1F55172788C5567C561FEEF876F621FFF1CE87786B8467637E70DFBCD0DBDB6415CB600954AB9C04C0E457E625B407222C0FE1AE21B2143688ADA94DC58", + "5B1BF154C62A8AF6E93D35F18F7F90ABB16A6EF0E8D1AECD118BF70167BAB2AF08935C6FDC0663CE74482D17A8E54B546D1C296631C65F3B522A515839D43D71", + "9F600419A4E8F4FB834C24B0F7FC13BF4E279D98E8A3C765EE934917403E3A66097182EA21453CB63EBBE8B73A9C2167596446438C57627F330BADD4F569F7D6", + "457EF6466A8924FD8011A34471A5A1AC8CCD9BD0D07A97414AC943021CE4B9E4B9C8DB0A28F016ED43B1542481990022147B313E194671131E708DD43A3ED7DC", + "9997B2194D9AF6DFCB9143F41C0ED83D3A3F4388361103D38C2A49B280A581212715FD908D41C651F5C715CA38C0CE2830A37E00E508CED1BCDC320E5E4D1E2E", + "5C6BBF16BAA180F986BD40A1287ED4C549770E7284858FC47BC21AB95EBBF3374B4EE3FD9F2AF60F3395221B2ACC76F2D34C132954049F8A3A996F1E32EC84E5", + "D10BF9A15B1C9FC8D41F89BB140BF0BE08D2F3666176D13BAAC4D381358AD074C9D4748C300520EB026DAEAEA7C5B158892FDE4E8EC17DC998DCD507DF26EB63", + "2FC6E69FA26A89A5ED269092CB9B2A449A4409A7A44011EECAD13D7C4B0456602D402FA5844F1A7A758136CE3D5D8D0E8B86921FFFF4F692DD95BDC8E5FF0052", + "FCBE8BE7DCB49A32DBDF239459E26308B84DFF1EA480DF8D104EEFF34B46FAE98627B450C2267D48C0946A697C5B59531452AC0484F1C84E3A33D0C339BB2E28", + "A19093A6E3BCF5952F850F2030F69B9606F147F90B8BAEE3362DA71D9F35B44EF9D8F0A7712BA1877FDDCD2D8EA8F1E5A773D0B745D4725605983A2DE901F803", + "3C2006423F73E268FA59D2920377EB29A4F9A8B462BE15983EE3B85AE8A78E992633581A9099893B63DB30241C34F643027DC878279AF5850D7E2D4A2653073A", + "D0F2F2E3787653F77CCE2FA24835785BBD0C433FC779465A115149905A9DD1CB827A628506D457FCF124A0C2AEF9CE2D2A0A0F63545570D8667FF9E2EBA07334", + "78A9FC048E25C6DCB5DE45667DE8FFDD3A93711141D594E9FA62A959475DA6075EA8F0916E84E45AD911B75467077EE52D2C9AEBF4D58F20CE4A3A00458B05D4", + "45813F441769AB6ED37D349FF6E72267D76AE6BB3E3C612EC05C6E02A12AF5A37C918B52BF74267C3F6A3F183A8064FF84C07B193D08066789A01ACCDB6F9340", + "956DA1C68D83A7B881E01B9A966C3C0BF27F68606A8B71D457BD016D4C41DD8A380C709A296CB4C6544792920FD788835771A07D4A16FB52ED48050331DC4C8B", + "DF186C2DC09CAA48E14E942F75DE5AC1B7A21E4F9F072A5B371E09E07345B0740C76177B01278808FEC025EDED9822C122AFD1C63E6F0CE2E32631041063145C", + "87475640966A9FDCD6D3A3B5A2CCA5C08F0D882B10243C0EC1BF3C6B1C37F2CD3212F19A057864477D5EAF8FAED73F2937C768A0AF415E84BBCE6BD7DE23B660", + 
"C3B573BBE10949A0FBD4FF884C446F2229B76902F9DFDBB8A0353DA5C83CA14E8151BBAAC82FD1576A009ADC6F1935CF26EDD4F1FB8DA483E6C5CD9D8923ADC3", + "B09D8D0BBA8A7286E43568F7907550E42036D674E3C8FC34D8CA46F771D6466B70FB605875F6A863C877D12F07063FDC2E90CCD459B1910DCD52D8F10B2B0A15", + "AF3A22BF75B21ABFB0ACD54422BA1B7300A952EFF02EBEB65B5C234471A98DF32F4F9643CE1904108A168767924280BD76C83F8C82D9A79D9259B195362A2A04", + "BF4FF2221B7E6957A724CD964AA3D5D0D9941F540413752F4699D8101B3E537508BF09F8508B317736FFD265F2847AA7D84BD2D97569C49D632AED9945E5FA5E", + "9C6B6B78199B1BDACB4300E31479FA622A6B5BC80D4678A6078F88A8268CD7206A2799E8D4621A464EF6B43DD8ADFFE97CAF221B22B6B8778B149A822AEFBB09", + "890656F09C99D280B5ECB381F56427B813751BC652C7828078B23A4AF83B4E3A61FDBAC61F89BEE84EA6BEE760C047F25C6B0A201C69A38FD6FD971AF18588BB", + "31A046F7882FFE6F83CE472E9A0701832EC7B3F76FBCFD1DF60FE3EA48FDE1651254247C3FD95E100F9172731E17FD5297C11F4BB328363CA361624A81AF797C", + "27A60B2D00E7A671D47D0AEC2A686A0AC04B52F40AB6629028EB7D13F4BAA99AC0FE46EE6C814944F2F4B4D20E9378E4847EA44C13178091E277B87EA7A55711", + "8B5CCEF194162C1F19D68F91E0B0928F289EC5283720840C2F73D253111238DCFE94AF2B59C2C1CA2591901A7BC060E7459B6C47DF0F71701A35CC0AA831B5B6", + "57AB6C4B2229AEB3B70476D803CD63812F107CE6DA17FED9B17875E8F86C724F49E024CBF3A1B8B119C50357652B81879D2ADE2D588B9E4F7CEDBA0E4644C9EE", + "0190A8DAC320A739F322E15731AA140DDAF5BED294D5C82E54FEF29F214E18AAFAA84F8BE99AF62950266B8F901F15DD4C5D35516FC35B4CAB2E96E4695BBE1C", + "D14D7C4C415EEB0E10B159224BEA127EBD84F9591C702A330F5BB7BB7AA44EA39DE6ED01F18DA7ADF40CFB97C5D152C27528824B21E239526AF8F36B214E0CFB", + "BE28C4BE706970488FAC7D29C3BD5C4E986085C4C3332F1F3FD30973DB614164BA2F31A78875FFDC150325C88327A9443ED04FDFE5BE93876D1628560C764A80", + "031DA1069E3A2E9C3382E436FFD79DF74B1CA6A8ADB2DEABE676AB45994CBC054F037D2F0EACE858D32C14E2D1C8B46077308E3BDC2C1B53172ECF7A8C14E349", + "4665CEF8BA4DB4D0ACB118F2987F0BB09F8F86AA445AA3D5FC9A8B346864787489E8FCECC125D17E9B56E12988EAC5ECC7286883DB0661B8FF05DA2AFFF30FE4", + "63B7032E5F930CC9939517F9E986816CFBEC2BE59B9568B13F2EAD05BAE7777CAB620C6659404F7409E4199A3BE5F7865AA7CBDF8C4253F7E8219B1BD5F46FEA", + "9F09BF093A2B0FF8C2634B49E37F1B2135B447AA9144C9787DBFD92129316C99E88AAB8A21FDEF2372D1189AEC500F95775F1F92BFB45545E4259FB9B7B02D14", + "F9F8493C68088807DF7F6A2693D64EA59F03E9E05A223E68524CA32195A4734B654FCEA4D2734C866CF95C889FB10C49159BE2F5043DC98BB55E02EF7BDCB082", + "3C9A7359AB4FEBCE07B20AC447B06A240B7FE1DAE5439C49B60B5819F7812E4C172406C1AAC316713CF0DDED1038077258E2EFF5B33913D9D95CAEB4E6C6B970", + "AD6AAB8084510E822CFCE8625D62CF4DE655F4763884C71E80BAB9AC9D5318DBA4A6033ED29084E65216C031606CA17615DCFE3BA11D26851AE0999CA6E232CF", + "156E9E6261374C9DC884F36E70F0FE1AB9297997B836FA7D170A9C9EBF575B881E7BCEA44D6C0248D35597907154828955BE19135852F9228815ECA024A8ADFB", + "4215407633F4CCA9B6788BE93E6AA3D963C7D6CE4B147247099F46A3ACB500A30038CB3E788C3D29F132AD844E80E9E99251F6DB96ACD8A091CFC770AF53847B", + "1C077E279DE6548523502B6DF800FFDAB5E2C3E9442EB838F58C295F3B147CEF9D701C41C321283F00C71AFFA0619310399126295B78DD4D1A74572EF9ED5135", + "F07A555F49FE481CF4CD0A87B71B82E4A95064D06677FDD90A0EB598877BA1C83D4677B393C3A3B6661C421F5B12CB99D20376BA7275C2F3A8F5A9B7821720DA", + "B5911B380D20C7B04323E4026B38E200F534259233B581E02C1E3E2D8438D6C66D5A4EB201D5A8B75072C4EC29106334DA70BC79521B0CED2CFD533F5FF84F95", + "01F070A09BAE911296361F91AA0E8E0D09A7725478536D9D48C5FE1E5E7C3C5B9B9D6EB07796F6DA57AE562A7D70E882E37ADFDE83F0C433C2CD363536BB22C8", + 
"6F793EB4374A48B0775ACAF9ADCF8E45E54270C9475F004AD8D5973E2ACA52747FF4ED04AE967275B9F9EB0E1FF75FB4F794FA8BE9ADD7A41304868D103FAB10", + "965F20F139765FCC4CE4BA3794675863CAC24DB472CD2B799D035BCE3DBEA502DA7B524865F6B811D8C5828D3A889646FE64A380DA1AA7C7044E9F245DCED128", + "EC295B5783601244C30E4641E3B45BE222C4DCE77A58700F53BC8EC52A941690B4D0B087FB6FCB3F39832B9DE8F75EC20BD43079811749CDC907EDB94157D180", + "61C72F8CCC91DBB54CA6750BC489672DE09FAEDB8FDD4F94FF2320909A303F5D5A98481C0BC1A625419FB4DEBFBF7F8A53BB07EC3D985E8EA11E72D559940780", + "AFD8145B259EEFC8D12620C3C5B03E1ED8FD2CCEFE0365078C80FD42C1770E28B44948F27E65A1886690110DB814397B68E43D80D1BA16DFA358E739C898CFA3", + "552FC7893CF1CE933ADA35C0DA98844E41545E244C3157A1428D7B4C21F9CD7E4071AED77B7CA9F1C38FBA32237412EF21A342742EC8324378F21E507FAFDD88", + "467A33FBADF5EBC52596EF86AAAEFC6FABA8EE651B1CE04DE368A03A5A9040EF2835E00ADB09ABB3FBD2BCE818A2413D0B0253B5BDA4FC5B2F6F85F3FD5B55F2", + "22EFF8E6DD5236F5F57D94EDE874D6C9428E8F5D566F17CD6D1848CD752FE13C655CB10FBAAFF76872F2BF2DA99E15DC624075E1EC2F58A3F64072121838569E", + "9CEC6BBF62C4BCE4138ABAE1CBEC8DAD31950444E90321B1347196834C114B864AF3F3CC3508F83751FFB4EDA7C84D140734BB4263C3625C00F04F4C8068981B", + "A8B60FA4FC2442F6F1514AD7402626920CC7C2C9F72124B8CBA8EE2CB7C4586F658A4410CFFCC0AB88343955E094C6AF0D20D0C714FB0A988F543F300F58D389", + "8271CC45DFA5E4170E847E8630B952CF9C2AA777D06F26A7585B8381F188DACC7337391CFCC94B053DC4EC29CC17F077870428F1AC23FDDDA165EF5A3F155F39", + "BF23C0C25C8060E4F6995F1623A3BEBECAA96E308680000A8AA3CD56BB1A6DA099E10D9231B37F4519B2EFD2C24DE72F31A5F19535241B4A59FA3C03CEB790E7", + "877FD652C05281009C0A5250E7A3A671F8B18C108817FE4A874DE22DA8E45DB11958A600C5F62E67D36CBF84474CF244A9C2B03A9FB9DC711CD1A2CAB6F3FAE0", + "29DF4D87EA444BAF5BCDF5F4E41579E28A67DE84149F06C03F110EA84F572A9F676ADDD04C4878F49C5C00ACCDA441B1A387CACEB2E993BB7A10CD8C2D6717E1", + "710DACB166844639CD7B637C274209424E2449DC35D790BBFA4F76177054A36B3B76FAC0CA6E61DF1E687000678AC0746DF75D0A3954897681FD393A155A1BB4", + "C1D5F93B8DEA1F2571BABCCBC01764541A0CDA87E444D673C50966CA559C33354B3ACB26E5D5781FFB28847A4B4754D77008C62A835835F500DEA7C3B58BDAE2", + "A41E41271CDAB8AF4D72B104BFB2AD041AC4DF14677DA671D85640C4B187F50C2B66513C4619FBD5D5DC4FE65DD37B9042E9848DDA556A504CAA2B1C6AFE4730", + "E7BCBACDC379C43D81EBADCB37781552FC1D753E8CF310D968392D06C91F1D64CC9E90CE1D22C32D277FC6CDA433A4D442C762E9EACF2C259F32D64CF9DA3A22", + "51755B4AC5456B13218A19C5B9242F57C4A981E4D4ECDCE09A3193362B808A579345D4881C2607A56534DD7F21956AFF72C2F4173A6E7B6CC2212BA0E3DAEE1F", + "DCC2C4BEB9C1F2607B786C20C631972347034C1CC02FCC7D02FF01099CFE1C6989840AC213923629113AA8BAD713CCF0FE4CE13264FB32B8B0FE372DA382544A", + "3D55176ACEA4A7E3A65FFA9FB10A7A1767199CF077CEE9F71532D67CD7C73C9F93CFC37CCDCC1FDEF50AAD46A504A650D298D597A3A9FA95C6C40CB71FA5E725", + "D07713C005DE96DD21D2EB8BBECA66746EA51A31AE922A3E74864889540A48DB27D7E4C90311638B224BF0201B501891754848113C266108D0ADB13DB71909C7", + "58983C21433D950CAA23E4BC18543B8E601C204318532152DAF5E159A0CD1480183D29285C05F129CB0CC3164687928086FFE380158DF1D394C6AC0D4288BCA8", + "8100A8DC528D2B682AB4250801BA33F02A3E94C54DAC0AE1482AA21F51EF3A82F3807E6FACB0AEB05947BF7AA2ADCB034356F90FA4560EDE02201A37E411EC1A", + "07025F1BB6C784F3FE49DE5C14B936A5ACACACAAB33F6AC4D0E00AB6A12483D6BEC00B4FE67C7CA5CC508C2A53EFB5BFA5398769D843FF0D9E8B14D36A01A77F", + "BA6AEFD972B6186E027A76273A4A723321A3F580CFA894DA5A9CE8E721C828552C64DACEE3A7FD2D743B5C35AD0C8EFA71F8CE99BF96334710E2C2346E8F3C52", + 
"E0721E02517AEDFA4E7E9BA503E025FD46E714566DC889A84CBFE56A55DFBE2FC4938AC4120588335DEAC8EF3FA229ADC9647F54AD2E3472234F9B34EFC46543", + "B6292669CCD38D5F01CAAE96BA272C76A879A45743AFA0725D83B9EBB26665B731F1848C52F11972B6644F554C064FA90780DBBBF3A89D4FC31F67DF3E5857EF", + "2319E3789C47E2DAA5FE807F61BEC2A1A6537FA03F19FF32E87EECBFD64B7E0E8CCFF439AC333B040F19B0C4DDD11A61E24AC1FE0F10A039806C5DCC0DA3D115", + "F59711D44A031D5F97A9413C065D1E614C417EDE998590325F49BAD2FD444D3E4418BE19AEC4E11449AC1A57207898BC57D76A1BCF3566292C20C683A5C4648F", + "DF0A9D0C212843A6A934E3902B2DD30D17FBA5F969D2030B12A546D8A6A45E80CF5635F071F0452E9C919275DA99BED51EB1173C1AF0518726B75B0EC3BAE2B5", + "A3EB6E6C7BF2FB8B28BFE8B15E15BB500F781ECC86F778C3A4E655FC5869BF2846A245D4E33B7B14436A17E63BE79B36655C226A50FFBC7124207B0202342DB5", + "56D4CBCD070563426A017069425C2CD2AE540668287A5FB9DAC432EB8AB1A353A30F2FE1F40D83333AFE696A267795408A92FE7DA07A0C1814CF77F36E105EE8", + "E59B9987D428B3EDA37D80ABDB16CD2B0AEF674C2B1DDA4432EA91EE6C935C684B48B4428A8CC740E579A30DEFF35A803013820DD23F14AE1D8413B5C8672AEC", + "CD9FCC99F99D4CC16D031900B2A736E1508DB4B586814E6345857F354A70CCECB1DF3B50A19ADAF43C278EFA423FF4BB6C523EC7FD7859B97B168A7EBFF8467C", + "0602185D8C3A78738B99164B8BC6FFB21C7DEBEBBF806372E0DA44D121545597B9C662A255DC31542CF995ECBE6A50FB5E6E0EE4EF240FE557EDED1188087E86", + "C08AFA5B927BF08097AFC5FFF9CA4E7800125C1F52F2AF3553FA2B89E1E3015C4F87D5E0A48956AD31450B083DAD147FFB5EC03434A26830CF37D103AB50C5DA", + "36F1E1C11D6EF6BC3B536D505D544A871522C5C2A253067EC9933B6EC25464DAF985525F5B9560A16D890259AC1BB5CC67C0C469CDE133DEF000EA1D686F4F5D", + "BF2AB2E2470F5438C3B689E66E7686FFFA0CB1E1798AD3A86FF99075BF6138E33D9C0CE59AFB24AC67A02AF34428191A9A0A6041C07471B7C3B1A752D6FC0B8B", + "D400601F9728CCC4C92342D9787D8D28AB323AF375CA5624B4BB91D17271FBAE862E413BE73F1F68E615B8C5C391BE0DBD9144746EB339AD541547BA9C468A17", + "79FE2FE157EB85A038ABB8EBBC647731D2C83F51B0AC6EE14AA284CB6A3549A4DCCEB300740A825F52F5FB30B03B8C4D8B0F4AA67A63F4A94E3303C4EDA4C02B", + "75351313B52A8529298D8C186B1768666DCCA8595317D7A4816EB88C062020C0C8EFC554BB341B64688DB5CCAFC35F3C3CD09D6564B36D7B04A248E146980D4B", + "E3128B1D311D02179D7F25F97A5A8BEE2CC8C86303644FCD664E157D1FEF00F23E46F9A5E8E5C890CE565BB6ABD4302CE06469D52A5BD53E1C5A54D04649DC03", + "C2382A72D2D3ACE9D5933D00B60827ED380CDA08D0BA5F6DD41E29EE6DBE8ECB9235F06BE95D83B6816A2FB7A5AD47035E8A4B69A4884B99E4BECE58CAB25D44", + "6B1C69460BBD50AC2ED6F32E6E887CFED407D47DCF0AAA60387FE320D780BD03EAB6D7BAEB2A07D10CD552A300341354EA9A5F03183A623F92A2D4D9F00926AF", + "6CDA206C80CDC9C44BA990E0328C314F819B142D00630404C48C05DC76D1B00CE4D72FC6A48E1469DDEF609412C364820854214B4869AF090F00D3C1BA443E1B", + "7FFC8C26FBD6A0F7A609E6E1939F6A9EDF1B0B066641FB76C4F9602ED748D11602496B35355B1AA255850A509D2F8EE18C8F3E1D7DCBC37A136598F56A59ED17", + "70DE1F08DD4E09D5FC151F17FC991A23ABFC05104290D50468882EFAF582B6EC2F14F577C0D68C3AD06626916E3C86E6DAAB6C53E5163E82B6BD0CE49FC0D8DF", + "4F81935756ED35EE2058EE0C6A6110D6FAC5CB6A4F46AA9411603F99965823B6DA4838276C5C06BC7880E376D92758369EE7305BCEC8D3CFD28CCABB7B4F0579", + "ABCB61CB3683D18F27AD527908ED2D32A0426CB7BB4BF18061903A7DC42E7E76F982382304D18AF8C80D91DD58DD47AF76F8E2C36E28AF2476B4BCCF82E89FDF", + "02D261AD56A526331B643DD2186DE9A82E72A58223CD1E723686C53D869B83B94632B7B647AB2AFC0D522E29DA3A5615B741D82852E0DF41B66007DBCBA90543", + "C5832741FA30C5436823015383D297FF4C4A5D7276C3F902122066E04BE5431B1A85FAF73B918434F9300963D1DEA9E8AC3924EF490226EDEEA5F743E410669F", + 
"CFAEAB268CD075A5A6AED515023A032D54F2F2FF733CE0CBC78DB51DB4504D675923F82746D6594606AD5D67734B11A67CC6A468C2032E43CA1A94C6273A985E", + "860850F92EB268272B67D133609BD64E34F61BF03F4C1738645C17FEC818465D7ECD2BE2907641130025FDA79470AB731646E7F69440E8367EA76AC4CEE8A1DF", + "84B154ED29BBEDEFA648286839046F4B5AA34430E2D67F7496E4C39F2C7EA78995F69E1292200016F16AC3B37700E6C7E7861AFC396B64A59A1DBF47A55C4BBC", + "AEEEC260A5D8EFF5CCAB8B95DA435A63ED7A21EA7FC7559413FD617E33609F8C290E64BBACC528F6C080262288B0F0A3219BE223C991BEE92E72349593E67638", + "8AD78A9F26601D127E8D2F2F976E63D19A054A17DCF59E0F013AB54A6887BBDFFDE7AAAE117E0FBF3271016595B9D9C712C01B2C53E9655A382BC4522E616645", + "8934159DADE1AC74147DFA282C75954FCEF443EF25F80DFE9FB6EA633B8545111D08B34EF43FFF17026C7964F5DEAC6D2B3C29DACF2747F022DF5967DFDC1A0A", + "CD36DD0B240614CF2FA2B9E959679DCDD72EC0CD58A43DA3790A92F6CDEB9E1E795E478A0A47D371100D340C5CEDCDBBC9E68B3F460818E5BDFF7B4CDA4C2744", + "00DF4E099B807137A85990F49D3A94315E5A5F7F7A6076B303E96B056FB93800111F479628E2F8DB59AEB6AC70C3B61F51F9B46E80FFDEAE25EBDDB4AF6CB4EE", + "2B9C955E6CAED4B7C9E246B86F9A1726E810C59D126CEE66ED71BF015B83558A4B6D84D18DC3FF4620C2FFB722359FDEF85BA0D4E2D22ECBE0ED784F99AFE587", + "181DF0A261A2F7D29EA5A15772715105D450A4B6C236F699F462D60CA76487FEEDFC9F5EB92DF838E8FB5DC3694E84C5E0F4A10B761F506762BE052C745A6EE8", + "21FB203458BF3A7E9A80439F9A902899CD5DE0139DFD56F7110C9DEC8437B26BDA63DE2F565926D85EDB1D6C6825669743DD9992653D13979544D5DC8228BFAA", + "EF021F29C5FFB830E64B9AA9058DD660FD2FCB81C497A7E698BCFBF59DE5AD4A86FF93C10A4B9D1AE5774725F9072DCDE9E1F199BAB91F8BFF921864AA502EEE", + "B3CFDA40526B7F1D37569BDFCDF911E5A6EFE6B2EC90A0454C47B2C046BF130FC3B352B34DF4813D48D33AB8E269B69B075676CB6D00A8DCF9E1F967EC191B2C", + "B4C6C3B267071EEFB9C8C72E0E2B941293641F8673CB70C1CC26AD1E73CF141755860AD19B34C2F34ED35BB52EC4507CC1FE59047743A5F0C6FEBDE625E26091", + "57A34F2BCCA60D4B85103B830C9D7952A416BE5263AE429C9E5E53FE8590A8F78EC65A51109EA85DCDF7B6223F9F2B340539FAD81923DBF8EDABF95129E4DFF6", + "9CF46662FCD61A232277B685663B8B5DA832DFD9A3B8CCFEEC993EC6AC415AD07E048ADFE414DF272770DBA867DA5C1224C6FD0AA0C2187D426AC647E9887361", + "5CE1042AB4D542C2F9EE9D17262AF8164098935BEF173D0E18489B04841746CD2F2DF866BD7DA6E5EF9024C648023EC723AB9C62FD80285739D84F15D2AB515A", + "8488396BD4A8729B7A473178F232DADF3F0F8E22678BA5A43E041E72DA1E2CF82194C307207A54CB8156293339EAEC693FF66BFCD5EFC65E95E4ECAF54530ABD", + "F598DA901C3835BCA560779037DFDE9F0C51DC61C0B760FC1522D7B470EE63F5BDC6498476E86049AD86E4E21AF2854A984CC905427D2F17F66B1F41C3DA6F61", + "5F93269798CF02132107337660A8D7A177354C0212EB93E555E7C37A08AEF3D8DCE01217011CD965C04DD2C105F2E2B6CAE5E4E6BCAF09DFBEE3E0A6A6357C37", + "0ECF581D47BAC9230986FAABD70C2F5B80E91066F0EC55A842937882286D2CA007BB4E973B0B091D52167FF7C4009C7AB4AD38FFF1DCEACDB7BE81EF4A452952", + "5AECA8ABE1528582B2A307B4009585498A3D467CA6101CB0C5126F9976056E9FFC123CC20C302B2A737F492C75D21F01512C90CA0541DFA56E950A321DCB28D8", + "732FBF8F1CB2B8329263EDE27858FE46F8D3354D376BCDA0548E7CE1FA9DD11F85EB661FE950B543AA635CA4D3F04EDE5B32D6B656E5CE1C44D35C4A6C56CFF8", + "D5E938735D63788C80100AEFD18648D18CF272F69F20FF24CFE2895C088AD08B0104DA1672A4EB26FC52545CC7D7A01B266CF546C403C45BD129EB41BDD9200B", + "65A245B49352EE297D91AF8C8BE00528AC6E046DD83AC7BD465A98816DD68F3E00E1AE8F895327A7E9A8C9326598379A29C9FC91EC0C6EEF08F3E2B216C11008", + "C95654B63019130AB45DD0FB4941B98AEB3AF2A123913ECA2CE99B3E97410A7BF8661CC7FBAA2BC1CF2B13113B1ED40A0118B88E5FFFC3542759EA007ED4C58D", + 
"1EB262F38FA494431F017DAD44C0DFB69324AC032F04B657FC91A88647BB74760F24E7C956514F0CF002990B182C1642B9B2426E96A61187E4E012F00E217D84", + "3B955AEEBFA5151AC1AB8E3F5CC1E3767084C842A575D36269836E97353D41622B731DDDCD5F269550A3A5B87BE1E90326340B6E0E62555815D9600597AC6EF9", + "68289F6605473BA0E4F241BAF7477A9885426A858F19EF2A18B0D40EF8E41282ED5526B519799E270F13881327918278755711071D8511FE963E3B5606AA3716", + "80A33787542612C38F6BCD7CD86CAB460227509B1CBAD5EC408A91413D51155A0476DADBF3A2518E4A6E77CC346622E347A469BF8BAA5F04EB2D98705355D063", + "34629BC6D831391C4CDF8AF1B4B7B6B8E8EE17CF98C70E5DD586CD99F14B11DF945166236A9571E6D591BB83EE4D164D46F6B9D8EF86FF865A81BFB91B00424B", + "8B7CC339163863BB4383E542B0EF0E7CF36B84AD932CDF5A80419EC9AD692E7A7E784D2C7CB3796A18B8F800035F3AA06C824100611120A7BDEB35618CCB81B7", + "4F084E4939DD5A7F5A658FAD58A18A15C25C32EC1C7FD5C5C6C3E892B3971AEAAC308304EF17B1C47239EA4BB398B3FD6D4528D8DE8E768AE0F1A5A5C6B5C297", + "48F407A1AF5B8009B2051742E8CF5CD5656669E7D722EE8E7BD202060849442168D8FACC117C012BFB7BF449D99BEFFF6A34AEA203F1D8D352722BE5014EC818", + "A6AA82CD1E426F9A73BFA39A29037876114655B8C22D6D3FF8B638AE7DEA6B17843E09E52EB66FA1E475E4A8A3DE429B7D0F4A776FCB8BDC9B9FEDE7D52E815F", + "5817027D6BDD00C5DD10AC593CD560372270775A18526D7E6F13872A2E20EAB664625BE7168AC4BD7C9E0CE7FC4099E0F48442E2C767191C6E1284E9B2CCEA8C", + "08E41028340A45C74E4052B3A8D6389E22E043A1ADAB5E28D97619450D723469B620CAA519B81C14523854F619FD3027E3847BD03276E60604A80DDB4DE876D6", + "130B8420537EB07D72ABDA07C85ACBD8B9A44F16321DD0422145F809673D30F2B5321326E2BFF317EF3FEF983C51C4F8AB24A325D298E34AFCE569A82555774C", + "AC49B844AFAA012E31C474CA263648844FD2F6307992C2F752ACA02C3828965175794DEEE2D2EE95C61CD284F6B5A2D75E2EF2B29EE8149E77FB81447B2FD04B", + "B9D7CA81CC60BB9578E44024E5A0A0BE80F27336A6A9F4E53DF3999CB191280B090E2AC2D29C5BAAD9D71415BDC129E69AA2667AF6A7FD5E189FCCDCEE817340", + "A755E113386572C75CED61D719706070B9146048E42A9F8CD35667A088B42F08808ABDF77E618ABD959AFC757379CA2C00BCC1A48390FA2BFF618B1E0078A613", + "A73C7DEBED326F1C0DB0795EE7D6E3946894B826B1F8101C56C823BA17168312E7F53FC7DBE52C3E11E69852C40485E2EF182477862EA6A34EC136E2DFEEA6F4", + "6CB8F9D52C56D82CAC28F39EA1593E8BB2506293AC0D68376A1709B62A46DF14A4AE64B2D8FAB76733A1CED2D548E3F3C6FCB49D40C3D5808E449CD83D1C2AA2", + "683FA2B2369A10162C1C1C7B24BC970EE67DA220564F32203F625696C0352A0B9AD96624362D952D84463C1106A2DBA7A092599884B35A0B89C8F1B6A9B5A61E", + "AAD9AD44610118B77D508AEB1BBCD1C1B7D0171397FB510A401BBC0EC34623670D86A2DC3C8F3AB5A2044DF730256727545F0860CE21A1EAC717DFC48F5D228E", + "C42578DE23B4C987D5E1AC4D689ED5DE4B0417F9704BC6BCE969FA13471585D62C2CB1212A944F397FC9CA2C3747C3BEB694EC4C5BE68828DDA53EF43FAEC6C0", + "470F00841EE8244E63ED2C7EA30E2E419897C197462ECCCECF713B42A5065FFF5914BC9B79AFFE8F6B657875E789AE213BD914CD35BD174D46E9D18BD843773D", + "34FC4213730F47A5E9A3580F643E12945CFCB31BF206F6AD450CE528DA3FA432E005D6B0ECCE10DCA7C5995F6AACC5150E1B009E19751E8309F8859531844374", + "FB3C1F0F56A56F8E316FDF5D853C8C872C39635D083634C3904FC3AC07D1B578E85FF0E480E92D44ADE33B62E893EE32343E79DDF6EF292E89B582D312502314", + "C7C97FC65DD2B9E3D3D607D31598D3F84261E9919251E9C8E57BB5F829377D5F73EABBED55C6C381180F29AD02E5BE797FFEC7E57BDECBC50AD3D062F0993AB0", + "A57A49CDBE67AE7D9F797BB5CC7EFC2DF07F4E1B15955F85DAE74B76E2ECB85AFB6CD9EEED8888D5CA3EC5AB65D27A7B19E578475760A045AC3C92E13A938E77", + "C7143FCE9614A17FD653AEB140726DC9C3DBB1DE6CC581B2726897EC24B7A50359AD492243BE66D9EDD8C933B5B80E0B91BB61EA98056006516976FAE8D99A35", + 
"65BB58D07F937E2D3C7E65385F9C54730B704105CCDB691F6E146D4EE8F6C086F49511035110A9AD6031FDCEB943E0F9613BCB276DD40F0624EF0F924F809783", + "E540277F683B1186DD3B5B3F61433396581A35FEB12002BE8C6A6231FC40FFA70F08081BC58B2D94F7649543614A435FAA2D62110E13DABC7B86629B63AF9C24", + "418500878C5FBCB584C432F4285E05E49F2E3E075399A0DBFCF874EBF8C03D02BF16BC6989D161C77CA0786B05053C6C709433712319192128835CF0B660595B", + "889090DBB1944BDC9433EE5EF1010C7A4A24A8E71ECEA8E12A31318CE49DCAB0ACA5C3802334AAB2CC84B14C6B9321FE586BF3F876F19CD406EB1127FB944801", + "53B6A28910AA92E27E536FB549CF9B9918791060898E0B9FE183577FF43B5E9C7689C745B32E412269837C31B89E6CC12BF76E13CAD366B74ECE48BB85FD09E9", + "7C092080C6A80D672409D081D3D177106BCD63567785140719490950AE07AE8FCAABBAAAB330CFBCF7374482C220AF2EADEEB73DCBB35ED823344E144E7D4899", + "9CCDE566D2400509181111F32DDE4CD63209FE59A30C114546AD2776D889A41BAD8FA1BB468CB2F9D42CA9928A7770FEF8E8BA4D0C812D9A1E75C3D8D2CCD75A", + "6E293BF5D03FE43977CFE3F57CCDB3AE282A85455DCA33F37F4B74F8398CC612433D755CBEC412F8F82A3BD3BC4A278F7ECD0DFA9BBDC40BE7A787C8F159B2DF", + "C56546FB2178456F336164C18B90DEFFC83AE2B5A3ACA77B6884D36D2C1DB39501B3E65E36C758C66E3188451FDB3515EE162C001F06C3E8CB573ADF30F7A101", + "6F82F89F299EBCA2FE014B59BFFE1AA84E88B1915FE256AFB646FD8448AF2B8891A7FAB37A4EA6F9A50E6C317039D8CF878F4C8E1A0DD464F0B4D6FF1C7EA853", + "2B8599FF9C3D6198637AD51E57D1998B0D75313FE2DD61A533C964A6DD9607C6F723E9452CE46E014B1C1D6DE77BA5B88C914D1C597BF1EAE13474B4290E89B2", + "08BF346D38E1DF06C8260EDB1DA75579275948D5C0A0AA9ED2886F8856DE5417A156998758F5B17E52F101CA957A71137473DFD18D7D209C4C10D9233C93691D", + "6DF2156D773114D310B63DB9EE5350D77E6BCF25B05FCD910F9B31BC42BB13FE8225EBCB2A23A62280777B6BF74E2CD0917C7640B43DEFE468CD1E18C943C66A", + "7C7038BC13A91151828A5BA82B4A96040F258A4DFB1B1373F0D359168AFB0517A20B28A12D3644046BE66B8D08D8AE7F6A923EA1C00187C6D11DC502BAC71305", + "BCD1B30D808FB739B987CBF154BEA00DA9D40380B861D4C1D6377122DADD61C0E59018B71941CFB62E00DCD70AEB9ABF0473E80F0A7ECA6B6DEA246AB229DD2B", + "7ED4468D968530FE7AB2C33540B26D8C3BD3ED44B34FBE8C2A9D7F805B5ADA0EA252EEADE4FCE97F89728AD85BC8BB2430B1BEF2CDDD32C8446E59B8E8BA3C67", + "6D30B7C6CE8A3236C0CA2F8D728B1088CA06983A8043E621D5DCF0C537D13B08791EDEB01A3CF0943EC1C890AB6E29B146A236CD46BCB9D93BF516FB67C63FE5", + "97FE03CEF31438508911BDED975980A66029305DC5E3FA8AD1B4FB22FCDF5A19A733320327D8F71CCF496CB3A44A77AF56E3DDE73D3A5F176896CC57C9A5AD99", + "785A9D0FBD21136DBCE8FA7EAFD63C9DAD220052978416B31D9753EAA149097847ED9B30A65C70507EFF01879149ED5CF0471D37798EDC05ABD56AD4A2CCCB1D", + "AD408D2ABDDFD37B3BF34794C1A3371D928ED7FC8D966225333584C5665817832A37C07F0DC7CB5AA874CD7D20FE8FAB8EABCB9B33D2E0841F6E200960899D95", + "97668F745B6032FC815D9579322769DCCD9501A5080029B8AE826BEFB6742331BD9F76EFEB3E2B8E81A9786B282F5068A3A2424697A77C41876B7E753F4C7767", + "26BB985F47E7FEE0CFD252D4EF96BED42B9C370C1C6A3E8C9EB04EF7F7818B833A0D1F043EBAFB911DC779E02740A02A44D3A1EA45ED4AD55E686C927CAFE97E", + "5BFE2B1DCF7FE9B95088ACEDB575C19016C743B2E763BF5851AC407C9EDA43715EDFA48B4825492C5179593FFF21351B76E8B7E034E4C53C79F61F29C479BD08", + "C76509EF72F4A6F9C9C40618ED52B2084F83502232E0AC8BDAF3264368E4D0180F6854C4ABF4F6509C79CAAFC44CF3194AFC57BD077BD7B3C9BDA3D4B8775816", + "D66F2BEAB990E354CCB910E4E9C7AC618C7B63EF292A96B552341DE78DC46D3EC8CFABC699B50AF41FDA39CF1B0173660923510AD67FAEDEF5207CFFE8641D20", + "7D8F0672992B79BE3A364D8E5904F4AB713BBC8AB01B4F309AD8CCF223CE1034A860DCB0B00550612CC2FA17F2969E18F22E1427D254B4A82B3A03A3EB394ADF", + 
"A56D6725BFB3DE47C1414ADF25FC8F0FC9846F6987722BC06366D5CA4E89722925EBBC881418844075397A0CA89842C7B9E9E07E1D9D183EBEB39E120B483BF7", + "AF5E03D7FE60C67E10313344434E79485A03A758D6DCE985574745763C1C5C77D4FB3E6FB12230368370993BF90FEED0C5D1607524562D7C09C0C210ED393D7C", + "7A20540CC07BF72B582421FC342E82F52134B69841EC28ED189E2EA6A29DD2F82A640352D222B52F2911DC72A7DAB31CAADD80C6118F13C56B2A1E4373BE0EA3", + "486F02C63E5467EA1FDDE7E82BFACC2C1BA5D636D9F3D08B210DA3F372F706EC218CC17FF60AEF703BBE0C15C38AE55D286A684F864C78211CCAB4178C92ADBA", + "1C7A5C1DEDCD04A921788F7EB23361CA1953B04B9C7AEC35D65EA3E4996DB26F281278EA4AE666AD81027D98AF57262CDBFA4C085F4210568C7E15EEC7805114", + "9CE3FA9A860BDBD5378FD6D7B8B671C6CB7692910CE8F9B6CB4122CBCBE6AC06CA0422CEF1225935053B7D193A81B9E972EB85A1D3074F14CBB5EC9F0573892D", + "A91187BE5C371C4265C174FD4653B8AB708551F83D1FEE1CC1479581BC006D6FB78FCC9A5DEE1DB3666F508F9780A37593EBCCCF5FBED39667DC6361E921F779", + "4625767D7B1D3D3ED2FBC674AF14E0244152F2A4021FCF3311505D89BD81E2F9F9A500C3B199914DB49500B3C98D03EA93286751A686A3B875DAAB0CCD63B44F", + "43DFDFE1B014FED3A2ACABB7F3E9A182F2AA18019D27E3E6CDCF31A15B428E91E7B08CF5E5C376FCE2D8A28FF85AB0A0A1656EDB4A0A91532620096D9A5A652D", + "279E3202BE3989BA3112772585177487E4FE3EE3EAB49C2F7FA7FE87CFE7B80D3E0355EDFF6D031E6C96C795DB1C6F041880EC3824DEFACF9263820A8E7327DE", + "EA2D066AC229D4D4B616A8BEDEC734325224E4B4E58F1AE6DAD7E40C2DA29196C3B1EA9571DACC81E87328CAA0211E09027B0524AA3F4A849917B3586747EBBB", + "49F014F5C61822C899AB5CAE51BE4044A4495E777DEB7DA9B6D8490EFBB87530ADF293DAF079F94C33B7044EF62E2E5BB3EB11E17304F8453EE6CE24F033DDB0", + "9233490344E5B0DC5912671B7AE54CEE7730DBE1F4C7D92A4D3E3AAB50571708DB51DCF9C2944591DB651DB32D22935B86944969BE77D5B5FEAE6C3840A8DB26", + "B6E75E6F4C7F453B7465D25B5AC8C7196902EAA953875228C8634E16E2AE1F38BC3275304335F5989ECCC1E34167D4E68D7719968FBA8E2FE67947C35C48E806", + "CC14CA665AF1483EFBC3AF80080E650D5046A3932F4F51F3FE90A0705EC25104ADF07839265DC51D43401411246E474F0D5E5637AF94767283D53E0617E981F4", + "230A1C857CB2E7852E41B647E90E4585D2D881E1734DC38955356E8DD7BFF39053092C6B38E236E1899525647073DDDF6895D64206325E7647F275567B255909", + "CBB65321AC436E2FFDAB2936359CE49023F7DEE7614EF28D173C3D27C5D1BFFA51553D433F8EE3C9E49C05A2B883CCE954C9A8093B80612A0CDD4732E041F995", + "3E7E570074337275EFB51315588034C3CF0DDDCA20B4612E0BD5B881E7E5476D319CE4FE9F19186E4C0826F44F131EB048E65BE242B1172C63BADB123AB0CBE8", + "D32E9EC02D38D4E1B8249DF8DCB00C5B9C68EB8922672E3505393B6A210BA56F9496E5EE0490EF387C3CDEC061F06BC0382D9304CAFBB8E0CD33D57029E62DF2", + "8C1512466089F05B3775C262B62D22B83854A83218130B4EC91B3CCBD293D2A54302CECAAB9B100C68D1E6DDC8F07CDDBDFE6FDAAAF099CC09D6B725879C6369", + "91A7F61C97C2911E4C812EF71D780AD8FA788794561D08303FD1C1CB608A46A12563086EC5B39D471AED94FB0F6C678A43B8792932F9028D772A22768EA23A9B", + "4F6BB222A395E8B18F6BA155477AED3F0729AC9E83E16D31A2A8BC655422B837C891C6199E6F0D75799E3B691525C581953517F252C4B9E3A27A28FBAF49644C", + "5D06C07E7A646C413A501C3F4BB2FC38127DE7509B7077C4D9B5613201C1AA02FD5F79D2745915DD57FBCB4CE08695F6EFC0CB3D2D330E19B4B0E6004EA6471E", + "B96756E57909968F14B796A5D30F4C9D671472CF82C8CFB2CACA7AC7A44CA0A14C9842D00C82E337502C94D5960ACA4C492EA7B0DF919DDF1AADA2A275BB10D4", + "FF0A015E98DB9C99F03977710AAC3E658C0D896F6D71D618BA79DC6CF72AC75B7C038EB6862DEDE4543E145413A6368D69F5722C827BA3EF25B6AE6440D39276", + "5B21C5FD8868367612474FA2E70E9CFA2201FFEEE8FAFAB5797AD58FEFA17C9B5B107DA4A3DB6320BAAF2C8617D5A51DF914AE88DA3867C2D41F0CC14FA67928", +} + +var keyed2B = []string{ + 
"10EBB67700B1868EFB4417987ACF4690AE9D972FB7A590C2F02871799AAA4786B5E996E8F0F4EB981FC214B005F42D2FF4233499391653DF7AEFCBC13FC51568", + "961F6DD1E4DD30F63901690C512E78E4B45E4742ED197C3C5E45C549FD25F2E4187B0BC9FE30492B16B0D0BC4EF9B0F34C7003FAC09A5EF1532E69430234CEBD", + "DA2CFBE2D8409A0F38026113884F84B50156371AE304C4430173D08A99D9FB1B983164A3770706D537F49E0C916D9F32B95CC37A95B99D857436F0232C88A965", + "33D0825DDDF7ADA99B0E7E307104AD07CA9CFD9692214F1561356315E784F3E5A17E364AE9DBB14CB2036DF932B77F4B292761365FB328DE7AFDC6D8998F5FC1", + "BEAA5A3D08F3807143CF621D95CD690514D0B49EFFF9C91D24B59241EC0EEFA5F60196D407048BBA8D2146828EBCB0488D8842FD56BB4F6DF8E19C4B4DAAB8AC", + "098084B51FD13DEAE5F4320DE94A688EE07BAEA2800486689A8636117B46C1F4C1F6AF7F74AE7C857600456A58A3AF251DC4723A64CC7C0A5AB6D9CAC91C20BB", + "6044540D560853EB1C57DF0077DD381094781CDB9073E5B1B3D3F6C7829E12066BBACA96D989A690DE72CA3133A83652BA284A6D62942B271FFA2620C9E75B1F", + "7A8CFE9B90F75F7ECB3ACC053AAED6193112B6F6A4AEEB3F65D3DE541942DEB9E2228152A3C4BBBE72FC3B12629528CFBB09FE630F0474339F54ABF453E2ED52", + "380BEAF6EA7CC9365E270EF0E6F3A64FB902ACAE51DD5512F84259AD2C91F4BC4108DB73192A5BBFB0CBCF71E46C3E21AEE1C5E860DC96E8EB0B7B8426E6ABE9", + "60FE3C4535E1B59D9A61EA8500BFAC41A69DFFB1CEADD9ACA323E9A625B64DA5763BAD7226DA02B9C8C4F1A5DE140AC5A6C1124E4F718CE0B28EA47393AA6637", + "4FE181F54AD63A2983FEAAF77D1E7235C2BEB17FA328B6D9505BDA327DF19FC37F02C4B6F0368CE23147313A8E5738B5FA2A95B29DE1C7F8264EB77B69F585CD", + "F228773CE3F3A42B5F144D63237A72D99693ADB8837D0E112A8A0F8FFFF2C362857AC49C11EC740D1500749DAC9B1F4548108BF3155794DCC9E4082849E2B85B", + "962452A8455CC56C8511317E3B1F3B2C37DF75F588E94325FDD77070359CF63A9AE6E930936FDF8E1E08FFCA440CFB72C28F06D89A2151D1C46CD5B268EF8563", + "43D44BFA18768C59896BF7ED1765CB2D14AF8C260266039099B25A603E4DDC5039D6EF3A91847D1088D401C0C7E847781A8A590D33A3C6CB4DF0FAB1C2F22355", + "DCFFA9D58C2A4CA2CDBB0C7AA4C4C1D45165190089F4E983BB1C2CAB4AAEFF1FA2B5EE516FECD780540240BF37E56C8BCCA7FAB980E1E61C9400D8A9A5B14AC6", + "6FBF31B45AB0C0B8DAD1C0F5F4061379912DDE5AA922099A030B725C73346C524291ADEF89D2F6FD8DFCDA6D07DAD811A9314536C2915ED45DA34947E83DE34E", + "A0C65BDDDE8ADEF57282B04B11E7BC8AAB105B99231B750C021F4A735CB1BCFAB87553BBA3ABB0C3E64A0B6955285185A0BD35FB8CFDE557329BEBB1F629EE93", + "F99D815550558E81ECA2F96718AED10D86F3F1CFB675CCE06B0EFF02F617C5A42C5AA760270F2679DA2677C5AEB94F1142277F21C7F79F3C4F0CCE4ED8EE62B1", + "95391DA8FC7B917A2044B3D6F5374E1CA072B41454D572C7356C05FD4BC1E0F40B8BB8B4A9F6BCE9BE2C4623C399B0DCA0DAB05CB7281B71A21B0EBCD9E55670", + "04B9CD3D20D221C09AC86913D3DC63041989A9A1E694F1E639A3BA7E451840F750C2FC191D56AD61F2E7936BC0AC8E094B60CAEED878C18799045402D61CEAF9", + "EC0E0EF707E4ED6C0C66F9E089E4954B058030D2DD86398FE84059631F9EE591D9D77375355149178C0CF8F8E7C49ED2A5E4F95488A2247067C208510FADC44C", + "9A37CCE273B79C09913677510EAF7688E89B3314D3532FD2764C39DE022A2945B5710D13517AF8DDC0316624E73BEC1CE67DF15228302036F330AB0CB4D218DD", + "4CF9BB8FB3D4DE8B38B2F262D3C40F46DFE747E8FC0A414C193D9FCF753106CE47A18F172F12E8A2F1C26726545358E5EE28C9E2213A8787AAFBC516D2343152", + "64E0C63AF9C808FD893137129867FD91939D53F2AF04BE4FA268006100069B2D69DAA5C5D8ED7FDDCB2A70EEECDF2B105DD46A1E3B7311728F639AB489326BC9", + "5E9C93158D659B2DEF06B0C3C7565045542662D6EEE8A96A89B78ADE09FE8B3DCC096D4FE48815D88D8F82620156602AF541955E1F6CA30DCE14E254C326B88F", + "7775DFF889458DD11AEF417276853E21335EB88E4DEC9CFB4E9EDB49820088551A2CA60339F12066101169F0DFE84B098FDDB148D9DA6B3D613DF263889AD64B", + 
"F0D2805AFBB91F743951351A6D024F9353A23C7CE1FC2B051B3A8B968C233F46F50F806ECB1568FFAA0B60661E334B21DDE04F8FA155AC740EEB42E20B60D764", + "86A2AF316E7D7754201B942E275364AC12EA8962AB5BD8D7FB276DC5FBFFC8F9A28CAE4E4867DF6780D9B72524160927C855DA5B6078E0B554AA91E31CB9CA1D", + "10BDF0CAA0802705E706369BAF8A3F79D72C0A03A80675A7BBB00BE3A45E516424D1EE88EFB56F6D5777545AE6E27765C3A8F5E493FC308915638933A1DFEE55", + "B01781092B1748459E2E4EC178696627BF4EBAFEBBA774ECF018B79A68AEB84917BF0B84BB79D17B743151144CD66B7B33A4B9E52C76C4E112050FF5385B7F0B", + "C6DBC61DEC6EAEAC81E3D5F755203C8E220551534A0B2FD105A91889945A638550204F44093DD998C076205DFFAD703A0E5CD3C7F438A7E634CD59FEDEDB539E", + "EBA51ACFFB4CEA31DB4B8D87E9BF7DD48FE97B0253AE67AA580F9AC4A9D941F2BEA518EE286818CC9F633F2A3B9FB68E594B48CDD6D515BF1D52BA6C85A203A7", + "86221F3ADA52037B72224F105D7999231C5E5534D03DA9D9C0A12ACB68460CD375DAF8E24386286F9668F72326DBF99BA094392437D398E95BB8161D717F8991", + "5595E05C13A7EC4DC8F41FB70CB50A71BCE17C024FF6DE7AF618D0CC4E9C32D9570D6D3EA45B86525491030C0D8F2B1836D5778C1CE735C17707DF364D054347", + "CE0F4F6ACA89590A37FE034DD74DD5FA65EB1CBD0A41508AADDC09351A3CEA6D18CB2189C54B700C009F4CBF0521C7EA01BE61C5AE09CB54F27BC1B44D658C82", + "7EE80B06A215A3BCA970C77CDA8761822BC103D44FA4B33F4D07DCB997E36D55298BCEAE12241B3FA07FA63BE5576068DA387B8D5859AEAB701369848B176D42", + "940A84B6A84D109AAB208C024C6CE9647676BA0AAA11F86DBB7018F9FD2220A6D901A9027F9ABCF935372727CBF09EBD61A2A2EEB87653E8ECAD1BAB85DC8327", + "2020B78264A82D9F4151141ADBA8D44BF20C5EC062EEE9B595A11F9E84901BF148F298E0C9F8777DCDBC7CC4670AAC356CC2AD8CCB1629F16F6A76BCEFBEE760", + "D1B897B0E075BA68AB572ADF9D9C436663E43EB3D8E62D92FC49C9BE214E6F27873FE215A65170E6BEA902408A25B49506F47BABD07CECF7113EC10C5DD31252", + "B14D0C62ABFA469A357177E594C10C194243ED2025AB8AA5AD2FA41AD318E0FF48CD5E60BEC07B13634A711D2326E488A985F31E31153399E73088EFC86A5C55", + "4169C5CC808D2697DC2A82430DC23E3CD356DC70A94566810502B8D655B39ABF9E7F902FE717E0389219859E1945DF1AF6ADA42E4CCDA55A197B7100A30C30A1", + "258A4EDB113D66C839C8B1C91F15F35ADE609F11CD7F8681A4045B9FEF7B0B24C82CDA06A5F2067B368825E3914E53D6948EDE92EFD6E8387FA2E537239B5BEE", + "79D2D8696D30F30FB34657761171A11E6C3F1E64CBE7BEBEE159CB95BFAF812B4F411E2F26D9C421DC2C284A3342D823EC293849E42D1E46B0A4AC1E3C86ABAA", + "8B9436010DC5DEE992AE38AEA97F2CD63B946D94FEDD2EC9671DCDE3BD4CE9564D555C66C15BB2B900DF72EDB6B891EBCADFEFF63C9EA4036A998BE7973981E7", + "C8F68E696ED28242BF997F5B3B34959508E42D613810F1E2A435C96ED2FF560C7022F361A9234B9837FEEE90BF47922EE0FD5F8DDF823718D86D1E16C6090071", + "B02D3EEE4860D5868B2C39CE39BFE81011290564DD678C85E8783F29302DFC1399BA95B6B53CD9EBBF400CCA1DB0AB67E19A325F2D115812D25D00978AD1BCA4", + "7693EA73AF3AC4DAD21CA0D8DA85B3118A7D1C6024CFAF557699868217BC0C2F44A199BC6C0EDD519798BA05BD5B1B4484346A47C2CADF6BF30B785CC88B2BAF", + "A0E5C1C0031C02E48B7F09A5E896EE9AEF2F17FC9E18E997D7F6CAC7AE316422C2B1E77984E5F3A73CB45DEED5D3F84600105E6EE38F2D090C7D0442EA34C46D", + "41DAA6ADCFDB69F1440C37B596440165C15ADA596813E2E22F060FCD551F24DEE8E04BA6890387886CEEC4A7A0D7FC6B44506392EC3822C0D8C1ACFC7D5AEBE8", + "14D4D40D5984D84C5CF7523B7798B254E275A3A8CC0A1BD06EBC0BEE726856ACC3CBF516FF667CDA2058AD5C3412254460A82C92187041363CC77A4DC215E487", + "D0E7A1E2B9A447FEE83E2277E9FF8010C2F375AE12FA7AAA8CA5A6317868A26A367A0B69FBC1CF32A55D34EB370663016F3D2110230EBA754028A56F54ACF57C", + "E771AA8DB5A3E043E8178F39A0857BA04A3F18E4AA05743CF8D222B0B095825350BA422F63382A23D92E4149074E816A36C1CD28284D146267940B31F8818EA2", + 
"FEB4FD6F9E87A56BEF398B3284D2BDA5B5B0E166583A66B61E538457FF0584872C21A32962B9928FFAB58DE4AF2EDD4E15D8B35570523207FF4E2A5AA7754CAA", + "462F17BF005FB1C1B9E671779F665209EC2873E3E411F98DABF240A1D5EC3F95CE6796B6FC23FE171903B502023467DEC7273FF74879B92967A2A43A5A183D33", + "D3338193B64553DBD38D144BEA71C5915BB110E2D88180DBC5DB364FD6171DF317FC7268831B5AEF75E4342B2FAD8797BA39EDDCEF80E6EC08159350B1AD696D", + "E1590D585A3D39F7CB599ABD479070966409A6846D4377ACF4471D065D5DB94129CC9BE92573B05ED226BE1E9B7CB0CABE87918589F80DADD4EF5EF25A93D28E", + "F8F3726AC5A26CC80132493A6FEDCB0E60760C09CFC84CAD178175986819665E76842D7B9FEDF76DDDEBF5D3F56FAAAD4477587AF21606D396AE570D8E719AF2", + "30186055C07949948183C850E9A756CC09937E247D9D928E869E20BAFC3CD9721719D34E04A0899B92C736084550186886EFBA2E790D8BE6EBF040B209C439A4", + "F3C4276CB863637712C241C444C5CC1E3554E0FDDB174D035819DD83EB700B4CE88DF3AB3841BA02085E1A99B4E17310C5341075C0458BA376C95A6818FBB3E2", + "0AA007C4DD9D5832393040A1583C930BCA7DC5E77EA53ADD7E2B3F7C8E231368043520D4A3EF53C969B6BBFD025946F632BD7F765D53C21003B8F983F75E2A6A", + "08E9464720533B23A04EC24F7AE8C103145F765387D738777D3D343477FD1C58DB052142CAB754EA674378E18766C53542F71970171CC4F81694246B717D7564", + "D37FF7AD297993E7EC21E0F1B4B5AE719CDC83C5DB687527F27516CBFFA822888A6810EE5C1CA7BFE3321119BE1AB7BFA0A502671C8329494DF7AD6F522D440F", + "DD9042F6E464DCF86B1262F6ACCFAFBD8CFD902ED3ED89ABF78FFA482DBDEEB6969842394C9A1168AE3D481A017842F660002D42447C6B22F7B72F21AAE021C9", + "BD965BF31E87D70327536F2A341CEBC4768ECA275FA05EF98F7F1B71A0351298DE006FBA73FE6733ED01D75801B4A928E54231B38E38C562B2E33EA1284992FA", + "65676D800617972FBD87E4B9514E1C67402B7A331096D3BFAC22F1ABB95374ABC942F16E9AB0EAD33B87C91968A6E509E119FF07787B3EF483E1DCDCCF6E3022", + "939FA189699C5D2C81DDD1FFC1FA207C970B6A3685BB29CE1D3E99D42F2F7442DA53E95A72907314F4588399A3FF5B0A92BEB3F6BE2694F9F86ECF2952D5B41C", + "C516541701863F91005F314108CEECE3C643E04FC8C42FD2FF556220E616AAA6A48AEB97A84BAD74782E8DFF96A1A2FA949339D722EDCAA32B57067041DF88CC", + "987FD6E0D6857C553EAEBB3D34970A2C2F6E89A3548F492521722B80A1C21A153892346D2CBA6444212D56DA9A26E324DCCBC0DCDE85D4D2EE4399EEC5A64E8F", + "AE56DEB1C2328D9C4017706BCE6E99D41349053BA9D336D677C4C27D9FD50AE6AEE17E853154E1F4FE7672346DA2EAA31EEA53FCF24A22804F11D03DA6ABFC2B", + "49D6A608C9BDE4491870498572AC31AAC3FA40938B38A7818F72383EB040AD39532BC06571E13D767E6945AB77C0BDC3B0284253343F9F6C1244EBF2FF0DF866", + "DA582AD8C5370B4469AF862AA6467A2293B2B28BD80AE0E91F425AD3D47249FDF98825CC86F14028C3308C9804C78BFEEEEE461444CE243687E1A50522456A1D", + "D5266AA3331194AEF852EED86D7B5B2633A0AF1C735906F2E13279F14931A9FC3B0EAC5CE9245273BD1AA92905ABE16278EF7EFD47694789A7283B77DA3C70F8", + "2962734C28252186A9A1111C732AD4DE4506D4B4480916303EB7991D659CCDA07A9911914BC75C418AB7A4541757AD054796E26797FEAF36E9F6AD43F14B35A4", + "E8B79EC5D06E111BDFAFD71E9F5760F00AC8AC5D8BF768F9FF6F08B8F026096B1CC3A4C973333019F1E3553E77DA3F98CB9F542E0A90E5F8A940CC58E59844B3", + "DFB320C44F9D41D1EFDCC015F08DD5539E526E39C87D509AE6812A969E5431BF4FA7D91FFD03B981E0D544CF72D7B1C0374F8801482E6DEA2EF903877EBA675E", + "D88675118FDB55A5FB365AC2AF1D217BF526CE1EE9C94B2F0090B2C58A06CA58187D7FE57C7BED9D26FCA067B4110EEFCD9A0A345DE872ABE20DE368001B0745", + "B893F2FC41F7B0DD6E2F6AA2E0370C0CFF7DF09E3ACFCC0E920B6E6FAD0EF747C40668417D342B80D2351E8C175F20897A062E9765E6C67B539B6BA8B9170545", + "6C67EC5697ACCD235C59B486D7B70BAEEDCBD4AA64EBD4EEF3C7EAC189561A726250AEC4D48CADCAFBBE2CE3C16CE2D691A8CCE06E8879556D4483ED7165C063", + 
"F1AA2B044F8F0C638A3F362E677B5D891D6FD2AB0765F6EE1E4987DE057EAD357883D9B405B9D609EEA1B869D97FB16D9B51017C553F3B93C0A1E0F1296FEDCD", + "CBAA259572D4AEBFC1917ACDDC582B9F8DFAA928A198CA7ACD0F2AA76A134A90252E6298A65B08186A350D5B7626699F8CB721A3EA5921B753AE3A2DCE24BA3A", + "FA1549C9796CD4D303DCF452C1FBD5744FD9B9B47003D920B92DE34839D07EF2A29DED68F6FC9E6C45E071A2E48BD50C5084E96B657DD0404045A1DDEFE282ED", + "5CF2AC897AB444DCB5C8D87C495DBDB34E1838B6B629427CAA51702AD0F9688525F13BEC503A3C3A2C80A65E0B5715E8AFAB00FFA56EC455A49A1AD30AA24FCD", + "9AAF80207BACE17BB7AB145757D5696BDE32406EF22B44292EF65D4519C3BB2AD41A59B62CC3E94B6FA96D32A7FAADAE28AF7D35097219AA3FD8CDA31E40C275", + "AF88B163402C86745CB650C2988FB95211B94B03EF290EED9662034241FD51CF398F8073E369354C43EAE1052F9B63B08191CAA138AA54FEA889CC7024236897", + "48FA7D64E1CEEE27B9864DB5ADA4B53D00C9BC7626555813D3CD6730AB3CC06FF342D727905E33171BDE6E8476E77FB1720861E94B73A2C538D254746285F430", + "0E6FD97A85E904F87BFE85BBEB34F69E1F18105CF4ED4F87AEC36C6E8B5F68BD2A6F3DC8A9ECB2B61DB4EEDB6B2EA10BF9CB0251FB0F8B344ABF7F366B6DE5AB", + "06622DA5787176287FDC8FED440BAD187D830099C94E6D04C8E9C954CDA70C8BB9E1FC4A6D0BAA831B9B78EF6648681A4867A11DA93EE36E5E6A37D87FC63F6F", + "1DA6772B58FABF9C61F68D412C82F182C0236D7D575EF0B58DD22458D643CD1DFC93B03871C316D8430D312995D4197F0874C99172BA004A01EE295ABAC24E46", + "3CD2D9320B7B1D5FB9AAB951A76023FA667BE14A9124E394513918A3F44096AE4904BA0FFC150B63BC7AB1EEB9A6E257E5C8F000A70394A5AFD842715DE15F29", + "04CDC14F7434E0B4BE70CB41DB4C779A88EAEF6ACCEBCB41F2D42FFFE7F32A8E281B5C103A27021D0D08362250753CDF70292195A53A48728CEB5844C2D98BAB", + "9071B7A8A075D0095B8FB3AE5113785735AB98E2B52FAF91D5B89E44AAC5B5D4EBBF91223B0FF4C71905DA55342E64655D6EF8C89A4768C3F93A6DC0366B5BC8", + "EBB30240DD96C7BC8D0ABE49AA4EDCBB4AFDC51FF9AAF720D3F9E7FBB0F9C6D6571350501769FC4EBD0B2141247FF400D4FD4BE414EDF37757BB90A32AC5C65A", + "8532C58BF3C8015D9D1CBE00EEF1F5082F8F3632FBE9F1ED4F9DFB1FA79E8283066D77C44C4AF943D76B300364AECBD0648C8A8939BD204123F4B56260422DEC", + "FE9846D64F7C7708696F840E2D76CB4408B6595C2F81EC6A28A7F2F20CB88CFE6AC0B9E9B8244F08BD7095C350C1D0842F64FB01BB7F532DFCD47371B0AEEB79", + "28F17EA6FB6C42092DC264257E29746321FB5BDAEA9873C2A7FA9D8F53818E899E161BC77DFE8090AFD82BF2266C5C1BC930A8D1547624439E662EF695F26F24", + "EC6B7D7F030D4850ACAE3CB615C21DD25206D63E84D1DB8D957370737BA0E98467EA0CE274C66199901EAEC18A08525715F53BFDB0AACB613D342EBDCEEDDC3B", + "B403D3691C03B0D3418DF327D5860D34BBFCC4519BFBCE36BF33B208385FADB9186BC78A76C489D89FD57E7DC75412D23BCD1DAE8470CE9274754BB8585B13C5", + "31FC79738B8772B3F55CD8178813B3B52D0DB5A419D30BA9495C4B9DA0219FAC6DF8E7C23A811551A62B827F256ECDB8124AC8A6792CCFECC3B3012722E94463", + "BB2039EC287091BCC9642FC90049E73732E02E577E2862B32216AE9BEDCD730C4C284EF3968C368B7D37584F97BD4B4DC6EF6127ACFE2E6AE2509124E66C8AF4", + "F53D68D13F45EDFCB9BD415E2831E938350D5380D3432278FC1C0C381FCB7C65C82DAFE051D8C8B0D44E0974A0E59EC7BF7ED0459F86E96F329FC79752510FD3", + "8D568C7984F0ECDF7640FBC483B5D8C9F86634F6F43291841B309A350AB9C1137D24066B09DA9944BAC54D5BB6580D836047AAC74AB724B887EBF93D4B32ECA9", + "C0B65CE5A96FF774C456CAC3B5F2C4CD359B4FF53EF93A3DA0778BE4900D1E8DA1601E769E8F1B02D2A2F8C5B9FA10B44F1C186985468FEEB008730283A6657D", + "4900BBA6F5FB103ECE8EC96ADA13A5C3C85488E05551DA6B6B33D988E611EC0FE2E3C2AA48EA6AE8986A3A231B223C5D27CEC2EADDE91CE07981EE652862D1E4", + "C7F5C37C7285F927F76443414D4357FF789647D7A005A5A787E03C346B57F49F21B64FA9CF4B7E45573E23049017567121A9C3D4B2B73EC5E9413577525DB45A", + 
"EC7096330736FDB2D64B5653E7475DA746C23A4613A82687A28062D3236364284AC01720FFB406CFE265C0DF626A188C9E5963ACE5D3D5BB363E32C38C2190A6", + "82E744C75F4649EC52B80771A77D475A3BC091989556960E276A5F9EAD92A03F718742CDCFEAEE5CB85C44AF198ADC43A4A428F5F0C2DDB0BE36059F06D7DF73", + "2834B7A7170F1F5B68559AB78C1050EC21C919740B784A9072F6E5D69F828D70C919C5039FB148E39E2C8A52118378B064CA8D5001CD10A5478387B966715ED6", + "16B4ADA883F72F853BB7EF253EFCAB0C3E2161687AD61543A0D2824F91C1F81347D86BE709B16996E17F2DD486927B0288AD38D13063C4A9672C39397D3789B6", + "78D048F3A69D8B54AE0ED63A573AE350D89F7C6CF1F3688930DE899AFA037697629B314E5CD303AA62FEEA72A25BF42B304B6C6BCB27FAE21C16D925E1FBDAC3", + "0F746A48749287ADA77A82961F05A4DA4ABDB7D77B1220F836D09EC814359C0EC0239B8C7B9FF9E02F569D1B301EF67C4612D1DE4F730F81C12C40CC063C5CAA", + "F0FC859D3BD195FBDC2D591E4CDAC15179EC0F1DC821C11DF1F0C1D26E6260AAA65B79FAFACAFD7D3AD61E600F250905F5878C87452897647A35B995BCADC3A3", + "2620F687E8625F6A412460B42E2CEF67634208CE10A0CBD4DFF7044A41B7880077E9F8DC3B8D1216D3376A21E015B58FB279B521D83F9388C7382C8505590B9B", + "227E3AED8D2CB10B918FCB04F9DE3E6D0A57E08476D93759CD7B2ED54A1CBF0239C528FB04BBF288253E601D3BC38B21794AFEF90B17094A182CAC557745E75F", + "1A929901B09C25F27D6B35BE7B2F1C4745131FDEBCA7F3E2451926720434E0DB6E74FD693AD29B777DC3355C592A361C4873B01133A57C2E3B7075CBDB86F4FC", + "5FD7968BC2FE34F220B5E3DC5AF9571742D73B7D60819F2888B629072B96A9D8AB2D91B82D0A9AABA61BBD39958132FCC4257023D1ECA591B3054E2DC81C8200", + "DFCCE8CF32870CC6A503EADAFC87FD6F78918B9B4D0737DB6810BE996B5497E7E5CC80E312F61E71FF3E9624436073156403F735F56B0B01845C18F6CAF772E6", + "02F7EF3A9CE0FFF960F67032B296EFCA3061F4934D690749F2D01C35C81C14F39A67FA350BC8A0359BF1724BFFC3BCA6D7C7BBA4791FD522A3AD353C02EC5AA8", + "64BE5C6ABA65D594844AE78BB022E5BEBE127FD6B6FFA5A13703855AB63B624DCD1A363F99203F632EC386F3EA767FC992E8ED9686586AA27555A8599D5B808F", + "F78585505C4EAA54A8B5BE70A61E735E0FF97AF944DDB3001E35D86C4E2199D976104B6AE31750A36A726ED285064F5981B503889FEF822FCDC2898DDDB7889A", + "E4B5566033869572EDFD87479A5BB73C80E8759B91232879D96B1DDA36C012076EE5A2ED7AE2DE63EF8406A06AEA82C188031B560BEAFB583FB3DE9E57952A7E", + "E1B3E7ED867F6C9484A2A97F7715F25E25294E992E41F6A7C161FFC2ADC6DAAEB7113102D5E6090287FE6AD94CE5D6B739C6CA240B05C76FB73F25DD024BF935", + "85FD085FDC12A080983DF07BD7012B0D402A0F4043FCB2775ADF0BAD174F9B08D1676E476985785C0A5DCC41DBFF6D95EF4D66A3FBDC4A74B82BA52DA0512B74", + "AED8FA764B0FBFF821E05233D2F7B0900EC44D826F95E93C343C1BC3BA5A24374B1D616E7E7ABA453A0ADA5E4FAB5382409E0D42CE9C2BC7FB39A99C340C20F0", + "7BA3B2E297233522EEB343BD3EBCFD835A04007735E87F0CA300CBEE6D416565162171581E4020FF4CF176450F1291EA2285CB9EBFFE4C56660627685145051C", + "DE748BCF89EC88084721E16B85F30ADB1A6134D664B5843569BABC5BBD1A15CA9B61803C901A4FEF32965A1749C9F3A4E243E173939DC5A8DC495C671AB52145", + "AAF4D2BDF200A919706D9842DCE16C98140D34BC433DF320ABA9BD429E549AA7A3397652A4D768277786CF993CDE2338673ED2E6B66C961FEFB82CD20C93338F", + "C408218968B788BF864F0997E6BC4C3DBA68B276E2125A4843296052FF93BF5767B8CDCE7131F0876430C1165FEC6C4F47ADAA4FD8BCFACEF463B5D3D0FA61A0", + "76D2D819C92BCE55FA8E092AB1BF9B9EAB237A25267986CACF2B8EE14D214D730DC9A5AA2D7B596E86A1FD8FA0804C77402D2FCD45083688B218B1CDFA0DCBCB", + "72065EE4DD91C2D8509FA1FC28A37C7FC9FA7D5B3F8AD3D0D7A25626B57B1B44788D4CAF806290425F9890A3A2A35A905AB4B37ACFD0DA6E4517B2525C9651E4", + "64475DFE7600D7171BEA0B394E27C9B00D8E74DD1E416A79473682AD3DFDBB706631558055CFC8A40E07BD015A4540DCDEA15883CBBF31412DF1DE1CD4152B91", + 
"12CD1674A4488A5D7C2B3160D2E2C4B58371BEDAD793418D6F19C6EE385D70B3E06739369D4DF910EDB0B0A54CBFF43D54544CD37AB3A06CFA0A3DDAC8B66C89", + "60756966479DEDC6DD4BCFF8EA7D1D4CE4D4AF2E7B097E32E3763518441147CC12B3C0EE6D2ECABF1198CEC92E86A3616FBA4F4E872F5825330ADBB4C1DEE444", + "A7803BCB71BC1D0F4383DDE1E0612E04F872B715AD30815C2249CF34ABB8B024915CB2FC9F4E7CC4C8CFD45BE2D5A91EAB0941C7D270E2DA4CA4A9F7AC68663A", + "B84EF6A7229A34A750D9A98EE2529871816B87FBE3BC45B45FA5AE82D5141540211165C3C5D7A7476BA5A4AA06D66476F0D9DC49A3F1EE72C3ACABD498967414", + "FAE4B6D8EFC3F8C8E64D001DABEC3A21F544E82714745251B2B4B393F2F43E0DA3D403C64DB95A2CB6E23EBB7B9E94CDD5DDAC54F07C4A61BD3CB10AA6F93B49", + "34F7286605A122369540141DED79B8957255DA2D4155ABBF5A8DBB89C8EB7EDE8EEEF1DAA46DC29D751D045DC3B1D658BB64B80FF8589EDDB3824B13DA235A6B", + "3B3B48434BE27B9EABABBA43BF6B35F14B30F6A88DC2E750C358470D6B3AA3C18E47DB4017FA55106D8252F016371A00F5F8B070B74BA5F23CFFC5511C9F09F0", + "BA289EBD6562C48C3E10A8AD6CE02E73433D1E93D7C9279D4D60A7E879EE11F441A000F48ED9F7C4ED87A45136D7DCCDCA482109C78A51062B3BA4044ADA2469", + "022939E2386C5A37049856C850A2BB10A13DFEA4212B4C732A8840A9FFA5FAF54875C5448816B2785A007DA8A8D2BC7D71A54E4E6571F10B600CBDB25D13EDE3", + "E6FEC19D89CE8717B1A087024670FE026F6C7CBDA11CAEF959BB2D351BF856F8055D1C0EBDAAA9D1B17886FC2C562B5E99642FC064710C0D3488A02B5ED7F6FD", + "94C96F02A8F576ACA32BA61C2B206F907285D9299B83AC175C209A8D43D53BFE683DD1D83E7549CB906C28F59AB7C46F8751366A28C39DD5FE2693C9019666C8", + "31A0CD215EBD2CB61DE5B9EDC91E6195E31C59A5648D5C9F737E125B2605708F2E325AB3381C8DCE1A3E958886F1ECDC60318F882CFE20A24191352E617B0F21", + "91AB504A522DCE78779F4C6C6BA2E6B6DB5565C76D3E7E7C920CAF7F757EF9DB7C8FCF10E57F03379EA9BF75EB59895D96E149800B6AAE01DB778BB90AFBC989", + "D85CABC6BD5B1A01A5AFD8C6734740DA9FD1C1ACC6DB29BFC8A2E5B668B028B6B3154BFB8703FA3180251D589AD38040CEB707C4BAD1B5343CB426B61EAA49C1", + "D62EFBEC2CA9C1F8BD66CE8B3F6A898CB3F7566BA6568C618AD1FEB2B65B76C3CE1DD20F7395372FAF28427F61C9278049CF0140DF434F5633048C86B81E0399", + "7C8FDC6175439E2C3DB15BAFA7FB06143A6A23BC90F449E79DEEF73C3D492A671715C193B6FEA9F036050B946069856B897E08C00768F5EE5DDCF70B7CD6D0E0", + "58602EE7468E6BC9DF21BD51B23C005F72D6CB013F0A1B48CBEC5ECA299299F97F09F54A9A01483EAEB315A6478BAD37BA47CA1347C7C8FC9E6695592C91D723", + "27F5B79ED256B050993D793496EDF4807C1D85A7B0A67C9C4FA99860750B0AE66989670A8FFD7856D7CE411599E58C4D77B232A62BEF64D15275BE46A68235FF", + "3957A976B9F1887BF004A8DCA942C92D2B37EA52600F25E0C9BC5707D0279C00C6E85A839B0D2D8EB59C51D94788EBE62474A791CADF52CCCF20F5070B6573FC", + "EAA2376D55380BF772ECCA9CB0AA4668C95C707162FA86D518C8CE0CA9BF7362B9F2A0ADC3FF59922DF921B94567E81E452F6C1A07FC817CEBE99604B3505D38", + "C1E2C78B6B2734E2480EC550434CB5D613111ADCC21D475545C3B1B7E6FF12444476E5C055132E2229DC0F807044BB919B1A5662DD38A9EE65E243A3911AED1A", + "8AB48713389DD0FCF9F965D3CE66B1E559A1F8C58741D67683CD971354F452E62D0207A65E436C5D5D8F8EE71C6ABFE50E669004C302B31A7EA8311D4A916051", + "24CE0ADDAA4C65038BD1B1C0F1452A0B128777AABC94A29DF2FD6C7E2F85F8AB9AC7EFF516B0E0A825C84A24CFE492EAAD0A6308E46DD42FE8333AB971BB30CA", + "5154F929EE03045B6B0C0004FA778EDEE1D139893267CC84825AD7B36C63DE32798E4A166D24686561354F63B00709A1364B3C241DE3FEBF0754045897467CD4", + "E74E907920FD87BD5AD636DD11085E50EE70459C443E1CE5809AF2BC2EBA39F9E6D7128E0E3712C316DA06F4705D78A4838E28121D4344A2C79C5E0DB307A677", + "BF91A22334BAC20F3FD80663B3CD06C4E8802F30E6B59F90D3035CC9798A217ED5A31ABBDA7FA6842827BDF2A7A1C21F6FCFCCBB54C6C52926F32DA816269BE1", + 
"D9D5C74BE5121B0BD742F26BFFB8C89F89171F3F934913492B0903C271BBE2B3395EF259669BEF43B57F7FCC3027DB01823F6BAEE66E4F9FEAD4D6726C741FCE", + "50C8B8CF34CD879F80E2FAAB3230B0C0E1CC3E9DCADEB1B9D97AB923415DD9A1FE38ADDD5C11756C67990B256E95AD6D8F9FEDCE10BF1C90679CDE0ECF1BE347", + "0A386E7CD5DD9B77A035E09FE6FEE2C8CE61B5383C87EA43205059C5E4CD4F4408319BB0A82360F6A58E6C9CE3F487C446063BF813BC6BA535E17FC1826CFC91", + "1F1459CB6B61CBAC5F0EFE8FC487538F42548987FCD56221CFA7BEB22504769E792C45ADFB1D6B3D60D7B749C8A75B0BDF14E8EA721B95DCA538CA6E25711209", + "E58B3836B7D8FEDBB50CA5725C6571E74C0785E97821DAB8B6298C10E4C079D4A6CDF22F0FEDB55032925C16748115F01A105E77E00CEE3D07924DC0D8F90659", + "B929CC6505F020158672DEDA56D0DB081A2EE34C00C1100029BDF8EA98034FA4BF3E8655EC697FE36F40553C5BB46801644A627D3342F4FC92B61F03290FB381", + "72D353994B49D3E03153929A1E4D4F188EE58AB9E72EE8E512F29BC773913819CE057DDD7002C0433EE0A16114E3D156DD2C4A7E80EE53378B8670F23E33EF56", + "C70EF9BFD775D408176737A0736D68517CE1AAAD7E81A93C8C1ED967EA214F56C8A377B1763E676615B60F3988241EAE6EAB9685A5124929D28188F29EAB06F7", + "C230F0802679CB33822EF8B3B21BF7A9A28942092901D7DAC3760300831026CF354C9232DF3E084D9903130C601F63C1F4A4A4B8106E468CD443BBE5A734F45F", + "6F43094CAFB5EBF1F7A4937EC50F56A4C9DA303CBB55AC1F27F1F1976CD96BEDA9464F0E7B9C54620B8A9FBA983164B8BE3578425A024F5FE199C36356B88972", + "3745273F4C38225DB2337381871A0C6AAFD3AF9B018C88AA02025850A5DC3A42A1A3E03E56CBF1B0876D63A441F1D2856A39B8801EB5AF325201C415D65E97FE", + "C50C44CCA3EC3EDAAE779A7E179450EBDDA2F97067C690AA6C5A4AC7C30139BB27C0DF4DB3220E63CB110D64F37FFE078DB72653E2DAACF93AE3F0A2D1A7EB2E", + "8AEF263E385CBC61E19B28914243262AF5AFE8726AF3CE39A79C27028CF3ECD3F8D2DFD9CFC9AD91B58F6F20778FD5F02894A3D91C7D57D1E4B866A7F364B6BE", + "28696141DE6E2D9BCB3235578A66166C1448D3E905A1B482D423BE4BC5369BC8C74DAE0ACC9CC123E1D8DDCE9F97917E8C019C552DA32D39D2219B9ABF0FA8C8", + "2FB9EB2085830181903A9DAFE3DB428EE15BE7662224EFD643371FB25646AEE716E531ECA69B2BDC8233F1A8081FA43DA1500302975A77F42FA592136710E9DC", + "66F9A7143F7A3314A669BF2E24BBB35014261D639F495B6C9C1F104FE8E320ACA60D4550D69D52EDBD5A3CDEB4014AE65B1D87AA770B69AE5C15F4330B0B0AD8", + "F4C4DD1D594C3565E3E25CA43DAD82F62ABEA4835ED4CD811BCD975E46279828D44D4C62C3679F1B7F7B9DD4571D7B49557347B8C5460CBDC1BEF690FB2A08C0", + "8F1DC9649C3A84551F8F6E91CAC68242A43B1F8F328EE92280257387FA7559AA6DB12E4AEADC2D26099178749C6864B357F3F83B2FB3EFA8D2A8DB056BED6BCC", + "3139C1A7F97AFD1675D460EBBC07F2728AA150DF849624511EE04B743BA0A833092F18C12DC91B4DD243F333402F59FE28ABDBBBAE301E7B659C7A26D5C0F979", + "06F94A2996158A819FE34C40DE3CF0379FD9FB85B3E363BA3926A0E7D960E3F4C2E0C70C7CE0CCB2A64FC29869F6E7AB12BD4D3F14FCE943279027E785FB5C29", + "C29C399EF3EEE8961E87565C1CE263925FC3D0CE267D13E48DD9E732EE67B0F69FAD56401B0F10FCAAC119201046CCA28C5B14ABDEA3212AE65562F7F138DB3D", + "4CEC4C9DF52EEF05C3F6FAAA9791BC7445937183224ECC37A1E58D0132D35617531D7E795F52AF7B1EB9D147DE1292D345FE341823F8E6BC1E5BADCA5C656108", + "898BFBAE93B3E18D00697EAB7D9704FA36EC339D076131CEFDF30EDBE8D9CC81C3A80B129659B163A323BAB9793D4FEED92D54DAE966C77529764A09BE88DB45", + "EE9BD0469D3AAF4F14035BE48A2C3B84D9B4B1FFF1D945E1F1C1D38980A951BE197B25FE22C731F20AEACC930BA9C4A1F4762227617AD350FDABB4E80273A0F4", + "3D4D3113300581CD96ACBF091C3D0F3C310138CD6979E6026CDE623E2DD1B24D4A8638BED1073344783AD0649CC6305CCEC04BEB49F31C633088A99B65130267", + "95C0591AD91F921AC7BE6D9CE37E0663ED8011C1CFD6D0162A5572E94368BAC02024485E6A39854AA46FE38E97D6C6B1947CD272D86B06BB5B2F78B9B68D559D", + 
"227B79DED368153BF46C0A3CA978BFDBEF31F3024A5665842468490B0FF748AE04E7832ED4C9F49DE9B1706709D623E5C8C15E3CAECAE8D5E433430FF72F20EB", + "5D34F3952F0105EEF88AE8B64C6CE95EBFADE0E02C69B08762A8712D2E4911AD3F941FC4034DC9B2E479FDBCD279B902FAF5D838BB2E0C6495D372B5B7029813", + "7F939BF8353ABCE49E77F14F3750AF20B7B03902E1A1E7FB6AAF76D0259CD401A83190F15640E74F3E6C5A90E839C7821F6474757F75C7BF9002084DDC7A62DC", + "062B61A2F9A33A71D7D0A06119644C70B0716A504DE7E5E1BE49BD7B86E7ED6817714F9F0FC313D06129597E9A2235EC8521DE36F7290A90CCFC1FFA6D0AEE29", + "F29E01EEAE64311EB7F1C6422F946BF7BEA36379523E7B2BBABA7D1D34A22D5EA5F1C5A09D5CE1FE682CCED9A4798D1A05B46CD72DFF5C1B355440B2A2D476BC", + "EC38CD3BBAB3EF35D7CB6D5C914298351D8A9DC97FCEE051A8A02F58E3ED6184D0B7810A5615411AB1B95209C3C810114FDEB22452084E77F3F847C6DBAAFE16", + "C2AEF5E0CA43E82641565B8CB943AA8BA53550CAEF793B6532FAFAD94B816082F0113A3EA2F63608AB40437ECC0F0229CB8FA224DCF1C478A67D9B64162B92D1", + "15F534EFFF7105CD1C254D074E27D5898B89313B7D366DC2D7D87113FA7D53AAE13F6DBA487AD8103D5E854C91FDB6E1E74B2EF6D1431769C30767DDE067A35C", + "89ACBCA0B169897A0A2714C2DF8C95B5B79CB69390142B7D6018BB3E3076B099B79A964152A9D912B1B86412B7E372E9CECAD7F25D4CBAB8A317BE36492A67D7", + "E3C0739190ED849C9C962FD9DBB55E207E624FCAC1EB417691515499EEA8D8267B7E8F1287A63633AF5011FDE8C4DDF55BFDF722EDF88831414F2CFAED59CB9A", + "8D6CF87C08380D2D1506EEE46FD4222D21D8C04E585FBFD08269C98F702833A156326A0724656400EE09351D57B440175E2A5DE93CC5F80DB6DAF83576CF75FA", + "DA24BEDE383666D563EEED37F6319BAF20D5C75D1635A6BA5EF4CFA1AC95487E96F8C08AF600AAB87C986EBAD49FC70A58B4890B9C876E091016DAF49E1D322E", + "F9D1D1B1E87EA7AE753A029750CC1CF3D0157D41805E245C5617BB934E732F0AE3180B78E05BFE76C7C3051E3E3AC78B9B50C05142657E1E03215D6EC7BFD0FC", + "11B7BC1668032048AA43343DE476395E814BBBC223678DB951A1B03A021EFAC948CFBE215F97FE9A72A2F6BC039E3956BFA417C1A9F10D6D7BA5D3D32FF323E5", + "B8D9000E4FC2B066EDB91AFEE8E7EB0F24E3A201DB8B6793C0608581E628ED0BCC4E5AA6787992A4BCC44E288093E63EE83ABD0BC3EC6D0934A674A4DA13838A", + "CE325E294F9B6719D6B61278276AE06A2564C03BB0B783FAFE785BDF89C7D5ACD83E78756D301B445699024EAEB77B54D477336EC2A4F332F2B3F88765DDB0C3", + "29ACC30E9603AE2FCCF90BF97E6CC463EBE28C1B2F9B4B765E70537C25C702A29DCBFBF14C99C54345BA2B51F17B77B5F15DB92BBAD8FA95C471F5D070A137CC", + "3379CBAAE562A87B4C0425550FFDD6BFE1203F0D666CC7EA095BE407A5DFE61EE91441CD5154B3E53B4F5FB31AD4C7A9AD5C7AF4AE679AA51A54003A54CA6B2D", + "3095A349D245708C7CF550118703D7302C27B60AF5D4E67FC978F8A4E60953C7A04F92FCF41AEE64321CCB707A895851552B1E37B00BC5E6B72FA5BCEF9E3FFF", + "07262D738B09321F4DBCCEC4BB26F48CB0F0ED246CE0B31B9A6E7BC683049F1F3E5545F28CE932DD985C5AB0F43BD6DE0770560AF329065ED2E49D34624C2CBB", + "B6405ECA8EE3316C87061CC6EC18DBA53E6C250C63BA1F3BAE9E55DD3498036AF08CD272AA24D713C6020D77AB2F3919AF1A32F307420618AB97E73953994FB4", + "7EE682F63148EE45F6E5315DA81E5C6E557C2C34641FC509C7A5701088C38A74756168E2CD8D351E88FD1A451F360A01F5B2580F9B5A2E8CFC138F3DD59A3FFC", + "1D263C179D6B268F6FA016F3A4F29E943891125ED8593C81256059F5A7B44AF2DCB2030D175C00E62ECAF7EE96682AA07AB20A611024A28532B1C25B86657902", + "106D132CBDB4CD2597812846E2BC1BF732FEC5F0A5F65DBB39EC4E6DC64AB2CE6D24630D0F15A805C3540025D84AFA98E36703C3DBEE713E72DDE8465BC1BE7E", + "0E79968226650667A8D862EA8DA4891AF56A4E3A8B6D1750E394F0DEA76D640D85077BCEC2CC86886E506751B4F6A5838F7F0B5FEF765D9DC90DCDCBAF079F08", + "521156A82AB0C4E566E5844D5E31AD9AAF144BBD5A464FDCA34DBD5717E8FF711D3FFEBBFA085D67FE996A34F6D3E4E60B1396BF4B1610C263BDBB834D560816", + 
"1ABA88BEFC55BC25EFBCE02DB8B9933E46F57661BAEABEB21CC2574D2A518A3CBA5DC5A38E49713440B25F9C744E75F6B85C9D8F4681F676160F6105357B8406", + "5A9949FCB2C473CDA968AC1B5D08566DC2D816D960F57E63B898FA701CF8EBD3F59B124D95BFBBEDC5F1CF0E17D5EAED0C02C50B69D8A402CABCCA4433B51FD4", + "B0CEAD09807C672AF2EB2B0F06DDE46CF5370E15A4096B1A7D7CBB36EC31C205FBEFCA00B7A4162FA89FB4FB3EB78D79770C23F44E7206664CE3CD931C291E5D", + "BB6664931EC97044E45B2AE420AE1C551A8874BC937D08E969399C3964EBDBA8346CDD5D09CAAFE4C28BA7EC788191CECA65DDD6F95F18583E040D0F30D0364D", + "65BC770A5FAA3792369803683E844B0BE7EE96F29F6D6A35568006BD5590F9A4EF639B7A8061C7B0424B66B60AC34AF3119905F33A9D8C3AE18382CA9B689900", + "EA9B4DCA333336AAF839A45C6EAA48B8CB4C7DDABFFEA4F643D6357EA6628A480A5B45F2B052C1B07D1FEDCA918B6F1139D80F74C24510DCBAA4BE70EACC1B06", + "E6342FB4A780AD975D0E24BCE149989B91D360557E87994F6B457B895575CC02D0C15BAD3CE7577F4C63927FF13F3E381FF7E72BDBE745324844A9D27E3F1C01", + "3E209C9B33E8E461178AB46B1C64B49A07FB745F1C8BC95FBFB94C6B87C69516651B264EF980937FAD41238B91DDC011A5DD777C7EFD4494B4B6ECD3A9C22AC0", + "FD6A3D5B1875D80486D6E69694A56DBB04A99A4D051F15DB2689776BA1C4882E6D462A603B7015DC9F4B7450F05394303B8652CFB404A266962C41BAE6E18A94", + "951E27517E6BAD9E4195FC8671DEE3E7E9BE69CEE1422CB9FECFCE0DBA875F7B310B93EE3A3D558F941F635F668FF832D2C1D033C5E2F0997E4C66F147344E02", + "8EBA2F874F1AE84041903C7C4253C82292530FC8509550BFDC34C95C7E2889D5650B0AD8CB988E5C4894CB87FBFBB19612EA93CCC4C5CAD17158B9763464B492", + "16F712EAA1B7C6354719A8E7DBDFAF55E4063A4D277D947550019B38DFB564830911057D50506136E2394C3B28945CC964967D54E3000C2181626CFB9B73EFD2", + "C39639E7D5C7FB8CDD0FD3E6A52096039437122F21C78F1679CEA9D78A734C56ECBEB28654B4F18E342C331F6F7229EC4B4BC281B2D80A6EB50043F31796C88C", + "72D081AF99F8A173DCC9A0AC4EB3557405639A29084B54A40172912A2F8A395129D5536F0918E902F9E8FA6000995F4168DDC5F893011BE6A0DBC9B8A1A3F5BB", + "C11AA81E5EFD24D5FC27EE586CFD8847FBB0E27601CCECE5ECCA0198E3C7765393BB74457C7E7A27EB9170350E1FB53857177506BE3E762CC0F14D8C3AFE9077", + "C28F2150B452E6C0C424BCDE6F8D72007F9310FED7F2F87DE0DBB64F4479D6C1441BA66F44B2ACCEE61609177ED340128B407ECEC7C64BBE50D63D22D8627727", + "F63D88122877EC30B8C8B00D22E89000A966426112BD44166E2F525B769CCBE9B286D437A0129130DDE1A86C43E04BEDB594E671D98283AFE64CE331DE9828FD", + "348B0532880B88A6614A8D7408C3F913357FBB60E995C60205BE9139E74998AEDE7F4581E42F6B52698F7FA1219708C14498067FD1E09502DE83A77DD281150C", + "5133DC8BEF725359DFF59792D85EAF75B7E1DCD1978B01C35B1B85FCEBC63388AD99A17B6346A217DC1A9622EBD122ECF6913C4D31A6B52A695B86AF00D741A0", + "2753C4C0E98ECAD806E88780EC27FCCD0F5C1AB547F9E4BF1659D192C23AA2CC971B58B6802580BAEF8ADC3B776EF7086B2545C2987F348EE3719CDEF258C403", + "B1663573CE4B9D8CAEFC865012F3E39714B9898A5DA6CE17C25A6A47931A9DDB9BBE98ADAA553BEED436E89578455416C2A52A525CF2862B8D1D49A2531B7391", + "64F58BD6BFC856F5E873B2A2956EA0EDA0D6DB0DA39C8C7FC67C9F9FEEFCFF3072CDF9E6EA37F69A44F0C61AA0DA3693C2DB5B54960C0281A088151DB42B11E8", + "0764C7BE28125D9065C4B98A69D60AEDE703547C66A12E17E1C618994132F5EF82482C1E3FE3146CC65376CC109F0138ED9A80E49F1F3C7D610D2F2432F20605", + "F748784398A2FF03EBEB07E155E66116A839741A336E32DA71EC696001F0AD1B25CD48C69CFCA7265ECA1DD71904A0CE748AC4124F3571076DFA7116A9CF00E9", + "3F0DBC0186BCEB6B785BA78D2A2A013C910BE157BDAFFAE81BB6663B1A73722F7F1228795F3ECADA87CF6EF0078474AF73F31ECA0CC200ED975B6893F761CB6D", + "D4762CD4599876CA75B2B8FE249944DBD27ACE741FDAB93616CBC6E425460FEB51D4E7ADCC38180E7FC47C89024A7F56191ADB878DFDE4EAD62223F5A2610EFE", + 
"CD36B3D5B4C91B90FCBBA79513CFEE1907D8645A162AFD0CD4CF4192D4A5F4C892183A8EACDB2B6B6A9D9AA8C11AC1B261B380DBEE24CA468F1BFD043C58EEFE", + "98593452281661A53C48A9D8CD790826C1A1CE567738053D0BEE4A91A3D5BD92EEFDBABEBE3204F2031CA5F781BDA99EF5D8AE56E5B04A9E1ECD21B0EB05D3E1", + "771F57DD2775CCDAB55921D3E8E30CCF484D61FE1C1B9C2AE819D0FB2A12FAB9BE70C4A7A138DA84E8280435DAADE5BBE66AF0836A154F817FB17F3397E725A3", + "C60897C6F828E21F16FBB5F15B323F87B6C8955EABF1D38061F707F608ABDD993FAC3070633E286CF8339CE295DD352DF4B4B40B2F29DA1DD50B3A05D079E6BB", + "8210CD2C2D3B135C2CF07FA0D1433CD771F325D075C6469D9C7F1BA0943CD4AB09808CABF4ACB9CE5BB88B498929B4B847F681AD2C490D042DB2AEC94214B06B", + "1D4EDFFFD8FD80F7E4107840FA3AA31E32598491E4AF7013C197A65B7F36DD3AC4B478456111CD4309D9243510782FA31B7C4C95FA951520D020EB7E5C36E4EF", + "AF8E6E91FAB46CE4873E1A50A8EF448CC29121F7F74DEEF34A71EF89CC00D9274BC6C2454BBB3230D8B2EC94C62B1DEC85F3593BFA30EA6F7A44D7C09465A253", + "29FD384ED4906F2D13AA9FE7AF905990938BED807F1832454A372AB412EEA1F5625A1FCC9AC8343B7C67C5ABA6E0B1CC4644654913692C6B39EB9187CEACD3EC", + "A268C7885D9874A51C44DFFED8EA53E94F78456E0B2ED99FF5A3924760813826D960A15EDBEDBB5DE5226BA4B074E71B05C55B9756BB79E55C02754C2C7B6C8A", + "0CF8545488D56A86817CD7ECB10F7116B7EA530A45B6EA497B6C72C997E09E3D0DA8698F46BB006FC977C2CD3D1177463AC9057FDD1662C85D0C126443C10473", + "B39614268FDD8781515E2CFEBF89B4D5402BAB10C226E6344E6B9AE000FB0D6C79CB2F3EC80E80EAEB1980D2F8698916BD2E9F747236655116649CD3CA23A837", + "74BEF092FC6F1E5DBA3663A3FB003B2A5BA257496536D99F62B9D73F8F9EB3CE9FF3EEC709EB883655EC9EB896B9128F2AFC89CF7D1AB58A72F4A3BF034D2B4A", + "3A988D38D75611F3EF38B8774980B33E573B6C57BEE0469BA5EED9B44F29945E7347967FBA2C162E1C3BE7F310F2F75EE2381E7BFD6B3F0BAEA8D95DFB1DAFB1", + "58AEDFCE6F67DDC85A28C992F1C0BD0969F041E66F1EE88020A125CBFCFEBCD61709C9C4EBA192C15E69F020D462486019FA8DEA0CD7A42921A19D2FE546D43D", + "9347BD291473E6B4E368437B8E561E065F649A6D8ADA479AD09B1999A8F26B91CF6120FD3BFE014E83F23ACFA4C0AD7B3712B2C3C0733270663112CCD9285CD9", + "B32163E7C5DBB5F51FDC11D2EAC875EFBBCB7E7699090A7E7FF8A8D50795AF5D74D9FF98543EF8CDF89AC13D0485278756E0EF00C817745661E1D59FE38E7537", + "1085D78307B1C4B008C57A2E7E5B234658A0A82E4FF1E4AAAC72B312FDA0FE27D233BC5B10E9CC17FDC7697B540C7D95EB215A19A1A0E20E1ABFA126EFD568C7", + "4E5C734C7DDE011D83EAC2B7347B373594F92D7091B9CA34CB9C6F39BDF5A8D2F134379E16D822F6522170CCF2DDD55C84B9E6C64FC927AC4CF8DFB2A17701F2", + "695D83BD990A1117B3D0CE06CC888027D12A054C2677FD82F0D4FBFC93575523E7991A5E35A3752E9B70CE62992E268A877744CDD435F5F130869C9A2074B338", + "A6213743568E3B3158B9184301F3690847554C68457CB40FC9A4B8CFD8D4A118C301A07737AEDA0F929C68913C5F51C80394F53BFF1C3E83B2E40CA97EBA9E15", + "D444BFA2362A96DF213D070E33FA841F51334E4E76866B8139E8AF3BB3398BE2DFADDCBC56B9146DE9F68118DC5829E74B0C28D7711907B121F9161CB92B69A9", + "142709D62E28FCCCD0AF97FAD0F8465B971E82201DC51070FAA0372AA43E92484BE1C1E73BA10906D5D1853DB6A4106E0A7BF9800D373D6DEE2D46D62EF2A461", +} + +func TestBlake2B(t *testing.T) { + for len, expected := range unkeyed2B { + input := make([]byte, len) + for i := 0; i < len; i++ { + input[i] = byte(i) + } + + h := NewBlake2B() + h.Write(input) + d := h.Sum(nil) + + actual := fmt.Sprintf("%0128X", d) + + if actual != expected { + t.Errorf("bad hash (%d): input=%X, expected=%X, actual=%s", len, input, expected, actual) + } + } +} + +func ExampleNew() { + h := New(nil) + h.Write([]byte("one two three")) + d := h.Sum(nil) + fmt.Printf("%X", d) + // Output: + // 
5BD2901E0955770DE513E3C2397F3B7594E6BCF21F61708DF64AAECCD6BC2BE6EAE0A2CA524CCB2A7F054464B07472B9E130966D3CE4B1870E02DA788C4E33BE +} + +func ExampleNew_short() { + h := New(&Config{Size: 32}) + h.Write([]byte("one two three")) + d := h.Sum(nil) + fmt.Printf("%X", d) + // Output: + // 4BBA13CA5E6C7347347A331F69CCB09872E873E9FB415A2387B025712F68844B +} + +func ExampleNew_personalized() { + h := New(&Config{ + Key: []byte("sekrit"), + Salt: []byte("random but public"), + Personal: []byte("myAppName"), + }) + h.Write([]byte("one two three")) + d := h.Sum(nil) + fmt.Printf("%X", d) + // Output: + // FE995AB57D24D0A0DB081BB8C1E1EB69F46A3CE5AC97FD463837C5FCAA18080688C7389449692F32D6FD53C6B7A475E52AFF5C3FBDCD253715C4F8D1333068C5 +} + +func ExampleNew_treehash() { + h := New(&Config{ + Tree: &Tree{ + Fanout: 64, + MaxDepth: 8, + LeafSize: 65536, + InnerHashSize: 32, + NodeDepth: 3, + NodeOffset: 23, + IsLastNode: true, + }, + }) + h.Write([]byte("one two three")) + d := h.Sum(nil) + fmt.Printf("%X", d) + // Output: + // E86CF85D23FF3E33CCBC37F37B3A8EAE0FAE26E763FB5253F3D740DF823D47AB1273D6FFC53AD8FB15F3153F3E9F92974510975AE08ED311C68D3E4C0A3B21A6 +} + +func ExampleNewBlake2B() { + h := NewBlake2B() + h.Write([]byte("one two three")) + d := h.Sum(nil) + fmt.Printf("%X", d) + // Output: + // 5BD2901E0955770DE513E3C2397F3B7594E6BCF21F61708DF64AAECCD6BC2BE6EAE0A2CA524CCB2A7F054464B07472B9E130966D3CE4B1870E02DA788C4E33BE +} + +func TestKeyedBlake2B(t *testing.T) { + key := make([]byte, 64) + for i := 0; i < 64; i++ { + key[i] = byte(i) + } + + for len, expected := range keyed2B { + input := make([]byte, len) + for i := 0; i < len; i++ { + input[i] = byte(i) + } + + h := NewKeyedBlake2B(key) + h.Write(input) + d := h.Sum(nil) + + actual := fmt.Sprintf("%0128X", d) + + if actual != expected { + t.Errorf("bad hash (%d): input=%X, expected=%X, actual=%s", len, input, expected, actual) + return + } + } +} + +func ExampleNewKeyedBlake2B() { + h := NewKeyedBlake2B([]byte("my secret")) + h.Write([]byte("one two three")) + d := h.Sum(nil) + fmt.Printf("%X", d) + // Output: + // FC182724DC024B95F62E606859AC806E4EDCA09A927F6BC8BCCD07DADE3E4F26FC9D041661407527AADEF517A173E19BAB5C389217C29A08BE9731AEC83C02C3 +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/golang-lru/.gitignore b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/Godeps/_workspace/src/github.com/hashicorp/golang-lru/LICENSE b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 0000000..be2cc4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+     If you create software not governed by this License, and you want to
+     create a new license for such software, you may create and use a
+     modified version of this License if you rename the license and remove
+     any references to the name of the license steward (except to note that
+     such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/Godeps/_workspace/src/github.com/hashicorp/golang-lru/README.md b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/README.md
new file mode 100644
index 0000000..37fa54d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/README.md
@@ -0,0 +1,25 @@
+golang-lru
+==========
+
+This provides the `lru` package which implements a fixed-size
+thread-safe LRU cache. It is based on the cache in Groupcache.
+
+Documentation
+=============
+
+Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
+
+Example
+=======
+
+Using the LRU is very simple:
+
+```go
+l, _ := New(128)
+for i := 0; i < 256; i++ {
+	l.Add(i, nil)
+}
+if l.Len() != 128 {
+	panic(fmt.Sprintf("bad len: %v", l.Len()))
+}
+```
diff --git a/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go
new file mode 100644
index 0000000..8ccb729
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go
@@ -0,0 +1,119 @@
+// This package provides a simple LRU cache. It is based on the
+// LRU implementation in groupcache:
+// https://github.com/golang/groupcache/tree/master/lru
+package lru
+
+import (
+	"container/list"
+	"errors"
+	"sync"
+)
+
+// Cache is a thread-safe fixed size LRU cache.
+type Cache struct {
+	size      int
+	evictList *list.List
+	items     map[interface{}]*list.Element
+	lock      sync.Mutex
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+	key   interface{}
+	value interface{}
+}
+
+// New creates an LRU of the given size
+func New(size int) (*Cache, error) {
+	if size <= 0 {
+		return nil, errors.New("Must provide a positive size")
+	}
+	c := &Cache{
+		size:      size,
+		evictList: list.New(),
+		items:     make(map[interface{}]*list.Element, size),
+	}
+	return c, nil
+}
+
+// Purge is used to completely clear the cache
+func (c *Cache) Purge() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	c.evictList = list.New()
+	c.items = make(map[interface{}]*list.Element, c.size)
+}
+
+// Add adds a value to the cache.
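+// If the key is already present, its value is replaced and the entry is moved
+// to the front of the eviction list; otherwise a new entry is added and, once
+// the cache is over capacity, the oldest entry is evicted. A small
+// illustrative sketch (a capacity of 2, hypothetical keys):
+//
+//	c, _ := New(2)
+//	c.Add("a", 1)
+//	c.Add("b", 2)
+//	c.Add("a", 9) // updates "a" and marks it most recently used
+//	c.Add("c", 3) // evicts "b", the least recently used entry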
+func (c *Cache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + // Verify size not exceeded + if c.evictList.Len() > c.size { + c.removeOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + c.lock.Lock() + defer c.lock.Unlock() + c.removeOldest() +} + +// removeOldest removes the oldest item from the cache. +func (c *Cache) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *Cache) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.Lock() + defer c.lock.Unlock() + return c.evictList.Len() +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru_test.go b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru_test.go new file mode 100644 index 0000000..9c1063f --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru_test.go @@ -0,0 +1,45 @@ +package lru + +import ( + "testing" +) + +func TestLRU(t *testing.T) { + l, err := New(128) + if err != nil { + t.Fatalf("err: %v", err) + } + for i := 0; i < 256; i++ { + l.Add(i, i) + } + if l.Len() != 128 { + t.Fatalf("bad len: %v", l.Len()) + } + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if ok { + t.Fatalf("should be evicted") + } + } + for i := 128; i < 256; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("should not be evicted") + } + } + for i := 128; i < 192; i++ { + l.Remove(i) + _, ok := l.Get(i) + if ok { + t.Fatalf("should be deleted") + } + } + + l.Purge() + if l.Len() != 0 { + t.Fatalf("bad len: %v", l.Len()) + } + if _, ok := l.Get(200); ok { + t.Fatalf("should contain nothing") + } +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/baseRequest.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/baseRequest.go new file mode 100644 index 0000000..915c196 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/baseRequest.go @@ -0,0 +1,113 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package api
+
+import (
+	//"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"time"
+)
+
+func DoCommand(method string, url string, data interface{}) ([]byte, error) {
+	var response map[string]interface{}
+	var body []byte
+	var httpStatusCode int
+	req, err := ElasticSearchRequest(method, url)
+	//log.Println(req.URL)
+	if err != nil {
+		return body, err
+	}
+
+	if data != nil {
+		switch v := data.(type) {
+		case string:
+			req.SetBodyString(v)
+		case io.Reader:
+			req.SetBody(v)
+		//case *bytes.Buffer:
+		//	req.SetBody(v)
+		default:
+			err = req.SetBodyJson(v)
+			if err != nil {
+				return body, err
+			}
+		}
+
+	}
+	httpStatusCode, body, err = req.Do(&response)
+
+	if err != nil {
+		return body, err
+	}
+	if httpStatusCode > 304 {
+
+		jsonErr := json.Unmarshal(body, &response)
+		if jsonErr == nil {
+			if error, ok := response["error"]; ok {
+				status, _ := response["status"]
+				return body, ESError{time.Now(), fmt.Sprintf("Error [%s] Status [%v]", error, status), httpStatusCode}
+			}
+		}
+		return body, jsonErr
+	}
+	return body, nil
+}
+
+// ESError is an error implementation that includes a time, message, and code.
+type ESError struct {
+	When time.Time
+	What string
+	Code int
+}
+
+func (e ESError) Error() string {
+	return fmt.Sprintf("%v: %v [%v]", e.When, e.What, e.Code)
+}
+
+// Exists allows the caller to check for the existence of a document using HEAD.
+// This appears to be broken in the current version of elasticsearch 0.19.10,
+// currently returning nothing.
+func Exists(pretty bool, index string, _type string, id string) (BaseResponse, error) {
+	var response map[string]interface{}
+	var body []byte
+	var url string
+	var retval BaseResponse
+	var httpStatusCode int
+
+	if len(_type) > 0 {
+		url = fmt.Sprintf("/%s/%s/%s?%s", index, _type, id, Pretty(pretty))
+	} else {
+		url = fmt.Sprintf("/%s/%s?%s", index, id, Pretty(pretty))
+	}
+	req, err := ElasticSearchRequest("HEAD", url)
+	if err != nil {
+		// fail fast rather than calling Do on a nil request
+		return retval, err
+	}
+	httpStatusCode, body, err = req.Do(&response)
+	if httpStatusCode > 304 {
+		if error, ok := response["error"]; ok {
+			status, _ := response["status"]
+			log.Printf("Error: %v (%v)\n", error, status)
+		}
+	} else {
+		// unmarshal into json
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			log.Println(jsonErr)
+		}
+	}
+	//fmt.Println(string(body))
+	return retval, err
+}
diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/baseResponse.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/baseResponse.go
new file mode 100644
index 0000000..035cac2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/baseResponse.go
@@ -0,0 +1,74 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package api + +import ( + "fmt" +) + +type BaseResponse struct { + Ok bool `json:"ok"` + Index string `json:"_index,omitempty"` + Type string `json:"_type,omitempty"` + Id string `json:"_id,omitempty"` + Source interface{} `json:"_source,omitempty"` // depends on the schema you've defined + Version int `json:"_version,omitempty"` + Found bool `json:"found,omitempty"` + Exists bool `json:"exists,omitempty"` +} +type ExtendedStatus struct { + Ok bool `json:"ok"` + ShardsStatus Status `json:"_shards"` +} +type Status struct { + Total int `json:"total"` + Successful int `json:"successful"` + Failed int `json:"failed"` +} + +type Match struct { + OK bool `json:"ok"` + Matches []string `json:"matches"` + Explaination Explaination `json:"explaination,omitempty"` +} + +type Explaination struct { + Value float32 `json:"value"` + Description string `json:"description"` + Details []Explaination `json:"details,omitempty"` +} + +func Pretty(pretty bool) string { + prettyString := "" + if pretty == true { + prettyString = "pretty=1" + } + return prettyString +} + +// http://www.elasticsearch.org/guide/reference/api/search/search-type/ + +func Scan(scan int) string { + scanString := "" + if scan > 0 { + scanString = fmt.Sprintf("&search_type=scan&size=%v", scan) + } + return scanString +} + +func Scroll(duration string) string { + scrollString := "" + if duration != "" { + scrollString = "&scroll=" + duration + } + return scrollString +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/request.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/request.go new file mode 100644 index 0000000..46e35f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/request.go @@ -0,0 +1,111 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "runtime" + "strings" +) + +type Request http.Request + +const ( + Version = "0.0.2" + DefaultProtocol = "http" + DefaultDomain = "localhost" + DefaultPort = "9200" +) + +var ( + _ = log.Ldate + Protocol string = DefaultProtocol + Domain string = DefaultDomain + ClusterDomains [1]string = [1]string{DefaultDomain} + Port string = DefaultPort +) + +func ElasticSearchRequest(method, path string) (*Request, error) { + req, err := http.NewRequest(method, fmt.Sprintf("%s://%s:%s%s", Protocol, Domain, Port, path), nil) + if err != nil { + return nil, err + } + req.Header.Add("Accept", "application/json") + req.Header.Add("User-Agent", "elasticSearch/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")") + return (*Request)(req), nil +} + +func (r *Request) SetBodyJson(data interface{}) error { + body, err := json.Marshal(data) + if err != nil { + return err + } + r.SetBody(bytes.NewReader(body)) + r.Header.Set("Content-Type", "application/json") + return nil +} + +func (r *Request) SetBodyString(body string) { + r.SetBody(strings.NewReader(body)) +} + +func (r *Request) SetBody(body io.Reader) { + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + r.Body = rc + if body != nil { + switch v := body.(type) { + case *strings.Reader: + r.ContentLength = int64(v.Len()) + case *bytes.Buffer: + r.ContentLength = int64(v.Len()) + } + } +} + +func (r *Request) Do(v interface{}) (int, []byte, error) { + response, bodyBytes, err := r.DoResponse(v) + if err != nil { + return -1, nil, err + } + return response.StatusCode, bodyBytes, err +} + +func (r *Request) DoResponse(v interface{}) (*http.Response, []byte, error) { + res, err := http.DefaultClient.Do((*http.Request)(r)) + if err != nil { + return nil, nil, err + } + + defer res.Body.Close() + bodyBytes, err := ioutil.ReadAll(res.Body) + + if err != nil { + return nil, nil, err + } + + if res.StatusCode > 304 && v != nil { + jsonErr := json.Unmarshal(bodyBytes, v) + if jsonErr != nil { + return nil, nil, jsonErr + } + } + return res, bodyBytes, err +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/searchdsl.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/searchdsl.go new file mode 100644 index 0000000..3fafb8a --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/searchdsl.go @@ -0,0 +1,31 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package api
+
+type SearchRequest struct {
+	From   int    `json:"from,omitempty"`
+	Size   int    `json:"size,omitempty"`
+	Query  Query  `json:"query,omitempty"`
+	Filter Filter `json:"filter,omitempty"`
+}
+
+type Filter struct {
+	Term Term `json:"term"`
+}
+
+type Facets struct {
+	Tag Terms `json:"tag"`
+}
+
+type Terms struct {
+	Terms string `json:"terms"`
+}
diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/shared.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/shared.go
new file mode 100644
index 0000000..5f0ac1f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/api/shared.go
@@ -0,0 +1,24 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+type Query struct {
+	Query Term `json:"query"`
+}
+
+type Term struct {
+	Term string `json:"term"`
+}
+
+// setQuery sets the term on the query; it takes a pointer receiver so the
+// assignment is visible to the caller.
+func (q *Query) setQuery(query string) {
+	q.Query.Term = query
+}
diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulk.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulk.go
new file mode 100644
index 0000000..88d8f02
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulk.go
@@ -0,0 +1,462 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api"
+	"io"
+	"log"
+	"strconv"
+	"sync"
+	"time"
+)
+
+var (
+	// Max buffer size in bytes before flushing to elasticsearch
+	BulkMaxBuffer = 1048576
+	// Max number of Docs to hold in buffer before forcing flush
+	BulkMaxDocs = 100
+	// Max delay before forcing a flush to Elasticsearch
+	BulkDelaySeconds = 5
+	// Keep a running total of errors seen, since it is in the background
+	BulkErrorCt uint64
+	// maximum wait shutdown seconds
+	MAX_SHUTDOWN_SECS = 5
+
+	// There is one Global Bulk Indexer for convenience
+	GlobalBulkIndexer *BulkIndexer
+)
+
+type ErrorBuffer struct {
+	Err error
+	Buf *bytes.Buffer
+}
+
+// There is one global bulk indexer available for convenience so the IndexBulk() function can be called.
+// However, the recommended usage is to create your own BulkIndexer, which allows for multiple separate
+// Elasticsearch servers/host connections.
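+// A hedged sketch of that recommended pattern, using only names defined in
+// this file (the index/type/document values are illustrative):
+//
+//	indexer := NewBulkIndexer(10)
+//	done := make(chan bool)
+//	indexer.Run(done)
+//	indexer.Index("twitter", "user", "1", "", nil, `{"name":"bob"}`)
+//	done <- true // signal shutdown when finished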
+// @maxConns is the max number of in flight http requests
+// @done is a channel to cause the indexer to stop
+//
+//	done := make(chan bool)
+//	BulkIndexerGlobalRun(100, done)
+func BulkIndexerGlobalRun(maxConns int, done chan bool) {
+	if GlobalBulkIndexer == nil {
+		GlobalBulkIndexer = NewBulkIndexer(maxConns)
+		GlobalBulkIndexer.Run(done)
+	}
+}
+
+// A bulk indexer creates goroutines, and channels for connecting and sending data
+// to elasticsearch in bulk, using buffers.
+type BulkIndexer struct {
+
+	// We are creating a variable defining the func responsible for sending
+	// to allow a mock sender for test purposes
+	BulkSendor func(*bytes.Buffer) error
+
+	// If we encounter an error in sending, we are going to retry for this long
+	// before returning an error
+	// if 0 it will not retry
+	RetryForSeconds int
+
+	// channel for getting errors
+	ErrorChannel chan *ErrorBuffer
+
+	// channel for sending to background indexer
+	bulkChannel chan []byte
+
+	// shutdown channel
+	shutdownChan chan bool
+	// Channel to shutdown http send go-routines
+	httpDoneChan chan bool
+	// channel to shutdown timer
+	timerDoneChan chan bool
+	// channel to shutdown doc go-routines
+	docDoneChan chan bool
+
+	// Channel to send a complete byte.Buffer to the http sender
+	sendBuf chan *bytes.Buffer
+	// byte buffer for docs that have been converted to bytes, but not yet sent
+	buf *bytes.Buffer
+	// Max amount of time to buffer docs before forcing a flush
+	BufferDelayMax time.Duration
+	// Max buffer size in bytes before flushing to elasticsearch
+	BulkMaxBuffer int // 1048576
+	// Max number of Docs to hold in buffer before forcing flush
+	BulkMaxDocs int // 100
+
+	// Number of documents we have sent through so far on this session
+	docCt int
+	// Max number of http connections in flight at one time
+	maxConns int
+	// If we are indexing enough docs per BufferDelayMax, we won't need to do time
+	// based eviction, else we do.
+	needsTimeBasedFlush bool
+	// Lock for document writes/operations
+	mu sync.Mutex
+	// Wait Group for the http sends
+	sendWg *sync.WaitGroup
+}
+
+func NewBulkIndexer(maxConns int) *BulkIndexer {
+	b := BulkIndexer{sendBuf: make(chan *bytes.Buffer, maxConns)}
+	b.needsTimeBasedFlush = true
+	b.buf = new(bytes.Buffer)
+	b.maxConns = maxConns
+	b.BulkMaxBuffer = BulkMaxBuffer
+	b.BulkMaxDocs = BulkMaxDocs
+	b.BufferDelayMax = time.Duration(BulkDelaySeconds) * time.Second
+	b.bulkChannel = make(chan []byte, 100)
+	b.sendWg = new(sync.WaitGroup)
+	b.docDoneChan = make(chan bool)
+	b.timerDoneChan = make(chan bool)
+	b.httpDoneChan = make(chan bool)
+	return &b
+}
+
+// A bulk indexer with more control over error handling.
+// @maxConns is the max number of in flight http requests
+// @retrySeconds is the number of seconds to wait before retrying failed requests
+//
+//	done := make(chan bool)
+//	BulkIndexerGlobalRun(100, done)
+func NewBulkIndexerErrors(maxConns, retrySeconds int) *BulkIndexer {
+	b := NewBulkIndexer(maxConns)
+	b.RetryForSeconds = retrySeconds
+	b.ErrorChannel = make(chan *ErrorBuffer, 20)
+	return b
+}
+
+// Run starts this bulk indexer; it opens a goroutine, so the call is
+// non-blocking.
+func (b *BulkIndexer) Run(done chan bool) {
+
+	go func() {
+		if b.BulkSendor == nil {
+			b.BulkSendor = BulkSend
+		}
+		b.shutdownChan = done
+		b.startHttpSendor()
+		b.startDocChannel()
+		b.startTimer()
+		<-b.shutdownChan
+		b.Flush()
+		b.shutdown()
+	}()
+}
+
+// Make a channel that will close when the given WaitGroup is done.
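+// This is a small adapter so that a WaitGroup can be used in a select
+// statement, e.g. to bound a shutdown wait with a timeout, which is how
+// Flush below uses it:
+//
+//	select {
+//	case <-wgChan(wg):
+//		// all in-flight sends finished
+//	case <-time.After(5 * time.Second):
+//		// gave up waiting
+//	}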
+func wgChan(wg *sync.WaitGroup) <-chan interface{} {
+	ch := make(chan interface{})
+	go func() {
+		wg.Wait()
+		close(ch)
+	}()
+	return ch
+}
+
+// Flush all current documents to ElasticSearch
+func (b *BulkIndexer) Flush() {
+	b.mu.Lock()
+	if b.docCt > 0 {
+		b.send(b.buf)
+	}
+	b.mu.Unlock()
+	for {
+		select {
+		case <-wgChan(b.sendWg):
+			// done
+			log.Println("BulkIndexer Wait Group Shutdown")
+			return
+		case <-time.After(time.Second * time.Duration(MAX_SHUTDOWN_SECS)):
+			// timeout!
+			log.Println("BulkIndexer Timeout in Shutdown!")
+			return
+		}
+	}
+}
+
+func (b *BulkIndexer) startHttpSendor() {
+
+	// this sends http requests to elasticsearch; it uses maxConns to open up that
+	// many goroutines, each of which will synchronously call ElasticSearch.
+	// in theory, the whole set will cause a backup all the way to IndexBulk if
+	// we have consumed all maxConns
+	for i := 0; i < b.maxConns; i++ {
+		go func() {
+			for {
+				select {
+				case buf := <-b.sendBuf:
+					b.sendWg.Add(1)
+					err := b.BulkSendor(buf)
+
+					// Perhaps a b.FailureStrategy(err) ?? with different types of strategies
+					//  1.  Retry, then panic
+					//  2.  Retry then return error and let runner decide
+					//  3.  Retry, then log to disk?   retry later?
+					if err != nil {
+						if b.RetryForSeconds > 0 {
+							time.Sleep(time.Second * time.Duration(b.RetryForSeconds))
+							err = b.BulkSendor(buf)
+							if err == nil {
+								// Successfully re-sent with no error
+								b.sendWg.Done()
+								continue
+							}
+						}
+						if b.ErrorChannel != nil {
+							log.Println(err)
+							b.ErrorChannel <- &ErrorBuffer{err, buf}
+						}
+					}
+					b.sendWg.Done()
+				case <-b.httpDoneChan:
+					// shutdown this go routine
+					return
+				}
+
+			}
+		}()
+	}
+}
+
+// start a timer that checks back and forces a flush every BulkDelaySeconds
+// seconds, even if we haven't hit max messages/size
+func (b *BulkIndexer) startTimer() {
+	ticker := time.NewTicker(b.BufferDelayMax)
+	log.Println("Starting timer with delay = ", b.BufferDelayMax)
+	go func() {
+		for {
+			select {
+			case <-ticker.C:
+				b.mu.Lock()
+				// only send on the tick if the last flush was also time-based;
+				// otherwise the other thresholds are being hit and time-based
+				// eviction isn't needed
+				if b.buf.Len() > 0 && b.needsTimeBasedFlush {
+					b.needsTimeBasedFlush = true
+					b.send(b.buf)
+				} else if b.buf.Len() > 0 {
+					b.needsTimeBasedFlush = true
+				}
+				b.mu.Unlock()
+			case <-b.timerDoneChan:
+				// shutdown this go routine
+				return
+			}
+
+		}
+	}()
+}
+
+func (b *BulkIndexer) startDocChannel() {
+	// This goroutine accepts incoming byte arrays from the IndexBulk function and
+	// writes to buffer
+	go func() {
+		for {
+			select {
+			case docBytes := <-b.bulkChannel:
+				b.mu.Lock()
+				b.docCt += 1
+				b.buf.Write(docBytes)
+				if b.buf.Len() >= b.BulkMaxBuffer || b.docCt >= b.BulkMaxDocs {
+					b.needsTimeBasedFlush = false
+					//log.Printf("Send due to size:  docs=%d  bufsize=%d", b.docCt, b.buf.Len())
+					b.send(b.buf)
+				}
+				b.mu.Unlock()
+			case <-b.docDoneChan:
+				// shutdown this go routine
+				return
+			}
+		}
+	}()
+}
+
+func (b *BulkIndexer) send(buf *bytes.Buffer) {
+	//b2 := *b.buf
+	b.sendBuf <- buf
+	b.buf = new(bytes.Buffer)
+	b.docCt = 0
+}
+
+func (b *BulkIndexer) shutdown() {
+	// This must be called after Flush
+	b.docDoneChan <- true
+	log.Println("Just sent to doc chan done")
+	b.timerDoneChan <- true
+	for i := 0; i < b.maxConns; i++ {
+		b.httpDoneChan <- true
+	}
+}
+
+// The index bulk API adds or updates a typed JSON document to a specific index, making it searchable.
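+// Each buffered document becomes an action line plus a source line in the
+// bulk body that WriteBulkBytes produces, e.g. (illustrative values):
+//
+//	{"index":{"_index":"test","_type":"type1","_id":"1"}}
+//	{"name":"bob"}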
+// It operates by buffering requests, and occasionally flushing to Elasticsearch.
+// http://www.elasticsearch.org/guide/reference/api/bulk.html
+func (b *BulkIndexer) Index(index string, _type string, id, ttl string, date *time.Time, data interface{}) error {
+	//{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	by, err := WriteBulkBytes("index", index, _type, id, ttl, date, data)
+	if err != nil {
+		log.Println(err)
+		return err
+	}
+	b.bulkChannel <- by
+	return nil
+}
+
+func (b *BulkIndexer) Update(index string, _type string, id, ttl string, date *time.Time, data interface{}) error {
+	//{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	by, err := WriteBulkBytes("update", index, _type, id, ttl, date, data)
+	if err != nil {
+		log.Println(err)
+		return err
+	}
+	b.bulkChannel <- by
+	return nil
+}
+
+// This does the actual send of a buffer, which has already been formatted
+// into bytes of ES formatted bulk data
+func BulkSend(buf *bytes.Buffer) error {
+	_, err := api.DoCommand("POST", "/_bulk", buf)
+	if err != nil {
+		log.Println(err)
+		BulkErrorCt += 1
+		return err
+	}
+	return nil
+}
+
+// Given a set of arguments for index, type, id, data create a set of bytes that is formatted for bulk index
+// http://www.elasticsearch.org/guide/reference/api/bulk.html
+func WriteBulkBytes(op string, index string, _type string, id, ttl string, date *time.Time, data interface{}) ([]byte, error) {
+	// only index and update are currently supported
+	if op != "index" && op != "update" {
+		return nil, errors.New(fmt.Sprintf("Operation '%s' is not yet supported", op))
+	}
+
+	// First line
+	buf := bytes.Buffer{}
+	buf.WriteString(fmt.Sprintf(`{"%s":{"_index":"`, op))
+	buf.WriteString(index)
+	buf.WriteString(`","_type":"`)
+	buf.WriteString(_type)
+	if len(id) > 0 {
+		buf.WriteString(`","_id":"`)
+		buf.WriteString(id)
+	}
+
+	if op == "update" {
+		// note: the ttl, if any, is written separately below
+		buf.WriteString(`","retry_on_conflict":"3`)
+	}
+
+	if len(ttl) > 0 {
+		buf.WriteString(`","ttl":"`)
+		buf.WriteString(ttl)
+	}
+	if date != nil {
+		buf.WriteString(`","_timestamp":"`)
+		buf.WriteString(strconv.FormatInt(date.UnixNano()/1e6, 10))
+	}
+	buf.WriteString(`"}}`)
+	buf.WriteByte('\n')
+
+	switch v := data.(type) {
+	case *bytes.Buffer:
+		io.Copy(&buf, v)
+	case []byte:
+		buf.Write(v)
+	case string:
+		buf.WriteString(v)
+	default:
+		body, jsonErr := json.Marshal(data)
+		if jsonErr != nil {
+			log.Println("Json data error ", data)
+			return nil, jsonErr
+		}
+		buf.Write(body)
+	}
+	buf.WriteByte('\n')
+	return buf.Bytes(), nil
+}
+
+// The index bulk API adds or updates a typed JSON document to a specific index, making it searchable.
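+// A hedged usage sketch of the global-indexer path (names as defined in this
+// file; the map literal stands in for any JSON-marshalable document):
+//
+//	done := make(chan bool)
+//	BulkIndexerGlobalRun(100, done)
+//	IndexBulk("twitter", "user", "1", nil, map[string]interface{}{"name": "bob"})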
+// It operates by buffering requests, and occasionally flushing to Elasticsearch.
+//
+// This uses the one Global Bulk Indexer; you can also create your own non-global
+// indexers and use their Index functions.
+//
+// http://www.elasticsearch.org/guide/reference/api/bulk.html
+func IndexBulk(index string, _type string, id string, date *time.Time, data interface{}) error {
+	//{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	if GlobalBulkIndexer == nil {
+		panic("Must have Global Bulk Indexer to use this Func")
+	}
+	by, err := WriteBulkBytes("index", index, _type, id, "", date, data)
+	if err != nil {
+		return err
+	}
+	GlobalBulkIndexer.bulkChannel <- by
+	return nil
+}

func UpdateBulk(index string, _type string, id string, date *time.Time, data interface{}) error {
+	//{ "update" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	if GlobalBulkIndexer == nil {
+		panic("Must have Global Bulk Indexer to use this Func")
+	}
+	by, err := WriteBulkBytes("update", index, _type, id, "", date, data)
+	if err != nil {
+		return err
+	}
+	GlobalBulkIndexer.bulkChannel <- by
+	return nil
+}
+
+// The index bulk API adds or updates a typed JSON document to a specific index, making it searchable.
+// It operates by buffering requests, and occasionally flushing to Elasticsearch.
+//
+// This uses the one Global Bulk Indexer; you can also create your own non-global
+// indexers and use their IndexTtl functions.
+//
+// http://www.elasticsearch.org/guide/reference/api/bulk.html
+func IndexBulkTtl(index string, _type string, id, ttl string, date *time.Time, data interface{}) error {
+	//{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	if GlobalBulkIndexer == nil {
+		panic("Must have Global Bulk Indexer to use this Func")
+	}
+	by, err := WriteBulkBytes("index", index, _type, id, ttl, date, data)
+	if err != nil {
+		return err
+	}
+	GlobalBulkIndexer.bulkChannel <- by
+	return nil
+}
+
+func UpdateBulkTtl(index string, _type string, id, ttl string, date *time.Time, data interface{}) error {
+	//{ "update" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	if GlobalBulkIndexer == nil {
+		panic("Must have Global Bulk Indexer to use this Func")
+	}
+	by, err := WriteBulkBytes("update", index, _type, id, ttl, date, data)
+	if err != nil {
+		return err
+	}
+	GlobalBulkIndexer.bulkChannel <- by
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulkUDP.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulkUDP.go
new file mode 100644
index 0000000..0527446
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulkUDP.go
@@ -0,0 +1,12 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package core diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulk_test.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulk_test.go new file mode 100644 index 0000000..0b1d2e3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/bulk_test.go @@ -0,0 +1,259 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "flag" + "fmt" + "github.com/araddon/gou" + "github.com/bmizerany/assert" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" + "log" + "strconv" + "testing" + "time" +) + +// go test -bench=".*" +// go test -bench="Bulk" + +var ( + buffers = make([]*bytes.Buffer, 0) + totalBytesSent int + messageSets int +) + +func init() { + flag.Parse() + if testing.Verbose() { + gou.SetupLogging("debug") + } +} + +// take two ints, compare, need to be within 5% +func CloseInt(a, b int) bool { + c := float64(a) / float64(b) + if c >= .95 && c <= 1.05 { + return true + } + return false +} + +func TestBulkIndexerBasic(t *testing.T) { + InitTests(true) + indexer := NewBulkIndexer(3) + indexer.BulkSendor = func(buf *bytes.Buffer) error { + messageSets += 1 + totalBytesSent += buf.Len() + buffers = append(buffers, buf) + gou.Debug(string(buf.Bytes())) + return BulkSend(buf) + } + done := make(chan bool) + indexer.Run(done) + + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)} + err := indexer.Index("users", "user", "1", "", &date, data) + + WaitFor(func() bool { + return len(buffers) > 0 + }, 5) + // part of request is url, so lets factor that in + //totalBytesSent = totalBytesSent - len(*eshost) + assert.T(t, len(buffers) == 1, fmt.Sprintf("Should have sent one operation but was %d", len(buffers))) + assert.T(t, BulkErrorCt == 0 && err == nil, fmt.Sprintf("Should not have any errors %v", err)) + assert.T(t, totalBytesSent == 145, fmt.Sprintf("Should have sent 135 bytes but was %v", totalBytesSent)) + + err = indexer.Index("users", "user", "2", "", nil, data) + <-time.After(time.Millisecond * 10) // we need to wait for doc to hit send channel + // this will test to ensure that Flush actually catches a doc + indexer.Flush() + totalBytesSent = totalBytesSent - len(*eshost) + assert.T(t, err == nil, fmt.Sprintf("Should have nil error =%v", err)) + assert.T(t, len(buffers) == 2, fmt.Sprintf("Should have another buffer ct=%d", len(buffers))) + + assert.T(t, BulkErrorCt == 0, fmt.Sprintf("Should not have any errors %d", BulkErrorCt)) + assert.T(t, CloseInt(totalBytesSent, 257), fmt.Sprintf("Should have sent 257 bytes but was %v", totalBytesSent)) + + done <- true +} + +func TestBulkUpdate(t *testing.T) { + InitTests(true) + api.Port = "9200" + indexer := NewBulkIndexer(3) + indexer.BulkSendor = func(buf *bytes.Buffer) error { + messageSets += 1 + totalBytesSent += buf.Len() + buffers = append(buffers, buf) + 
		gou.Debug(string(buf.Bytes()))
+		return BulkSend(buf)
+	}
+	done := make(chan bool)
+	indexer.Run(done)
+
+	date := time.Unix(1257894000, 0)
+	user := map[string]interface{}{
+		"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "count": 1,
+	}
+
+	// Let's make sure the data is in the index ...
+	_, err := Index(true, "users", "user", "5", user)
+
+	// script and params
+	data := map[string]interface{}{
+		"script": "ctx._source.count += 2",
+	}
+	err = indexer.Update("users", "user", "5", "", &date, data)
+	// So here's the deal. Flushing does seem to work, you just have to give the
+	// channel a moment to receive the message ...
+	// <- time.After(time.Millisecond * 20)
+	// indexer.Flush()
+	done <- true
+
+	WaitFor(func() bool {
+		return len(buffers) > 0
+	}, 5)
+
+	assert.T(t, BulkErrorCt == 0 && err == nil, fmt.Sprintf("Should not have any errors %v", err))
+
+	response, err := Get(true, "users", "user", "5")
+	assert.T(t, err == nil, fmt.Sprintf("Should not have any errors %v", err))
+	newCount := response.Source.(map[string]interface{})["count"]
+	assert.T(t, newCount.(float64) == 3,
+		fmt.Sprintf("Should have updated count: %#v ... %#v", response.Source.(map[string]interface{})["count"], response))
+}
+
+func TestBulkSmallBatch(t *testing.T) {
+	InitTests(true)
+
+	done := make(chan bool)
+
+	date := time.Unix(1257894000, 0)
+	data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)}
+
+	// Now tests small batches
+	indexersm := NewBulkIndexer(1)
+	indexersm.BufferDelayMax = 100 * time.Millisecond
+	indexersm.BulkMaxDocs = 2
+	messageSets = 0
+	indexersm.BulkSendor = func(buf *bytes.Buffer) error {
+		messageSets += 1
+		return BulkSend(buf)
+	}
+	indexersm.Run(done)
+	<-time.After(time.Millisecond * 20)
+
+	indexersm.Index("users", "user", "2", "", &date, data)
+	indexersm.Index("users", "user", "3", "", &date, data)
+	indexersm.Index("users", "user", "4", "", &date, data)
+	<-time.After(time.Millisecond * 200)
+	//	indexersm.Flush()
+	done <- true
+	assert.T(t, messageSets == 2, fmt.Sprintf("Should have sent 2 message sets %d", messageSets))
+
+}
+
+func TestBulkErrors(t *testing.T) {
+	// let's set a bad port, and hope we get a connection refused error?
+ api.Port = "27845" + defer func() { + api.Port = "9200" + }() + BulkDelaySeconds = 1 + indexer := NewBulkIndexerErrors(10, 1) + done := make(chan bool) + indexer.Run(done) + + errorCt := 0 + go func() { + for i := 0; i < 20; i++ { + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)} + indexer.Index("users", "user", strconv.Itoa(i), "", &date, data) + } + }() + for errBuf := range indexer.ErrorChannel { + errorCt++ + gou.Debug(errBuf.Err) + break + } + assert.T(t, errorCt > 0, fmt.Sprintf("ErrorCt should be > 0 %d", errorCt)) + done <- true +} + +/* +BenchmarkBulkSend 18:33:00 bulk_test.go:131: Sent 1 messages in 0 sets totaling 0 bytes +18:33:00 bulk_test.go:131: Sent 100 messages in 1 sets totaling 145889 bytes +18:33:01 bulk_test.go:131: Sent 10000 messages in 100 sets totaling 14608888 bytes +18:33:05 bulk_test.go:131: Sent 20000 messages in 99 sets totaling 14462790 bytes + 20000 234526 ns/op + +*/ +func BenchmarkBulkSend(b *testing.B) { + InitTests(true) + b.StartTimer() + totalBytes := 0 + sets := 0 + GlobalBulkIndexer.BulkSendor = func(buf *bytes.Buffer) error { + totalBytes += buf.Len() + sets += 1 + //log.Println("got bulk") + return BulkSend(buf) + } + for i := 0; i < b.N; i++ { + about := make([]byte, 1000) + rand.Read(about) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "about": about} + IndexBulk("users", "user", strconv.Itoa(i), nil, data) + } + log.Printf("Sent %d messages in %d sets totaling %d bytes \n", b.N, sets, totalBytes) + if BulkErrorCt != 0 { + b.Fail() + } +} + +/* +TODO: this should be faster than above + +BenchmarkBulkSendBytes 18:33:05 bulk_test.go:169: Sent 1 messages in 0 sets totaling 0 bytes +18:33:05 bulk_test.go:169: Sent 100 messages in 2 sets totaling 292299 bytes +18:33:09 bulk_test.go:169: Sent 10000 messages in 99 sets totaling 14473800 bytes + 10000 373529 ns/op + +*/ +func BenchmarkBulkSendBytes(b *testing.B) { + InitTests(true) + about := make([]byte, 1000) + rand.Read(about) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "about": about} + body, _ := json.Marshal(data) + b.StartTimer() + totalBytes := 0 + sets := 0 + GlobalBulkIndexer.BulkSendor = func(buf *bytes.Buffer) error { + totalBytes += buf.Len() + sets += 1 + return BulkSend(buf) + } + for i := 0; i < b.N; i++ { + IndexBulk("users", "user", strconv.Itoa(i), nil, body) + } + log.Printf("Sent %d messages in %d sets totaling %d bytes \n", b.N, sets, totalBytes) + if BulkErrorCt != 0 { + b.Fail() + } +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/count.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/count.go new file mode 100644 index 0000000..d8a0981 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/count.go @@ -0,0 +1,49 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" +) + +type CountResponse struct { + Count int `json:"count"` + Shard api.Status `json:"_shards"` +} + +// Count allows the caller to easily execute a query and get the number of matches for that query. +// It can be executed across one or more indices and across one or more types. +// The query can either be provided using a simple query string as a parameter, +// or using the Query DSL defined within the request body. +// http://www.elasticsearch.org/guide/reference/api/count.html +// TODO: take parameters. +// currently not working against 0.19.10 +func Count(pretty bool, index string, _type string) (CountResponse, error) { + var url string + var retval CountResponse + url = fmt.Sprintf("/%s/%s/_count?%s", index, _type, api.Pretty(pretty)) + body, err := api.DoCommand("GET", url, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + //fmt.Println(body) + return retval, err +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/delete.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/delete.go new file mode 100644 index 0000000..6fb55fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/delete.go @@ -0,0 +1,40 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" +) + +// Delete API allows to delete a typed JSON document from a specific index based on its id. +// http://www.elasticsearch.org/guide/reference/api/delete.html +// todo: add routing and versioning support +func Delete(pretty bool, index string, _type string, id string, version int, routing string) (api.BaseResponse, error) { + var url string + var retval api.BaseResponse + url = fmt.Sprintf("/%s/%s/%s?%s", index, _type, id, api.Pretty(pretty)) + body, err := api.DoCommand("DELETE", url, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + //fmt.Println(body) + return retval, err +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/deleteByQuery.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/deleteByQuery.go new file mode 100644 index 0000000..621f333 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/deleteByQuery.go @@ -0,0 +1,59 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" + "strings" +) + +// DeleteByQuery allows the caller to delete documents from one or more indices and one or more types based on a query. +// The query can either be provided using a simple query string as a parameter, or using the Query DSL defined within +// the request body. +// see: http://www.elasticsearch.org/guide/reference/api/delete-by-query.html +func DeleteByQuery(pretty bool, indices []string, types []string, query interface{}) (api.BaseResponse, error) { + var url string + var retval api.BaseResponse + if len(indices) > 0 && len(types) > 0 { + url = fmt.Sprintf("http://localhost:9200/%s/%s/_query?%s&%s", strings.Join(indices, ","), strings.Join(types, ","), buildQuery(), api.Pretty(pretty)) + } else if len(indices) > 0 { + url = fmt.Sprintf("http://localhost:9200/%s/_query?%s&%s", strings.Join(indices, ","), buildQuery(), api.Pretty(pretty)) + } + body, err := api.DoCommand("DELETE", url, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + fmt.Println(body) + return retval, err +} + +func buildQuery() string { + return "" +} + +type DeleteByQueryResponse struct { + Status bool `json:"ok"` + Indicies map[string]IndexStatus `json:"_indices"` +} + +type IndexStatus struct { + Shards api.Status `json:"_shards"` +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/example_test.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/example_test.go new file mode 100644 index 0000000..89b5210 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/example_test.go @@ -0,0 +1,106 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
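+
+// A rough sketch (not one of the runnable examples below) of calling
+// core.DeleteByQuery from deleteByQuery.go above; the term-query body and
+// the index/type names are hypothetical, and the DSL shape is assumed
+// rather than verified against this Elasticsearch version:
+//
+//	q := map[string]interface{}{
+//		"query": map[string]interface{}{
+//			"term": map[string]string{"user": "kimchy"},
+//		},
+//	}
+//	resp, err := core.DeleteByQuery(true, []string{"twitter"}, []string{"tweet"}, q)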
+
+package core_test
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/core"
+	"strconv"
+	"time"
+)
+
+// The simplest usage of background bulk indexing
+func ExampleBulkIndexer_simple() {
+	indexer := core.NewBulkIndexerErrors(10, 60)
+	done := make(chan bool)
+	indexer.Run(done)
+
+	indexer.Index("twitter", "user", "1", "", nil, `{"name":"bob"}`)
+
+	<-done // wait forever
+}
+
+// The simplest usage of background bulk indexing with an error channel
+func ExampleBulkIndexer_errorchannel() {
+	indexer := core.NewBulkIndexerErrors(10, 60)
+	done := make(chan bool)
+	indexer.Run(done)
+
+	go func() {
+		for errBuf := range indexer.ErrorChannel {
+			// just blissfully print errors forever
+			fmt.Println(errBuf.Err)
+		}
+	}()
+	for i := 0; i < 20; i++ {
+		indexer.Index("twitter", "user", strconv.Itoa(i), "", nil, `{"name":"bob"}`)
+	}
+	done <- true
+}
+
+// Background bulk indexing with an error channel and a periodic error-count reset
+func ExampleBulkIndexer_errorsmarter() {
+	indexer := core.NewBulkIndexerErrors(10, 60)
+	done := make(chan bool)
+	indexer.Run(done)
+
+	errorCt := 0 // use sync/atomic or similar if this needs to be goroutine-safe
+	timer := time.NewTicker(time.Minute * 3)
+	go func() {
+		for {
+			select {
+			case <-timer.C:
+				if errorCt < 2 {
+					errorCt = 0
+				}
+			case <-done:
+				return
+			}
+		}
+	}()
+
+	go func() {
+		for errBuf := range indexer.ErrorChannel {
+			errorCt++
+			fmt.Println(errBuf.Err)
+			// log to disk? db? ???? Panic
+		}
+	}()
+	for i := 0; i < 20; i++ {
+		indexer.Index("twitter", "user", strconv.Itoa(i), "", nil, `{"name":"bob"}`)
+	}
+	done <- true // send shutdown signal
+}
+
+// Inspecting the bulk response
+func ExampleBulkIndexer_responses() {
+	indexer := core.NewBulkIndexer(10)
+	// Supply a custom BulkSendor func to allow inspection of the response/error
+	indexer.BulkSendor = func(buf *bytes.Buffer) error {
+		// @buf is the buffer of docs about to be written
+		respJson, err := api.DoCommand("POST", "/_bulk", buf)
+		if err != nil {
+			// handle it better than this
+			fmt.Println(string(respJson))
+		}
+		return err
+	}
+	done := make(chan bool)
+	indexer.Run(done)
+
+	for i := 0; i < 20; i++ {
+		indexer.Index("twitter", "user", strconv.Itoa(i), "", nil, `{"name":"bob"}`)
+	}
+	done <- true // send shutdown signal
+}
diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/explain.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/explain.go
new file mode 100644
index 0000000..3772319
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/explain.go
@@ -0,0 +1,45 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api"
+)
+
+// Explain computes a score explanation for a query and a specific document.
+// This can give useful feedback whether a document matches or didn’t match a specific query. +// This feature is available from version 0.19.9 and up. +// see http://www.elasticsearch.org/guide/reference/api/explain.html +func Explain(pretty bool, index string, _type string, id string, query string) (api.Match, error) { + var url string + var retval api.Match + if len(_type) > 0 { + url = fmt.Sprintf("/%s/%s/_explain?%s", index, _type, api.Pretty(pretty)) + } else { + url = fmt.Sprintf("/%s/_explain?%s", index, api.Pretty(pretty)) + } + body, err := api.DoCommand("GET", url, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + fmt.Println(body) + return retval, err +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/get.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/get.go new file mode 100644 index 0000000..c9a4f47 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/get.go @@ -0,0 +1,108 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" + "net/http" +) + +// Get allows caller to get a typed JSON document from the index based on its id. 
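+//
+// A minimal sketch, assuming hypothetical index/type/id values:
+//
+//	doc, err := core.Get(true, "twitter", "tweet", "1")
+//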
+// GET - retrieves the doc
+// HEAD - checks for existence of the doc
+// http://www.elasticsearch.org/guide/reference/api/get.html
+// TODO: make this implement an interface
+func Get(pretty bool, index string, _type string, id string) (api.BaseResponse, error) {
+	var url string
+	var retval api.BaseResponse
+	if len(_type) > 0 {
+		url = fmt.Sprintf("/%s/%s/%s?%s", index, _type, id, api.Pretty(pretty))
+	} else {
+		url = fmt.Sprintf("/%s/%s?%s", index, id, api.Pretty(pretty))
+	}
+	body, err := api.DoCommand("GET", url, nil)
+	if err != nil {
+		return retval, err
+	}
+	if err == nil {
+		// unmarshal the response
+		jsonErr := json.Unmarshal(body, &retval)
+		if jsonErr != nil {
+			return retval, jsonErr
+		}
+	}
+	//fmt.Println(body)
+	return retval, err
+}
+
+// GetSource retrieves the document by id and converts it to the provided interface
+func GetSource(index string, _type string, id string, source interface{}) error {
+	url := fmt.Sprintf("/%s/%s/%s/_source", index, _type, id)
+	body, err := api.DoCommand("GET", url, nil)
+	if err == nil {
+		err = json.Unmarshal(body, &source)
+	}
+	//fmt.Println(body)
+	return err
+}
+
+// Exists allows the caller to check for the existence of a document using HEAD
+func Exists(pretty bool, index string, _type string, id string) (bool, error) {
+
+	var url string
+
+	if len(_type) > 0 {
+		url = fmt.Sprintf("/%s/%s/%s?fields=_id&%s", index, _type, id, api.Pretty(pretty))
+	} else {
+		url = fmt.Sprintf("/%s/%s?fields=_id&%s", index, id, api.Pretty(pretty))
+	}
+
+	req, err := api.ElasticSearchRequest("HEAD", url)
+
+	if err != nil {
+		return false, err
+	}
+
+	httpStatusCode, _, err := req.Do(nil)
+
+	if err != nil {
+		return false, err
+	}
+	if httpStatusCode == http.StatusOK {
+		return true, err
+	} else {
+		return false, err
+	}
+}
+
+// ExistsIndex allows the caller to check for the existence of an index or a type using HEAD
+func ExistsIndex(pretty bool, index string, _type string) (bool, error) {
+	var url string
+	if len(_type) > 0 {
+		url = fmt.Sprintf("/%s/%s?%s", index, _type, api.Pretty(pretty))
+	} else {
+		url = fmt.Sprintf("/%s?%s", index, api.Pretty(pretty))
+	}
+	req, err := api.ElasticSearchRequest("HEAD", url)
+	httpStatusCode, _, err := req.Do(nil)
+
+	if err != nil {
+		return false, err
+	}
+	if httpStatusCode == http.StatusOK {
+		return true, err
+	} else {
+		return false, err
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/index.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/index.go
new file mode 100644
index 0000000..76427e2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/index.go
@@ -0,0 +1,128 @@
+// Copyright 2013 Matthew Baird
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
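+
+// A minimal sketch of indexing one document with Index (defined below), as
+// called from a consuming package; the index/type names and the doc body
+// are hypothetical:
+//
+//	doc := map[string]interface{}{"user": "kimchy", "message": "hello"}
+//	resp, err := core.Index(false, "twitter", "tweet", "1", doc)
+//	if err != nil {
+//		log.Fatal(err)
+//	}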
+ +package core + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" + "net/url" + "strconv" +) + +// Index adds or updates a typed JSON document in a specific index, making it searchable, creating an index +// if it did not exist. +// if id is omited, op_type 'create' will be pased and http method will default to "POST" +// id is optional +// parentId is optional +// version is optional +// op_type is optional +// routing is optional +// timestamp is optional +// ttl is optional +// percolate is optional +// timeout is optional +// http://www.elasticsearch.org/guide/reference/api/index_.html +func Index(pretty bool, index string, _type string, id string, data interface{}) (api.BaseResponse, error) { + return IndexWithParameters(pretty, index, _type, id, "", 0, "", "", "", 0, "", "", false, data) +} + +// IndexWithParameters takes all the potential parameters available +func IndexWithParameters(pretty bool, index string, _type string, id string, parentId string, version int, op_type string, + routing string, timestamp string, ttl int, percolate string, timeout string, refresh bool, data interface{}) (api.BaseResponse, error) { + var url string + var retval api.BaseResponse + url, err := GetIndexUrl(index, _type, id, parentId, version, op_type, routing, timestamp, ttl, percolate, timeout, refresh) + if err != nil { + return retval, err + } + var method string + if len(id) == 0 { + method = "POST" + } else { + method = "PUT" + } + + body, err := api.DoCommand(method, url, data) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +func GetIndexUrl(index string, _type string, id string, parentId string, version int, op_type string, + routing string, timestamp string, ttl int, percolate string, timeout string, refresh bool) (retval string, e error) { + + if len(index) == 0 { + return "", errors.New("index can not be blank") + } + if len(_type) == 0 { + return "", errors.New("_type can not be blank") + } + var partialURL string + var values url.Values = url.Values{} + partialURL = fmt.Sprintf("/%s/%s", index, _type) + if len(id) > 0 { + partialURL = fmt.Sprintf("%s/%s", partialURL, id) + } + // A child document can be indexed by specifying it’s parent when indexing. + if len(parentId) > 0 { + values.Add("parent", parentId) + } + // versions start at 1, so if greater than 0 + if version > 0 { + values.Add("version", strconv.Itoa(version)) + } + if len(op_type) > 0 { + if len(id) == 0 { + //if id is omited, op_type defaults to 'create' + values.Add("op_type", "create") + } else { + values.Add("op_type", op_type) + } + } + if len(routing) > 0 { + values.Add("routing", routing) + } + // A document can be indexed with a timestamp associated with it. + // The timestamp value of a document can be set using the timestamp parameter. + if len(timestamp) > 0 { + values.Add("timestamp", timestamp) + } + // A document can be indexed with a ttl (time to live) associated with it. Expired documents + // will be expunged automatically. + if ttl > 0 { + values.Add("ttl", strconv.Itoa(ttl)) + } + if len(percolate) > 0 { + values.Add("percolate", percolate) + } + // example 5m + if len(timeout) > 0 { + values.Add("timeout", timeout) + } + + if refresh { + values.Add("refresh", "true") + } + + partialURL += "?" 
+ values.Encode() + return partialURL, nil +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/index_test.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/index_test.go new file mode 100644 index 0000000..f1dffba --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/index_test.go @@ -0,0 +1,117 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "github.com/bmizerany/assert" + "testing" +) + +func TestUrlGeneration(t *testing.T) { + expectedUrl := "/Index/Type/Id?op_type=create&parent=Parent&percolate=Percolate&refresh=true&routing=Routing&timeout=Timeout×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 1, "create", "Routing", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err == nil, t, "err was not nil") + assert.T(t, url == expectedUrl, fmt.Sprintf("TestUrlGeneration Should get %s, instead got %s", expectedUrl, url)) +} + +func TestUrlGenerationNoIndex(t *testing.T) { + _, err := GetIndexUrl("", "Type", "Id", "Parent", 1, "create", "Routing", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err != nil, "err should have been returned") +} + +func TestUrlGenerationNoType(t *testing.T) { + _, err := GetIndexUrl("Index", "", "Id", "Parent", 1, "create", "Routing", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err != nil, "err should have been returned") +} + +func TestUrlGenerationNoId(t *testing.T) { + expectedUrl := "/Index/Type?op_type=create&parent=Parent&percolate=Percolate&refresh=true&routing=Routing&timeout=Timeout×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "", "Parent", 1, "create", "Routing", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err == nil, t, "err was not nil") + assert.T(t, url == expectedUrl, t, fmt.Sprintf("TestUrlGenerationNoId Should get %s, instead got %s", expectedUrl, url)) +} + +// if Id is blank, op_type should default to create +func TestUrlGenerationNoIdAndOpTypeNotCreate(t *testing.T) { + expectedUrl := "/Index/Type?op_type=create&parent=Parent&percolate=Percolate&refresh=true&routing=Routing&timeout=Timeout×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "", "Parent", 1, "notcreate", "Routing", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err == nil, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoId Should get %s, instead got %s", expectedUrl, url)) +} + +func TestUrlGenerationNoParent(t *testing.T) { + expectedUrl := "/Index/Type/Id?op_type=create&percolate=Percolate&refresh=true&routing=Routing&timeout=Timeout×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "", 1, "create", "Routing", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err == nil, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoParent Should get %s, instead got %s", 
expectedUrl, url)) +} + +func TestUrlGenerationNoVersion(t *testing.T) { + expectedUrl := "/Index/Type/Id?op_type=create&parent=Parent&percolate=Percolate&refresh=true&routing=Routing&timeout=Timeout×tamp=TimeStamp&ttl=10" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 0, "create", "Routing", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err == nil, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoVersion Should get %s, instead got %s", expectedUrl, url)) +} + +func TestUrlGenerationNoOpType(t *testing.T) { + expectedUrl := "/Index/Type/Id?parent=Parent&percolate=Percolate&refresh=true&routing=Routing&timeout=Timeout×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 1, "", "Routing", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err == nil, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoOpType Should get %s, instead got %s", expectedUrl, url)) +} +func TestUrlGenerationNoRouting(t *testing.T) { + expectedUrl := "/Index/Type/Id?op_type=create&parent=Parent&percolate=Percolate&refresh=true&timeout=Timeout×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 1, "create", "", "TimeStamp", 10, "Percolate", "Timeout", true) + assert.T(t, err == nil, t, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoRouting Should get %s, instead got %s", expectedUrl, url)) +} +func TestUrlGenerationNoTimestamp(t *testing.T) { + expectedUrl := "/Index/Type/Id?op_type=create&parent=Parent&percolate=Percolate&refresh=true&routing=Routing&timeout=Timeout&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 1, "create", "Routing", "", 10, "Percolate", "Timeout", true) + assert.T(t, err == nil, t, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoTimestamp Should get %s, instead got %s", expectedUrl, url)) +} +func TestUrlGenerationNoTTL(t *testing.T) { + expectedUrl := "/Index/Type/Id?op_type=create&parent=Parent&percolate=Percolate&refresh=true&routing=Routing&timeout=Timeout×tamp=TimeStamp&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 1, "create", "Routing", "TimeStamp", 0, "Percolate", "Timeout", true) + assert.T(t, err == nil, t, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoTTL Should get %s, instead got %s", expectedUrl, url)) +} +func TestUrlGenerationNoPercolate(t *testing.T) { + expectedUrl := "/Index/Type/Id?op_type=create&parent=Parent&refresh=true&routing=Routing&timeout=Timeout×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 1, "create", "Routing", "TimeStamp", 10, "", "Timeout", true) + assert.T(t, err == nil, t, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoPercolate Should get %s, instead got %s", expectedUrl, url)) +} +func TestUrlGenerationNoTimeout(t *testing.T) { + expectedUrl := "/Index/Type/Id?op_type=create&parent=Parent&percolate=Percolate&refresh=true&routing=Routing×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 1, "create", "Routing", "TimeStamp", 10, "Percolate", "", true) + assert.T(t, err == nil, t, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoTimeout Should get %s, instead got %s", expectedUrl, url)) +} +func TestUrlGenerationNoRefresh(t *testing.T) { + 
expectedUrl := "/Index/Type/Id?op_type=create&parent=Parent&percolate=Percolate&routing=Routing&timeout=Timeout×tamp=TimeStamp&ttl=10&version=1" + url, err := GetIndexUrl("Index", "Type", "Id", "Parent", 1, "create", "Routing", "TimeStamp", 10, "Percolate", "Timeout", false) + assert.T(t, err == nil, t, "err was not nil") + assert.T(t, url == expectedUrl, + fmt.Sprintf("TestUrlGenerationNoRefresh Should get %s, instead got %s", expectedUrl, url)) +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/mget.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/mget.go new file mode 100644 index 0000000..d6ff766 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/mget.go @@ -0,0 +1,64 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" +) + +// MGet allows the caller to get multiple documents based on an index, type (optional) and id (and possibly routing). +// The response includes a docs array with all the fetched documents, each element similar in structure to a document +// provided by the get API. +// see http://www.elasticsearch.org/guide/reference/api/multi-get.html +func MGet(pretty bool, index string, _type string, mgetRequest MGetRequestContainer) (MGetResponseContainer, error) { + var url string + var retval MGetResponseContainer + if len(index) <= 0 { + url = fmt.Sprintf("/_mget?%s", api.Pretty(pretty)) + } + if len(_type) > 0 && len(index) > 0 { + url = fmt.Sprintf("/%s/%s/_mget?%s", index, _type, api.Pretty(pretty)) + } else if len(index) > 0 { + url = fmt.Sprintf("/%s/_mget?%s", index, api.Pretty(pretty)) + } + body, err := api.DoCommand("GET", url, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + fmt.Println(body) + return retval, err +} + +type MGetRequestContainer struct { + Docs []MGetRequest `json:"docs"` +} + +type MGetRequest struct { + Index string `json:"_index"` + Type string `json:"_type"` + ID string `json:"_id"` + IDS []string `json:"_ids,omitempty"` + Fields []string `json:"fields,omitempty"` +} + +type MGetResponseContainer struct { + Docs []api.BaseResponse `json:"docs"` +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/moreLikeThis.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/moreLikeThis.go new file mode 100644 index 0000000..e48086f --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/moreLikeThis.go @@ -0,0 +1,59 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" +) + +// MoreLikeThis allows the caller to get documents that are “like” a specified document. +// http://www.elasticsearch.org/guide/reference/api/more-like-this.html +func MoreLikeThis(pretty bool, index string, _type string, id string, query MoreLikeThisQuery) (api.BaseResponse, error) { + var url string + var retval api.BaseResponse + url = fmt.Sprintf("/%s/%s/%s/_mlt?%s", index, _type, id, api.Pretty(pretty)) + body, err := api.DoCommand("GET", url, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + fmt.Println(body) + return retval, err +} + +type MoreLikeThisQuery struct { + MoreLikeThis MLT `json:"more_like_this"` +} + +type MLT struct { + Fields []string `json:"fields"` + LikeText string `json:"like_text"` + PercentTermsToMatch float32 `json:"percent_terms_to_match"` + MinTermFrequency int `json:"min_term_freq"` + MaxQueryTerms int `json:"max_query_terms"` + StopWords []string `json:"stop_words"` + MinDocFrequency int `json:"min_doc_freq"` + MaxDocFrequency int `json:"max_doc_freq"` + MinWordLength int `json:"min_word_len"` + MaxWordLength int `json:"max_word_len"` + BoostTerms int `json:"boost_terms"` + Boost float32 `json:"boost"` + Analyzer string `json:"analyzer"` +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/msearch.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/msearch.go new file mode 100644 index 0000000..0527446 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/msearch.go @@ -0,0 +1,12 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/percolate.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/percolate.go new file mode 100644 index 0000000..0256f7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/percolate.go @@ -0,0 +1,62 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" +) + +// RegisterPercolate allows the caller to register queries against an index, and then send percolate requests which include a doc, and +// getting back the queries that match on that doc out of the set of registered queries. +// Think of it as the reverse operation of indexing and then searching. Instead of sending docs, indexing them, +// and then running queries. One sends queries, registers them, and then sends docs and finds out which queries +// match that doc. +// see http://www.elasticsearch.org/guide/reference/api/percolate.html +func RegisterPercolate(pretty bool, index string, name string, query api.Query) (api.BaseResponse, error) { + var url string + var retval api.BaseResponse + url = fmt.Sprintf("/_percolator/%s/%s?%s", index, name, api.Pretty(pretty)) + body, err := api.DoCommand("PUT", url, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + fmt.Println(body) + return retval, err +} + +func Percolate(pretty bool, index string, _type string, name string, doc string) (api.Match, error) { + var url string + var retval api.Match + url = fmt.Sprintf("/%s/%s/_percolate?%s", index, _type, api.Pretty(pretty)) + body, err := api.DoCommand("GET", url, doc) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + fmt.Println(body) + return retval, err +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/search.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/search.go new file mode 100644 index 0000000..8be6209 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/search.go @@ -0,0 +1,168 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" + "net/url" + "strconv" +) + +var ( + DebugRequests = false +) + +// SearchRequest performs a very basic search on an index via the request URI API. 
+// +// params: +// @pretty: bool for pretty reply or not, a parameter to elasticsearch +// @index: the elasticsearch index +// @_type: optional ("" if not used) search specific type in this index +// @query: this can be one of 3 types: +// 1) string value that is valid elasticsearch +// 2) io.Reader that can be set in body (also valid elasticsearch string syntax..) +// 3) other type marshalable to json (also valid elasticsearch json) +// +// out, err := SearchRequest(true, "github","",qryType ,"", 0) +// +// http://www.elasticsearch.org/guide/reference/api/search/uri-request.html +func SearchRequest(pretty bool, index string, _type string, query interface{}, scroll string, scan int) (SearchResult, error) { + var uriVal string + var retval SearchResult + if len(_type) > 0 && _type != "*" { + uriVal = fmt.Sprintf("/%s/%s/_search?%s%s%s", index, _type, api.Pretty(pretty), api.Scroll(scroll), api.Scan(scan)) + } else { + uriVal = fmt.Sprintf("/%s/_search?%s%s%s", index, api.Pretty(pretty), api.Scroll(scroll), api.Scan(scan)) + } + body, err := api.DoCommand("POST", uriVal, query) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal([]byte(body), &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +// SearchUri performs the simplest possible query in url string +// params: +// @index: the elasticsearch index +// @_type: optional ("" if not used) search specific type in this index +// @query: valid string lucene search syntax +// +// out, err := SearchUri("github","",`user:kimchy` ,"", 0) +// +// produces a request like this: host:9200/github/_search?q=user:kimchy" +// +// http://www.elasticsearch.org/guide/reference/api/search/uri-request.html +func SearchUri(index, _type string, query, scroll string, scan int) (SearchResult, error) { + var uriVal string + var retval SearchResult + query = url.QueryEscape(query) + if len(_type) > 0 && _type != "*" { + uriVal = fmt.Sprintf("/%s/%s/_search?q=%s%s%s", index, _type, query, api.Scroll(scroll), api.Scan(scan)) + } else { + uriVal = fmt.Sprintf("/%s/_search?q=%s%s%s", index, query, api.Scroll(scroll), api.Scan(scan)) + } + //log.Println(uriVal) + body, err := api.DoCommand("GET", uriVal, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal([]byte(body), &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +func Scroll(pretty bool, scroll_id string, scroll string) (SearchResult, error) { + var url string + var retval SearchResult + + url = fmt.Sprintf("/_search/scroll?%s%s", api.Pretty(pretty), api.Scroll(scroll)) + + body, err := api.DoCommand("POST", url, scroll_id) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal([]byte(body), &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +type SearchResult struct { + Took int `json:"took"` + TimedOut bool `json:"timed_out"` + ShardStatus api.Status `json:"_shards"` + Hits Hits `json:"hits"` + Facets json.RawMessage `json:"facets,omitempty"` // structure varies on query + ScrollId string `json:"_scroll_id,omitempty"` +} + +func (s *SearchResult) String() string { + return fmt.Sprintf("", s.Took, s.TimedOut, s.Hits.Total) +} + +type Hits struct { + Total int `json:"total"` + // MaxScore float32 `json:"max_score"` + Hits []Hit `json:"hits"` +} + +func (h *Hits) Len() int { + return len(h.Hits) +} + +type Hit struct 
{ + Index string `json:"_index"` + Type string `json:"_type,omitempty"` + Id string `json:"_id"` + Score Float32Nullable `json:"_score,omitempty"` // Filters (no query) dont have score, so is null + Source json.RawMessage `json:"_source"` // marshalling left to consumer + Fields json.RawMessage `json:"fields"` // when a field arg is passed to ES, instead of _source it returns fields +} + +// Elasticsearch returns some invalid (according to go) json, with floats having... +// +// json: cannot unmarshal null into Go value of type float32 (see last field.) +// +// "hits":{"total":6808,"max_score":null, +// "hits":[{"_index":"10user","_type":"user","_id":"751820","_score":null, +type Float32Nullable float32 + +func (i *Float32Nullable) UnmarshalJSON(data []byte) error { + if len(data) == 0 || string(data) == "null" { + return nil + } + + if in, err := strconv.ParseFloat(string(data), 32); err != nil { + return err + } else { + *i = Float32Nullable(in) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/search_test.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/search_test.go new file mode 100644 index 0000000..a779f71 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/search_test.go @@ -0,0 +1,31 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "github.com/bmizerany/assert" + "testing" +) + +func TestSearchRequest(t *testing.T) { + qry := map[string]interface{}{ + "query": map[string]interface{}{ + "wildcard": map[string]string{"actor": "a*"}, + }, + } + out, err := SearchRequest(true, "github", "", qry, "", 0) + //log.Println(out) + assert.T(t, &out != nil && err == nil, fmt.Sprintf("Should get docs")) + assert.T(t, out.Hits.Len() == 10, fmt.Sprintf("Should have 10 docs but was %v", out.Hits.Len())) + assert.T(t, CloseInt(out.Hits.Total, 588), fmt.Sprintf("Should have 588 hits but was %v", out.Hits.Total)) +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/test_test.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/test_test.go new file mode 100644 index 0000000..21cff7f --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/test_test.go @@ -0,0 +1,192 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
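+
+// A rough scroll-pagination sketch using SearchRequest and Scroll from
+// search.go above; qry is any valid query body accepted by SearchRequest,
+// and the "1m" scroll window plus the exact scan/scroll handshake are
+// assumed rather than verified against this Elasticsearch version:
+//
+//	res, err := core.SearchRequest(false, "github", "", qry, "1m", 0)
+//	for err == nil && len(res.Hits.Hits) > 0 {
+//		res, err = core.Scroll(false, res.ScrollId, "1m")
+//	}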
+ +package core + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/md5" + "encoding/json" + "flag" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" + "io" + "log" + "net/http" + "os" + "testing" + "time" +) + +/* + +usage: + + test -v -host eshost -loaddata + +*/ + +var ( + _ = os.ModeDir + bulkStarted bool + hasLoadedData bool + hasStartedTesting bool + eshost *string = flag.String("host", "localhost", "Elasticsearch Server Host Address") + loadData *bool = flag.Bool("loaddata", false, "This loads a bunch of test data into elasticsearch for testing") +) + +func init() { + +} +func InitTests(startIndexer bool) { + if !hasStartedTesting { + flag.Parse() + hasStartedTesting = true + log.SetFlags(log.Ltime | log.Lshortfile) + api.Domain = *eshost + } + if startIndexer && !bulkStarted { + BulkDelaySeconds = 1 + bulkStarted = true + log.Println("start global test bulk indexer") + BulkIndexerGlobalRun(100, make(chan bool)) + if *loadData && !hasLoadedData { + log.Println("load test data ") + hasLoadedData = true + LoadTestData() + } + } +} + +// dumb simple assert for testing, printing +// Assert(len(items) == 9, t, "Should be 9 but was %d", len(items)) +func Assert(is bool, t *testing.T, format string, args ...interface{}) { + if is == false { + log.Printf(format, args...) + t.Fail() + } +} + +// Wait for condition (defined by func) to be true, a utility to create a ticker +// checking every 100 ms to see if something (the supplied check func) is done +// +// WaitFor(func() bool { +// return ctr.Ct == 0 +// },10) +// +// @timeout (in seconds) is the last arg +func WaitFor(check func() bool, timeoutSecs int) { + timer := time.NewTicker(100 * time.Millisecond) + tryct := 0 + for _ = range timer.C { + if check() { + timer.Stop() + break + } + if tryct >= timeoutSecs*10 { + timer.Stop() + break + } + tryct++ + } +} + +func TestFake(t *testing.T) { + +} + +type GithubEvent struct { + Url string + Created time.Time `json:"created_at"` + Type string +} + +// This loads test data from github archives (~6700 docs) +func LoadTestData() { + docCt := 0 + errCt := 0 + indexer := NewBulkIndexer(20) + indexer.BulkSendor = func(buf *bytes.Buffer) error { + log.Printf("Sent %d bytes total %d docs sent", buf.Len(), docCt) + req, err := api.ElasticSearchRequest("POST", "/_bulk") + if err != nil { + errCt += 1 + log.Println("ERROR: ", err) + return err + } + req.SetBody(buf) + res, err := http.DefaultClient.Do((*http.Request)(req)) + if err != nil { + errCt += 1 + log.Println("ERROR: ", err) + return err + } + if res.StatusCode != 200 { + log.Printf("Not 200! %d \n", res.StatusCode) + } + return err + } + done := make(chan bool) + indexer.Run(done) + resp, err := http.Get("http://data.githubarchive.org/2012-12-10-15.json.gz") + if err != nil || resp == nil { + panic("Could not download data") + } + defer resp.Body.Close() + if err != nil { + log.Println(err) + return + } + gzReader, err := gzip.NewReader(resp.Body) + defer gzReader.Close() + if err != nil { + panic(err) + } + r := bufio.NewReader(gzReader) + var ge GithubEvent + docsm := make(map[string]bool) + h := md5.New() + for { + line, err := r.ReadBytes('\n') + if err != nil && err != io.EOF { + log.Println("FATAL: could not read line? ", err) + } else if err != nil { + indexer.Flush() + break + } + if err := json.Unmarshal(line, &ge); err == nil { + // create an "ID" + h.Write(line) + id := fmt.Sprintf("%x", h.Sum(nil)) + if _, ok := docsm[id]; ok { + log.Println("HM, already exists? 
", ge.Url) + } + docsm[id] = true + indexer.Index("github", ge.Type, id, "", &ge.Created, line) + docCt++ + //log.Println(docCt, " ", string(line)) + //os.Exit(1) + } else { + log.Println("ERROR? ", string(line)) + } + + } + if errCt != 0 { + log.Println("FATAL, could not load ", errCt) + } + // lets wait a bit to ensure that elasticsearch finishes? + time.Sleep(time.Second * 5) + if len(docsm) != docCt { + panic(fmt.Sprintf("Docs didn't match? %d:%d", len(docsm), docCt)) + } +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/update.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/update.go new file mode 100644 index 0000000..199f5f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/update.go @@ -0,0 +1,96 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" +) + +// Update updates a document based on a script provided. The operation gets the document +// (collocated with the shard) from the index, runs the script (with optional script language and parameters), +// and index back the result (also allows to delete, or ignore the operation). It uses versioning to make sure +// no updates have happened during the “get” and “reindex”. (available from 0.19 onwards). +// Note, this operation still means full reindex of the document, it just removes some network roundtrips +// and reduces chances of version conflicts between the get and the index. The _source field need to be enabled +// for this feature to work. +// +// http://www.elasticsearch.org/guide/reference/api/update.html +// TODO: finish this, it's fairly complex +func Update(pretty bool, index string, _type string, id string, data interface{}) (api.BaseResponse, error) { + var url string + var retval api.BaseResponse + url = fmt.Sprintf("/%s/%s/%s/_update?%s", index, _type, id, api.Pretty(pretty)) + body, err := api.DoCommand("POST", url, data) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + return retval, err +} + +// UpdateWithPartialDoc updates a document based on partial document provided. The update API also +// support passing a partial document (since 0.20), which will be merged into the existing +// document (simple recursive merge, inner merging of objects, replacing core "keys/values" and arrays). +// If both doc and script is specified, then doc is ignored. Best is to put your field pairs of the partial +// document in the script itself. 
+// +// http://www.elasticsearch.org/guide/reference/api/update.html +func UpdateWithPartialDoc(pretty bool, index string, _type string, id string, doc interface{}, upsert bool) (api.BaseResponse, error) { + switch v := doc.(type) { + case string: + upsertStr := "" + if upsert { + upsertStr = ", \"doc_as_upsert\":true" + } + content := fmt.Sprintf("{\"doc\":%s %s}", v, upsertStr) + return Update(pretty, index, _type, id, content) + default: + var data map[string]interface{} = make(map[string]interface{}) + data["doc"] = doc + if upsert { + data["doc_as_upsert"] = true + } + return Update(pretty, index, _type, id, data) + } +} + +// UpdateWithScript updates a document based on a script provided. +// The operation gets the document (collocated with the shard) from the index, runs the script +// (with optional script language and parameters), and index back the result (also allows to +// delete, or ignore the operation). It uses versioning to make sure no updates have happened +// during the "get" and "reindex". (available from 0.19 onwards). +// +// Note, this operation still means full reindex of the document, it just removes some network +// roundtrips and reduces chances of version conflicts between the get and the index. The _source +// field need to be enabled for this feature to work. +// http://www.elasticsearch.org/guide/reference/api/update.html +func UpdateWithScript(pretty bool, index string, _type string, id string, script string, params interface{}) (api.BaseResponse, error) { + switch v := params.(type) { + case string: + paramsPart := fmt.Sprintf("{\"params\":%s}", v) + data := fmt.Sprintf("{\"script\":\"%s\", \"params\":%s}", script, paramsPart) + return Update(pretty, index, _type, id, data) + default: + var data map[string]interface{} = make(map[string]interface{}) + data["params"] = params + data["script"] = script + return Update(pretty, index, _type, id, data) + } +} diff --git a/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/validate.go b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/validate.go new file mode 100644 index 0000000..c3b7b05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mattbaird/elastigo/core/validate.go @@ -0,0 +1,55 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "encoding/json" + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" +) + +// Validate allows a user to validate a potentially expensive query without executing it. 
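+//
+// A hypothetical call (sketch; the query is a Lucene query-string and the
+// index/type names are made up):
+//
+//	resp, err := core.Validate(true, "twitter", "tweet", "user:kimchy", false)
+//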
+// see http://www.elasticsearch.org/guide/reference/api/validate.html +func Validate(pretty bool, index string, _type string, query string, explain bool) (api.BaseResponse, error) { + var url string + var retval api.BaseResponse + if len(_type) > 0 { + url = fmt.Sprintf("/%s/%s/_validate/query?q=%s&%s&explain=%t", index, _type, query, api.Pretty(pretty), explain) + } else { + url = fmt.Sprintf("/%s/_validate/query?q=%s&%s&explain=%t", index, query, api.Pretty(pretty), explain) + } + body, err := api.DoCommand("GET", url, nil) + if err != nil { + return retval, err + } + if err == nil { + // marshall into json + jsonErr := json.Unmarshal(body, &retval) + if jsonErr != nil { + return retval, jsonErr + } + } + fmt.Println(body) + return retval, err +} + +type Validation struct { + Valid bool `json:"valid"` + Shards api.Status `json:"_shards"` + Explainations []Explaination `json:"explanations,omitempty"` +} + +type Explaination struct { + Index string `json:"index"` + Valid bool `json:"valid"` + Error string `json:"error"` +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go new file mode 100644 index 0000000..19368db --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go @@ -0,0 +1,216 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "encoding/binary" + "errors" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" +) + +var ( + errBatchTooShort = errors.New("leveldb: batch is too short") + errBatchBadRecord = errors.New("leveldb: bad record in batch") +) + +const kBatchHdrLen = 8 + 4 + +type batchReplay interface { + put(key, value []byte, seq uint64) + delete(key []byte, seq uint64) +} + +// Batch is a write batch. +type Batch struct { + buf []byte + rLen, bLen int + seq uint64 + sync bool +} + +func (b *Batch) grow(n int) { + off := len(b.buf) + if off == 0 { + // include headers + off = kBatchHdrLen + n += off + } + if cap(b.buf)-off >= n { + return + } + buf := make([]byte, 2*cap(b.buf)+n) + copy(buf, b.buf) + b.buf = buf[:off] +} + +func (b *Batch) appendRec(t vType, key, value []byte) { + n := 1 + binary.MaxVarintLen32 + len(key) + if t == tVal { + n += binary.MaxVarintLen32 + len(value) + } + b.grow(n) + off := len(b.buf) + buf := b.buf[:off+n] + buf[off] = byte(t) + off += 1 + off += binary.PutUvarint(buf[off:], uint64(len(key))) + copy(buf[off:], key) + off += len(key) + if t == tVal { + off += binary.PutUvarint(buf[off:], uint64(len(value))) + copy(buf[off:], value) + off += len(value) + } + b.buf = buf[:off] + b.rLen++ + // Include 8-byte ikey header + b.bLen += len(key) + len(value) + 8 +} + +// Put appends 'put operation' of the given key/value pair to the batch. +// It is safe to modify the contents of the argument after Put returns. +func (b *Batch) Put(key, value []byte) { + b.appendRec(tVal, key, value) +} + +// Delete appends 'delete operation' of the given key to the batch. +// It is safe to modify the contents of the argument after Delete returns. +func (b *Batch) Delete(key []byte) { + b.appendRec(tDel, key, nil) +} + +// Reset resets the batch. 
+func (b *Batch) Reset() { + b.buf = nil + b.seq = 0 + b.rLen = 0 + b.bLen = 0 + b.sync = false +} + +func (b *Batch) init(sync bool) { + b.sync = sync +} + +func (b *Batch) put(key, value []byte, seq uint64) { + if b.rLen == 0 { + b.seq = seq + } + b.Put(key, value) +} + +func (b *Batch) delete(key []byte, seq uint64) { + if b.rLen == 0 { + b.seq = seq + } + b.Delete(key) +} + +func (b *Batch) append(p *Batch) { + if p.rLen > 0 { + b.grow(len(p.buf) - kBatchHdrLen) + b.buf = append(b.buf, p.buf[kBatchHdrLen:]...) + b.rLen += p.rLen + } + if p.sync { + b.sync = true + } +} + +func (b *Batch) len() int { + return b.rLen +} + +func (b *Batch) size() int { + return b.bLen +} + +func (b *Batch) encode() []byte { + b.grow(0) + binary.LittleEndian.PutUint64(b.buf, b.seq) + binary.LittleEndian.PutUint32(b.buf[8:], uint32(b.rLen)) + + return b.buf +} + +func (b *Batch) decode(buf []byte) error { + if len(buf) < kBatchHdrLen { + return errBatchTooShort + } + + b.seq = binary.LittleEndian.Uint64(buf) + b.rLen = int(binary.LittleEndian.Uint32(buf[8:])) + // No need to be precise at this point, it won't be used anyway + b.bLen = len(buf) - kBatchHdrLen + b.buf = buf + + return nil +} + +func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error { + off := kBatchHdrLen + for i := 0; i < b.rLen; i++ { + if off >= len(b.buf) { + return errors.New("leveldb: invalid batch record length") + } + + t := vType(b.buf[off]) + if t > tVal { + return errors.New("leveldb: invalid batch record type in batch") + } + off += 1 + + x, n := binary.Uvarint(b.buf[off:]) + off += n + if n <= 0 || off+int(x) > len(b.buf) { + return errBatchBadRecord + } + key := b.buf[off : off+int(x)] + off += int(x) + + var value []byte + if t == tVal { + x, n := binary.Uvarint(b.buf[off:]) + off += n + if n <= 0 || off+int(x) > len(b.buf) { + return errBatchBadRecord + } + value = b.buf[off : off+int(x)] + off += int(x) + } + + f(i, t, key, value) + } + + return nil +} + +func (b *Batch) replay(to batchReplay) error { + return b.decodeRec(func(i int, t vType, key, value []byte) { + switch t { + case tVal: + to.put(key, value, b.seq+uint64(i)) + case tDel: + to.delete(key, b.seq+uint64(i)) + } + }) +} + +func (b *Batch) memReplay(to *memdb.DB) error { + return b.decodeRec(func(i int, t vType, key, value []byte) { + ikey := newIKey(key, b.seq+uint64(i), t) + to.Put(ikey, value) + }) +} + +func (b *Batch) revertMemReplay(to *memdb.DB) error { + return b.decodeRec(func(i int, t vType, key, value []byte) { + ikey := newIKey(key, b.seq+uint64(i), t) + to.Delete(ikey) + }) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go new file mode 100644 index 0000000..4b3587c --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go @@ -0,0 +1,120 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
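+
+// A minimal sketch of typical Batch usage from client code against an open
+// DB; db, the keys, and the nil write options (defaults) are assumptions,
+// not taken from the tests below:
+//
+//	b := new(leveldb.Batch)
+//	b.Put([]byte("k1"), []byte("v1"))
+//	b.Delete([]byte("k2"))
+//	err := db.Write(b, nil)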
+ +package leveldb + +import ( + "bytes" + "testing" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" +) + +type tbRec struct { + t vType + key, value []byte +} + +type testBatch struct { + rec []*tbRec +} + +func (p *testBatch) put(key, value []byte, seq uint64) { + p.rec = append(p.rec, &tbRec{tVal, key, value}) +} + +func (p *testBatch) delete(key []byte, seq uint64) { + p.rec = append(p.rec, &tbRec{tDel, key, nil}) +} + +func compareBatch(t *testing.T, b1, b2 *Batch) { + if b1.seq != b2.seq { + t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) + } + if b1.len() != b2.len() { + t.Fatalf("invalid record length want %d, got %d", b1.len(), b2.len()) + } + p1, p2 := new(testBatch), new(testBatch) + err := b1.replay(p1) + if err != nil { + t.Fatal("error when replaying batch 1: ", err) + } + err = b2.replay(p2) + if err != nil { + t.Fatal("error when replaying batch 2: ", err) + } + for i := range p1.rec { + r1, r2 := p1.rec[i], p2.rec[i] + if r1.t != r2.t { + t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.t, r2.t) + } + if !bytes.Equal(r1.key, r2.key) { + t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) + } + if r1.t == tVal { + if !bytes.Equal(r1.value, r2.value) { + t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) + } + } + } +} + +func TestBatch_EncodeDecode(t *testing.T) { + b1 := new(Batch) + b1.seq = 10009 + b1.Put([]byte("key1"), []byte("value1")) + b1.Put([]byte("key2"), []byte("value2")) + b1.Delete([]byte("key1")) + b1.Put([]byte("k"), []byte("")) + b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) + b1.Delete([]byte("key10000")) + b1.Delete([]byte("k")) + buf := b1.encode() + b2 := new(Batch) + err := b2.decode(buf) + if err != nil { + t.Error("error when decoding batch: ", err) + } + compareBatch(t, b1, b2) +} + +func TestBatch_Append(t *testing.T) { + b1 := new(Batch) + b1.seq = 10009 + b1.Put([]byte("key1"), []byte("value1")) + b1.Put([]byte("key2"), []byte("value2")) + b1.Delete([]byte("key1")) + b1.Put([]byte("foo"), []byte("foovalue")) + b1.Put([]byte("bar"), []byte("barvalue")) + b2a := new(Batch) + b2a.seq = 10009 + b2a.Put([]byte("key1"), []byte("value1")) + b2a.Put([]byte("key2"), []byte("value2")) + b2a.Delete([]byte("key1")) + b2b := new(Batch) + b2b.Put([]byte("foo"), []byte("foovalue")) + b2b.Put([]byte("bar"), []byte("barvalue")) + b2a.append(b2b) + compareBatch(t, b1, b2a) +} + +func TestBatch_Size(t *testing.T) { + b := new(Batch) + for i := 0; i < 2; i++ { + b.Put([]byte("key1"), []byte("value1")) + b.Put([]byte("key2"), []byte("value2")) + b.Delete([]byte("key1")) + b.Put([]byte("foo"), []byte("foovalue")) + b.Put([]byte("bar"), []byte("barvalue")) + mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) + b.memReplay(mem) + if b.size() != mem.Size() { + t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) + } + b.Reset() + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go new file mode 100644 index 0000000..3e473fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go @@ -0,0 +1,464 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "bytes" + "fmt" + "math/rand" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" +) + +func randomString(r *rand.Rand, n int) []byte { + b := new(bytes.Buffer) + for i := 0; i < n; i++ { + b.WriteByte(' ' + byte(r.Intn(95))) + } + return b.Bytes() +} + +func compressibleStr(r *rand.Rand, frac float32, n int) []byte { + nn := int(float32(n) * frac) + rb := randomString(r, nn) + b := make([]byte, 0, n+nn) + for len(b) < n { + b = append(b, rb...) + } + return b[:n] +} + +type valueGen struct { + src []byte + pos int +} + +func newValueGen(frac float32) *valueGen { + v := new(valueGen) + r := rand.New(rand.NewSource(301)) + v.src = make([]byte, 0, 1048576+100) + for len(v.src) < 1048576 { + v.src = append(v.src, compressibleStr(r, frac, 100)...) + } + return v +} + +func (v *valueGen) get(n int) []byte { + if v.pos+n > len(v.src) { + v.pos = 0 + } + v.pos += n + return v.src[v.pos-n : v.pos] +} + +var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) + +type dbBench struct { + b *testing.B + stor storage.Storage + db *DB + + o *opt.Options + ro *opt.ReadOptions + wo *opt.WriteOptions + + keys, values [][]byte +} + +func openDBBench(b *testing.B, noCompress bool) *dbBench { + _, err := os.Stat(benchDB) + if err == nil { + err = os.RemoveAll(benchDB) + if err != nil { + b.Fatal("cannot remove old db: ", err) + } + } + + p := &dbBench{ + b: b, + o: &opt.Options{}, + ro: &opt.ReadOptions{}, + wo: &opt.WriteOptions{}, + } + p.stor, err = storage.OpenFile(benchDB) + if err != nil { + b.Fatal("cannot open stor: ", err) + } + if noCompress { + p.o.Compression = opt.NoCompression + } + + p.db, err = Open(p.stor, p.o) + if err != nil { + b.Fatal("cannot open db: ", err) + } + + runtime.GOMAXPROCS(runtime.NumCPU()) + return p +} + +func (p *dbBench) reopen() { + p.db.Close() + var err error + p.db, err = Open(p.stor, p.o) + if err != nil { + p.b.Fatal("Reopen: got error: ", err) + } +} + +func (p *dbBench) populate(n int) { + p.keys, p.values = make([][]byte, n), make([][]byte, n) + v := newValueGen(0.5) + for i := range p.keys { + p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) + } +} + +func (p *dbBench) randomize() { + m := len(p.keys) + times := m * 2 + r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) + for n := 0; n < times; n++ { + i, j := r1.Int()%m, r2.Int()%m + if i == j { + continue + } + p.keys[i], p.keys[j] = p.keys[j], p.keys[i] + p.values[i], p.values[j] = p.values[j], p.values[i] + } +} + +func (p *dbBench) writes(perBatch int) { + b := p.b + db := p.db + + n := len(p.keys) + m := n / perBatch + if n%perBatch > 0 { + m++ + } + batches := make([]Batch, m) + j := 0 + for i := range batches { + first := true + for ; j < n && ((j+1)%perBatch != 0 || first); j++ { + first = false + batches[i].Put(p.keys[j], p.values[j]) + } + } + runtime.GC() + + b.ResetTimer() + b.StartTimer() + for i := range batches { + err := db.Write(&(batches[i]), p.wo) + if err != nil { + b.Fatal("write failed: ", err) + } + } + b.StopTimer() + b.SetBytes(116) +} + +func (p *dbBench) gc() { + 
p.keys, p.values = nil, nil + runtime.GC() +} + +func (p *dbBench) puts() { + b := p.b + db := p.db + + b.ResetTimer() + b.StartTimer() + for i := range p.keys { + err := db.Put(p.keys[i], p.values[i], p.wo) + if err != nil { + b.Fatal("put failed: ", err) + } + } + b.StopTimer() + b.SetBytes(116) +} + +func (p *dbBench) fill() { + b := p.b + db := p.db + + perBatch := 10000 + batch := new(Batch) + for i, n := 0, len(p.keys); i < n; { + first := true + for ; i < n && ((i+1)%perBatch != 0 || first); i++ { + first = false + batch.Put(p.keys[i], p.values[i]) + } + err := db.Write(batch, p.wo) + if err != nil { + b.Fatal("write failed: ", err) + } + batch.Reset() + } +} + +func (p *dbBench) gets() { + b := p.b + db := p.db + + b.ResetTimer() + for i := range p.keys { + _, err := db.Get(p.keys[i], p.ro) + if err != nil { + b.Error("got error: ", err) + } + } + b.StopTimer() +} + +func (p *dbBench) seeks() { + b := p.b + + iter := p.newIter() + defer iter.Release() + b.ResetTimer() + for i := range p.keys { + if !iter.Seek(p.keys[i]) { + b.Error("value not found for: ", string(p.keys[i])) + } + } + b.StopTimer() +} + +func (p *dbBench) newIter() iterator.Iterator { + iter := p.db.NewIterator(nil, p.ro) + err := iter.Error() + if err != nil { + p.b.Fatal("cannot create iterator: ", err) + } + return iter +} + +func (p *dbBench) close() { + if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { + p.b.Log("Block pool stats: ", bp) + } + p.db.Close() + p.stor.Close() + os.RemoveAll(benchDB) + p.db = nil + p.keys = nil + p.values = nil + runtime.GC() + runtime.GOMAXPROCS(1) +} + +func BenchmarkDBWrite(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBWriteBatch(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1000) + p.close() +} + +func BenchmarkDBWriteUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBWriteBatchUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.writes(1000) + p.close() +} + +func BenchmarkDBWriteRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.randomize() + p.writes(1) + p.close() +} + +func BenchmarkDBWriteRandomSync(b *testing.B) { + p := openDBBench(b, false) + p.wo.Sync = true + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBOverwrite(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.writes(1) + p.close() +} + +func BenchmarkDBOverwriteRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.randomize() + p.writes(1) + p.close() +} + +func BenchmarkDBPut(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.puts() + p.close() +} + +func BenchmarkDBRead(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadGC(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.fill() + p.gc() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadTable(b 
*testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.reopen()
+	p.gc()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	for iter.Next() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBReadReverse(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gc()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	iter.Last()
+	for iter.Prev() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBReadReverseTable(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.reopen()
+	p.gc()
+
+	iter := p.newIter()
+	b.ResetTimer()
+	iter.Last()
+	for iter.Prev() {
+	}
+	iter.Release()
+	b.StopTimer()
+	b.SetBytes(116)
+	p.close()
+}
+
+func BenchmarkDBSeek(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.seeks()
+	p.close()
+}
+
+func BenchmarkDBSeekRandom(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.randomize()
+	p.seeks()
+	p.close()
+}
+
+func BenchmarkDBGet(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gets()
+	p.close()
+}
+
+func BenchmarkDBGetRandom(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.randomize()
+	p.gets()
+	p.close()
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
new file mode 100644
index 0000000..baced77
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package cache provides the interface and implementations of cache algorithms.
+package cache
+
+import (
+	"sync/atomic"
+)
+
+// SetFunc is the function called by Namespace.Get to create a cache object.
+// If charge is less than one, the cache object will not be registered in the
+// cache tree; if value is nil, the cache object will not be created at all.
+type SetFunc func() (charge int, value interface{})
+
+// DelFin is the function called as the result of a delete operation.
+// exist == true indicates that the object exists; pending == true indicates
+// that the deletion already happened but hasn't finished yet (it waits for
+// all handles to be released). exist == false means the object doesn't exist.
+type DelFin func(exist, pending bool)
+
+// PurgeFin is the function called as the result of a purge operation.
+type PurgeFin func(ns, key uint64)
+
+// Cache is a cache tree. A cache instance must be goroutine-safe.
+type Cache interface {
+	// SetCapacity sets the cache tree capacity.
+	SetCapacity(capacity int)
+
+	// Capacity returns the cache tree capacity.
+	Capacity() int
+
+	// Used returns the used cache tree capacity.
+	Used() int
+
+	// Size returns the total size of all alive cache objects.
+	Size() int
+
+	// NumObjects returns the number of alive objects.
+	NumObjects() int
+
+	// GetNamespace gets the cache namespace with the given id.
+	// GetNamespace never returns nil.
+	GetNamespace(id uint64) Namespace
+
+	// PurgeNamespace purges the cache namespace with the given id from this
+	// cache tree. Also read Namespace.Purge.
+	PurgeNamespace(id uint64, fin PurgeFin)
+
+	// ZapNamespace detaches the cache namespace with the given id from this
+	// cache tree. Also read Namespace.Zap.
+	ZapNamespace(id uint64)
+
+	// Purge purges all cache namespaces from this cache tree.
+	// This behaves the same as calling the Namespace.Purge method on every
+	// cache namespace.
+	Purge(fin PurgeFin)
+
+	// Zap detaches all cache namespaces from this cache tree.
+	// This behaves the same as calling the Namespace.Zap method on every
+	// cache namespace.
+	Zap()
+}
+
+// Namespace is a cache namespace. A namespace instance must be goroutine-safe.
+type Namespace interface {
+	// Get gets the cache object with the given key.
+	// If the cache object is not found and setf is not nil, Get atomically
+	// creates the cache object by calling setf. Otherwise Get returns nil.
+	//
+	// The returned cache handle should be released after use by calling its
+	// Release method.
+	Get(key uint64, setf SetFunc) Handle
+
+	// Delete removes the cache object with the given key from the cache tree.
+	// A deleted cache object will be released as soon as all of its handles
+	// have been released.
+	// Delete only happens once; subsequent deletes will consider the cache
+	// object nonexistent, even if it isn't released yet.
+	//
+	// If not nil, fin will be called if the cache object doesn't exist or
+	// when it is finally released.
+	//
+	// Delete returns true if such a cache object exists and has never been
+	// deleted before.
+	Delete(key uint64, fin DelFin) bool
+
+	// Purge removes all cache objects within this namespace from the cache
+	// tree. This is the same as calling Delete on all cache objects.
+	//
+	// If not nil, fin will be called on each cache object when it is finally
+	// released.
+	Purge(fin PurgeFin)
+
+	// Zap detaches the namespace from the cache tree and releases all of its
+	// cache objects. A zapped namespace can never be filled again.
+	// Calling Get on a zapped namespace will always return nil.
+	Zap()
+}
+
+// Handle is a cache handle.
+type Handle interface {
+	// Release releases this cache handle. This method can safely be called
+	// multiple times.
+	Release()
+
+	// Value returns the value of this cache handle.
+	// Value returns nil after this cache handle has been released.
+	Value() interface{}
+}
+
+const (
+	DelNotExist = iota
+	DelExist
+	DelPendig
+)
+
+// Namespace state.
+type nsState int
+
+const (
+	nsEffective nsState = iota
+	nsZapped
+)
+
+// Node state.
+type nodeState int
+
+const (
+	nodeZero nodeState = iota
+	nodeEffective
+	nodeEvicted
+	nodeDeleted
+)
+
+// Fake handle.
+type fakeHandle struct {
+	value interface{}
+	fin   func()
+	once  uint32
+}
+
+func (h *fakeHandle) Value() interface{} {
+	if atomic.LoadUint32(&h.once) == 0 {
+		return h.value
+	}
+	return nil
+}
+
+func (h *fakeHandle) Release() {
+	if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
+		return
+	}
+	if h.fin != nil {
+		h.fin()
+		h.fin = nil
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
new file mode 100644
index 0000000..299f6f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
@@ -0,0 +1,597 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
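+
+// A minimal sketch of the Get/Release discipline these tests exercise
+// (illustrative only; NewLRUCache comes from lru_cache.go in this package):
+//
+//	c := NewLRUCache(100)
+//	ns := c.GetNamespace(0)
+//	h := ns.Get(42, func() (charge int, value interface{}) {
+//		return 1, "v42" // only called on a miss
+//	})
+//	if h != nil {
+//		_ = h.Value() // "v42"
+//		h.Release()   // handles must be released after use
+//	}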
+ +package cache + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +type releaserFunc struct { + fn func() + value interface{} +} + +func (r releaserFunc) Release() { + if r.fn != nil { + r.fn() + } +} + +func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) Handle { + return ns.Get(key, func() (int, interface{}) { + if relf != nil { + return charge, releaserFunc{relf, value} + } else { + return charge, value + } + }) +} + +func TestCache_HitMiss(t *testing.T) { + cases := []struct { + key uint64 + value string + }{ + {1, "vvvvvvvvv"}, + {100, "v1"}, + {0, "v2"}, + {12346, "v3"}, + {777, "v4"}, + {999, "v5"}, + {7654, "v6"}, + {2, "v7"}, + {3, "v8"}, + {9, "v9"}, + } + + setfin := 0 + c := NewLRUCache(1000) + ns := c.GetNamespace(0) + for i, x := range cases { + set(ns, x.key, x.value, len(x.value), func() { + setfin++ + }).Release() + for j, y := range cases { + h := ns.Get(y.key, nil) + if j <= i { + // should hit + if h == nil { + t.Errorf("case '%d' iteration '%d' is miss", i, j) + } else { + if x := h.Value().(releaserFunc).value.(string); x != y.value { + t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) + } + } + } else { + // should miss + if h != nil { + t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) + } + } + if h != nil { + h.Release() + } + } + } + + for i, x := range cases { + finalizerOk := false + ns.Delete(x.key, func(exist, pending bool) { + finalizerOk = true + }) + + if !finalizerOk { + t.Errorf("case %d delete finalizer not executed", i) + } + + for j, y := range cases { + h := ns.Get(y.key, nil) + if j > i { + // should hit + if h == nil { + t.Errorf("case '%d' iteration '%d' is miss", i, j) + } else { + if x := h.Value().(releaserFunc).value.(string); x != y.value { + t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) + } + } + } else { + // should miss + if h != nil { + t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) + } + } + if h != nil { + h.Release() + } + } + } + + if setfin != len(cases) { + t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) + } +} + +func TestLRUCache_Eviction(t *testing.T) { + c := NewLRUCache(12) + ns := c.GetNamespace(0) + o1 := set(ns, 1, 1, 1, nil) + set(ns, 2, 2, 1, nil).Release() + set(ns, 3, 3, 1, nil).Release() + set(ns, 4, 4, 1, nil).Release() + set(ns, 5, 5, 1, nil).Release() + if h := ns.Get(2, nil); h != nil { // 1,3,4,5,2 + h.Release() + } + set(ns, 9, 9, 10, nil).Release() // 5,2,9 + + for _, key := range []uint64{9, 2, 5, 1} { + h := ns.Get(key, nil) + if h == nil { + t.Errorf("miss for key '%d'", key) + } else { + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } + o1.Release() + for _, key := range []uint64{1, 2, 5} { + h := ns.Get(key, nil) + if h == nil { + t.Errorf("miss for key '%d'", key) + } else { + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } + for _, key := range []uint64{3, 4, 9} { + h := ns.Get(key, nil) + if h != nil { + t.Errorf("hit for key '%d'", key) + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } +} + +func 
TestLRUCache_SetGet(t *testing.T) { + c := NewLRUCache(13) + ns := c.GetNamespace(0) + for i := 0; i < 200; i++ { + n := uint64(rand.Intn(99999) % 20) + set(ns, n, n, 1, nil).Release() + if h := ns.Get(n, nil); h != nil { + if h.Value() == nil { + t.Errorf("key '%d' contains nil value", n) + } else { + if x := h.Value().(uint64); x != n { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, x) + } + } + h.Release() + } else { + t.Errorf("key '%d' doesn't exist", n) + } + } +} + +func TestLRUCache_Purge(t *testing.T) { + c := NewLRUCache(3) + ns1 := c.GetNamespace(0) + o1 := set(ns1, 1, 1, 1, nil) + o2 := set(ns1, 2, 2, 1, nil) + ns1.Purge(nil) + set(ns1, 3, 3, 1, nil).Release() + for _, key := range []uint64{1, 2, 3} { + h := ns1.Get(key, nil) + if h == nil { + t.Errorf("miss for key '%d'", key) + } else { + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } + o1.Release() + o2.Release() + for _, key := range []uint64{1, 2} { + h := ns1.Get(key, nil) + if h != nil { + t.Errorf("hit for key '%d'", key) + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } +} + +type testingCacheObjectCounter struct { + created uint32 + released uint32 +} + +func (c *testingCacheObjectCounter) createOne() { + atomic.AddUint32(&c.created, 1) +} + +func (c *testingCacheObjectCounter) releaseOne() { + atomic.AddUint32(&c.released, 1) +} + +type testingCacheObject struct { + t *testing.T + cnt *testingCacheObjectCounter + + ns, key uint64 + + releaseCalled uint32 +} + +func (x *testingCacheObject) Release() { + if atomic.CompareAndSwapUint32(&x.releaseCalled, 0, 1) { + x.cnt.releaseOne() + } else { + x.t.Errorf("duplicate setfin NS#%d KEY#%s", x.ns, x.key) + } +} + +func TestLRUCache_Finalizer(t *testing.T) { + const ( + capacity = 100 + goroutines = 100 + iterations = 10000 + keymax = 8000 + ) + + runtime.GOMAXPROCS(runtime.NumCPU()) + defer runtime.GOMAXPROCS(1) + + wg := &sync.WaitGroup{} + cnt := &testingCacheObjectCounter{} + + c := NewLRUCache(capacity) + + type instance struct { + seed int64 + rnd *rand.Rand + ns uint64 + effective int32 + handles []Handle + handlesMap map[uint64]int + + delete bool + purge bool + zap bool + wantDel int32 + delfinCalledAll int32 + delfinCalledEff int32 + purgefinCalled int32 + } + + instanceGet := func(p *instance, ns Namespace, key uint64) { + h := ns.Get(key, func() (charge int, value interface{}) { + to := &testingCacheObject{ + t: t, cnt: cnt, + ns: p.ns, + key: key, + } + atomic.AddInt32(&p.effective, 1) + cnt.createOne() + return 1, releaserFunc{func() { + to.Release() + atomic.AddInt32(&p.effective, -1) + }, to} + }) + p.handles = append(p.handles, h) + p.handlesMap[key] = p.handlesMap[key] + 1 + } + instanceRelease := func(p *instance, ns Namespace, i int) { + h := p.handles[i] + key := h.Value().(releaserFunc).value.(*testingCacheObject).key + if n := p.handlesMap[key]; n == 0 { + t.Fatal("key ref == 0") + } else if n > 1 { + p.handlesMap[key] = n - 1 + } else { + delete(p.handlesMap, key) + } + h.Release() + p.handles = append(p.handles[:i], p.handles[i+1:]...) 
+ p.handles[len(p.handles) : len(p.handles)+1][0] = nil + } + + seeds := make([]int64, goroutines) + instances := make([]instance, goroutines) + for i := range instances { + p := &instances[i] + p.handlesMap = make(map[uint64]int) + if seeds[i] == 0 { + seeds[i] = time.Now().UnixNano() + } + p.seed = seeds[i] + p.rnd = rand.New(rand.NewSource(p.seed)) + p.ns = uint64(i) + p.delete = i%6 == 0 + p.purge = i%8 == 0 + p.zap = i%12 == 0 || i%3 == 0 + } + + seedsStr := make([]string, len(seeds)) + for i, seed := range seeds { + seedsStr[i] = fmt.Sprint(seed) + } + t.Logf("seeds := []int64{%s}", strings.Join(seedsStr, ", ")) + + // Get and release. + for i := range instances { + p := &instances[i] + + wg.Add(1) + go func(p *instance) { + defer wg.Done() + + ns := c.GetNamespace(p.ns) + for i := 0; i < iterations; i++ { + if len(p.handles) == 0 || p.rnd.Int()%2 == 0 { + instanceGet(p, ns, uint64(p.rnd.Intn(keymax))) + } else { + instanceRelease(p, ns, p.rnd.Intn(len(p.handles))) + } + } + }(p) + } + wg.Wait() + + if used, cap := c.Used(), c.Capacity(); used > cap { + t.Errorf("Used > capacity, used=%d cap=%d", used, cap) + } + + // Check effective objects. + for i := range instances { + p := &instances[i] + if int(p.effective) < len(p.handlesMap) { + t.Errorf("#%d effective objects < acquired handle, eo=%d ah=%d", i, p.effective, len(p.handlesMap)) + } + } + + if want := int(cnt.created - cnt.released); c.Size() != want { + t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size()) + } + + // Delete and purge. + for i := range instances { + p := &instances[i] + p.wantDel = p.effective + + wg.Add(1) + go func(p *instance) { + defer wg.Done() + + ns := c.GetNamespace(p.ns) + + if p.delete { + for key := uint64(0); key < keymax; key++ { + _, wantExist := p.handlesMap[key] + gotExist := ns.Delete(key, func(exist, pending bool) { + atomic.AddInt32(&p.delfinCalledAll, 1) + if exist { + atomic.AddInt32(&p.delfinCalledEff, 1) + } + }) + if !gotExist && wantExist { + t.Errorf("delete on NS#%d KEY#%d not found", p.ns, key) + } + } + + var delfinCalled int + for key := uint64(0); key < keymax; key++ { + func(key uint64) { + gotExist := ns.Delete(key, func(exist, pending bool) { + if exist && !pending { + t.Errorf("delete fin on NS#%d KEY#%d exist and not pending for deletion", p.ns, key) + } + delfinCalled++ + }) + if gotExist { + t.Errorf("delete on NS#%d KEY#%d found", p.ns, key) + } + }(key) + } + if delfinCalled != keymax { + t.Errorf("(2) #%d not all delete fin called, diff=%d", p.ns, keymax-delfinCalled) + } + } + + if p.purge { + ns.Purge(func(ns, key uint64) { + atomic.AddInt32(&p.purgefinCalled, 1) + }) + } + }(p) + } + wg.Wait() + + if want := int(cnt.created - cnt.released); c.Size() != want { + t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size()) + } + + // Release. + for i := range instances { + p := &instances[i] + + if !p.zap { + wg.Add(1) + go func(p *instance) { + defer wg.Done() + + ns := c.GetNamespace(p.ns) + for i := len(p.handles) - 1; i >= 0; i-- { + instanceRelease(p, ns, i) + } + }(p) + } + } + wg.Wait() + + if want := int(cnt.created - cnt.released); c.Size() != want { + t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size()) + } + + // Zap. 
+ for i := range instances { + p := &instances[i] + + if p.zap { + wg.Add(1) + go func(p *instance) { + defer wg.Done() + + ns := c.GetNamespace(p.ns) + ns.Zap() + + p.handles = nil + p.handlesMap = nil + }(p) + } + } + wg.Wait() + + if want := int(cnt.created - cnt.released); c.Size() != want { + t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size()) + } + + if notrel, used := int(cnt.created-cnt.released), c.Used(); notrel != used { + t.Errorf("Invalid used value, want=%d got=%d", notrel, used) + } + + c.Purge(nil) + + for i := range instances { + p := &instances[i] + + if p.delete { + if p.delfinCalledAll != keymax { + t.Errorf("#%d not all delete fin called, purge=%v zap=%v diff=%d", p.ns, p.purge, p.zap, keymax-p.delfinCalledAll) + } + if p.delfinCalledEff != p.wantDel { + t.Errorf("#%d not all effective delete fin called, diff=%d", p.ns, p.wantDel-p.delfinCalledEff) + } + if p.purge && p.purgefinCalled > 0 { + t.Errorf("#%d some purge fin called, delete=%v zap=%v n=%d", p.ns, p.delete, p.zap, p.purgefinCalled) + } + } else { + if p.purge { + if p.purgefinCalled != p.wantDel { + t.Errorf("#%d not all purge fin called, delete=%v zap=%v diff=%d", p.ns, p.delete, p.zap, p.wantDel-p.purgefinCalled) + } + } + } + } + + if cnt.created != cnt.released { + t.Errorf("Some cache object weren't released, created=%d released=%d", cnt.created, cnt.released) + } +} + +func BenchmarkLRUCache_Set(b *testing.B) { + c := NewLRUCache(0) + ns := c.GetNamespace(0) + b.ResetTimer() + for i := uint64(0); i < uint64(b.N); i++ { + set(ns, i, "", 1, nil) + } +} + +func BenchmarkLRUCache_Get(b *testing.B) { + c := NewLRUCache(0) + ns := c.GetNamespace(0) + b.ResetTimer() + for i := uint64(0); i < uint64(b.N); i++ { + set(ns, i, "", 1, nil) + } + b.ResetTimer() + for i := uint64(0); i < uint64(b.N); i++ { + ns.Get(i, nil) + } +} + +func BenchmarkLRUCache_Get2(b *testing.B) { + c := NewLRUCache(0) + ns := c.GetNamespace(0) + b.ResetTimer() + for i := uint64(0); i < uint64(b.N); i++ { + set(ns, i, "", 1, nil) + } + b.ResetTimer() + for i := uint64(0); i < uint64(b.N); i++ { + ns.Get(i, func() (charge int, value interface{}) { + return 0, nil + }) + } +} + +func BenchmarkLRUCache_Release(b *testing.B) { + c := NewLRUCache(0) + ns := c.GetNamespace(0) + handles := make([]Handle, b.N) + for i := uint64(0); i < uint64(b.N); i++ { + handles[i] = set(ns, i, "", 1, nil) + } + b.ResetTimer() + for _, h := range handles { + h.Release() + } +} + +func BenchmarkLRUCache_SetRelease(b *testing.B) { + capacity := b.N / 100 + if capacity <= 0 { + capacity = 10 + } + c := NewLRUCache(capacity) + ns := c.GetNamespace(0) + b.ResetTimer() + for i := uint64(0); i < uint64(b.N); i++ { + set(ns, i, "", 1, nil).Release() + } +} + +func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) { + capacity := b.N / 100 + if capacity <= 0 { + capacity = 10 + } + c := NewLRUCache(capacity) + ns := c.GetNamespace(0) + b.ResetTimer() + + na := b.N / 2 + nb := b.N - na + + for i := uint64(0); i < uint64(na); i++ { + set(ns, i, "", 1, nil).Release() + } + + for i := uint64(0); i < uint64(nb); i++ { + set(ns, i, "", 1, nil).Release() + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go new file mode 100644 index 0000000..164b4bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go @@ -0,0 +1,613 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// The LLRB implementation was taken from https://github.com/petar/GoLLRB,
+// which contains the following header:
+//
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// lruCache represents an LRU cache state.
+type lruCache struct {
+	mu                sync.Mutex
+	recent            lruNode
+	table             map[uint64]*lruNs
+	capacity          int
+	used, size, alive int
+}
+
+// NewLRUCache creates a new initialized LRU cache with the given capacity.
+func NewLRUCache(capacity int) Cache {
+	c := &lruCache{
+		table:    make(map[uint64]*lruNs),
+		capacity: capacity,
+	}
+	c.recent.rNext = &c.recent
+	c.recent.rPrev = &c.recent
+	return c
+}
+
+func (c *lruCache) Capacity() int {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.capacity
+}
+
+func (c *lruCache) Used() int {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.used
+}
+
+func (c *lruCache) Size() int {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.size
+}
+
+func (c *lruCache) NumObjects() int {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.alive
+}
+
+// SetCapacity sets the cache capacity.
+func (c *lruCache) SetCapacity(capacity int) {
+	c.mu.Lock()
+	c.capacity = capacity
+	c.evict()
+	c.mu.Unlock()
+}
+
+// GetNamespace returns the namespace object for the given id.
+func (c *lruCache) GetNamespace(id uint64) Namespace {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if ns, ok := c.table[id]; ok {
+		return ns
+	}
+
+	ns := &lruNs{lru: c, id: id}
+	c.table[id] = ns
+	return ns
+}
+
+func (c *lruCache) ZapNamespace(id uint64) {
+	c.mu.Lock()
+	if ns, exist := c.table[id]; exist {
+		ns.zapNB()
+		delete(c.table, id)
+	}
+	c.mu.Unlock()
+}
+
+func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) {
+	c.mu.Lock()
+	if ns, exist := c.table[id]; exist {
+		ns.purgeNB(fin)
+	}
+	c.mu.Unlock()
+}
+
+// Purge purges the entire cache.
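+// The fin callback, if not nil, is invoked for each purged object once it
+// is finally released. A minimal sketch of its use (illustrative only; log
+// is the standard library logger):
+//
+//	c.Purge(func(ns, key uint64) {
+//		log.Printf("purged ns=%d key=%d", ns, key)
+//	})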
+func (c *lruCache) Purge(fin PurgeFin) { + c.mu.Lock() + for _, ns := range c.table { + ns.purgeNB(fin) + } + c.mu.Unlock() +} + +func (c *lruCache) Zap() { + c.mu.Lock() + for _, ns := range c.table { + ns.zapNB() + } + c.table = make(map[uint64]*lruNs) + c.mu.Unlock() +} + +func (c *lruCache) evict() { + top := &c.recent + for n := c.recent.rPrev; c.used > c.capacity && n != top; { + n.state = nodeEvicted + n.rRemove() + n.derefNB() + c.used -= n.charge + n = c.recent.rPrev + } +} + +type lruNs struct { + lru *lruCache + id uint64 + rbRoot *lruNode + state nsState +} + +func (ns *lruNs) rbGetOrCreateNode(h *lruNode, key uint64) (hn, n *lruNode) { + if h == nil { + n = &lruNode{ns: ns, key: key} + return n, n + } + + if key < h.key { + hn, n = ns.rbGetOrCreateNode(h.rbLeft, key) + if hn != nil { + h.rbLeft = hn + } else { + return nil, n + } + } else if key > h.key { + hn, n = ns.rbGetOrCreateNode(h.rbRight, key) + if hn != nil { + h.rbRight = hn + } else { + return nil, n + } + } else { + return nil, h + } + + if rbIsRed(h.rbRight) && !rbIsRed(h.rbLeft) { + h = rbRotLeft(h) + } + if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) { + h = rbRotRight(h) + } + if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) { + rbFlip(h) + } + return h, n +} + +func (ns *lruNs) getOrCreateNode(key uint64) *lruNode { + hn, n := ns.rbGetOrCreateNode(ns.rbRoot, key) + if hn != nil { + ns.rbRoot = hn + ns.rbRoot.rbBlack = true + } + return n +} + +func (ns *lruNs) rbGetNode(key uint64) *lruNode { + h := ns.rbRoot + for h != nil { + switch { + case key < h.key: + h = h.rbLeft + case key > h.key: + h = h.rbRight + default: + return h + } + } + return nil +} + +func (ns *lruNs) getNode(key uint64) *lruNode { + return ns.rbGetNode(key) +} + +func (ns *lruNs) rbDeleteNode(h *lruNode, key uint64) *lruNode { + if h == nil { + return nil + } + + if key < h.key { + if h.rbLeft == nil { // key not present. 
Nothing to delete + return h + } + if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) { + h = rbMoveLeft(h) + } + h.rbLeft = ns.rbDeleteNode(h.rbLeft, key) + } else { + if rbIsRed(h.rbLeft) { + h = rbRotRight(h) + } + // If @key equals @h.key and no right children at @h + if h.key == key && h.rbRight == nil { + return nil + } + if h.rbRight != nil && !rbIsRed(h.rbRight) && !rbIsRed(h.rbRight.rbLeft) { + h = rbMoveRight(h) + } + // If @key equals @h.key, and (from above) 'h.Right != nil' + if h.key == key { + var x *lruNode + h.rbRight, x = rbDeleteMin(h.rbRight) + if x == nil { + panic("logic") + } + x.rbLeft, h.rbLeft = h.rbLeft, nil + x.rbRight, h.rbRight = h.rbRight, nil + x.rbBlack = h.rbBlack + h = x + } else { // Else, @key is bigger than @h.key + h.rbRight = ns.rbDeleteNode(h.rbRight, key) + } + } + + return rbFixup(h) +} + +func (ns *lruNs) deleteNode(key uint64) { + ns.rbRoot = ns.rbDeleteNode(ns.rbRoot, key) + if ns.rbRoot != nil { + ns.rbRoot.rbBlack = true + } +} + +func (ns *lruNs) rbIterateNodes(h *lruNode, pivot uint64, iter func(n *lruNode) bool) bool { + if h == nil { + return true + } + if h.key >= pivot { + if !ns.rbIterateNodes(h.rbLeft, pivot, iter) { + return false + } + if !iter(h) { + return false + } + } + return ns.rbIterateNodes(h.rbRight, pivot, iter) +} + +func (ns *lruNs) iterateNodes(iter func(n *lruNode) bool) { + ns.rbIterateNodes(ns.rbRoot, 0, iter) +} + +func (ns *lruNs) Get(key uint64, setf SetFunc) Handle { + ns.lru.mu.Lock() + defer ns.lru.mu.Unlock() + + if ns.state != nsEffective { + return nil + } + + var n *lruNode + if setf == nil { + n = ns.getNode(key) + if n == nil { + return nil + } + } else { + n = ns.getOrCreateNode(key) + } + switch n.state { + case nodeZero: + charge, value := setf() + if value == nil { + ns.deleteNode(key) + return nil + } + if charge < 0 { + charge = 0 + } + + n.value = value + n.charge = charge + n.state = nodeEvicted + + ns.lru.size += charge + ns.lru.alive++ + + fallthrough + case nodeEvicted: + if n.charge == 0 { + break + } + + // Insert to recent list. + n.state = nodeEffective + n.ref++ + ns.lru.used += n.charge + ns.lru.evict() + + fallthrough + case nodeEffective: + // Bump to front. 
+		n.rRemove()
+		n.rInsert(&ns.lru.recent)
+	}
+	n.ref++
+
+	return &lruHandle{node: n}
+}
+
+func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
+	ns.lru.mu.Lock()
+	defer ns.lru.mu.Unlock()
+
+	if ns.state != nsEffective {
+		if fin != nil {
+			fin(false, false)
+		}
+		return false
+	}
+
+	n := ns.getNode(key)
+	if n == nil {
+		if fin != nil {
+			fin(false, false)
+		}
+		return false
+	}
+
+	switch n.state {
+	case nodeEffective:
+		ns.lru.used -= n.charge
+		n.state = nodeDeleted
+		n.delfin = fin
+		n.rRemove()
+		n.derefNB()
+	case nodeEvicted:
+		n.state = nodeDeleted
+		n.delfin = fin
+	case nodeDeleted:
+		if fin != nil {
+			fin(true, true)
+		}
+		return false
+	default:
+		panic("invalid state")
+	}
+
+	return true
+}
+
+func (ns *lruNs) purgeNB(fin PurgeFin) {
+	if ns.state == nsEffective {
+		var nodes []*lruNode
+		ns.iterateNodes(func(n *lruNode) bool {
+			nodes = append(nodes, n)
+			return true
+		})
+		for _, n := range nodes {
+			switch n.state {
+			case nodeEffective:
+				ns.lru.used -= n.charge
+				n.state = nodeDeleted
+				n.purgefin = fin
+				n.rRemove()
+				n.derefNB()
+			case nodeEvicted:
+				n.state = nodeDeleted
+				n.purgefin = fin
+			case nodeDeleted:
+			default:
+				panic("invalid state")
+			}
+		}
+	}
+}
+
+func (ns *lruNs) Purge(fin PurgeFin) {
+	ns.lru.mu.Lock()
+	ns.purgeNB(fin)
+	ns.lru.mu.Unlock()
+}
+
+func (ns *lruNs) zapNB() {
+	if ns.state == nsEffective {
+		ns.state = nsZapped
+
+		ns.iterateNodes(func(n *lruNode) bool {
+			if n.state == nodeEffective {
+				ns.lru.used -= n.charge
+				n.rRemove()
+			}
+			ns.lru.size -= n.charge
+			n.state = nodeDeleted
+			n.fin()
+
+			return true
+		})
+		ns.rbRoot = nil
+	}
+}
+
+func (ns *lruNs) Zap() {
+	ns.lru.mu.Lock()
+	ns.zapNB()
+	delete(ns.lru.table, ns.id)
+	ns.lru.mu.Unlock()
+}
+
+type lruNode struct {
+	ns *lruNs
+
+	rNext, rPrev    *lruNode
+	rbLeft, rbRight *lruNode
+	rbBlack         bool
+
+	key      uint64
+	value    interface{}
+	charge   int
+	ref      int
+	state    nodeState
+	delfin   DelFin
+	purgefin PurgeFin
+}
+
+func (n *lruNode) rInsert(at *lruNode) {
+	x := at.rNext
+	at.rNext = n
+	n.rPrev = at
+	n.rNext = x
+	x.rPrev = n
+}
+
+func (n *lruNode) rRemove() bool {
+	if n.rPrev == nil {
+		return false
+	}
+
+	n.rPrev.rNext = n.rNext
+	n.rNext.rPrev = n.rPrev
+	n.rPrev = nil
+	n.rNext = nil
+
+	return true
+}
+
+func (n *lruNode) fin() {
+	if r, ok := n.value.(util.Releaser); ok {
+		r.Release()
+	}
+	if n.purgefin != nil {
+		n.purgefin(n.ns.id, n.key)
+		n.delfin = nil
+		n.purgefin = nil
+	} else if n.delfin != nil {
+		n.delfin(true, false)
+		n.delfin = nil
+	}
+}
+
+func (n *lruNode) derefNB() {
+	n.ref--
+	if n.ref == 0 {
+		if n.ns.state == nsEffective {
+			// Remove element.
+ n.ns.deleteNode(n.key) + n.ns.lru.size -= n.charge + n.ns.lru.alive-- + n.fin() + } + n.value = nil + } else if n.ref < 0 { + panic("leveldb/cache: lruCache: negative node reference") + } +} + +func (n *lruNode) deref() { + n.ns.lru.mu.Lock() + n.derefNB() + n.ns.lru.mu.Unlock() +} + +type lruHandle struct { + node *lruNode + once uint32 +} + +func (h *lruHandle) Value() interface{} { + if atomic.LoadUint32(&h.once) == 0 { + return h.node.value + } + return nil +} + +func (h *lruHandle) Release() { + if !atomic.CompareAndSwapUint32(&h.once, 0, 1) { + return + } + h.node.deref() + h.node = nil +} + +func rbIsRed(h *lruNode) bool { + if h == nil { + return false + } + return !h.rbBlack +} + +func rbRotLeft(h *lruNode) *lruNode { + x := h.rbRight + if x.rbBlack { + panic("rotating a black link") + } + h.rbRight = x.rbLeft + x.rbLeft = h + x.rbBlack = h.rbBlack + h.rbBlack = false + return x +} + +func rbRotRight(h *lruNode) *lruNode { + x := h.rbLeft + if x.rbBlack { + panic("rotating a black link") + } + h.rbLeft = x.rbRight + x.rbRight = h + x.rbBlack = h.rbBlack + h.rbBlack = false + return x +} + +func rbFlip(h *lruNode) { + h.rbBlack = !h.rbBlack + h.rbLeft.rbBlack = !h.rbLeft.rbBlack + h.rbRight.rbBlack = !h.rbRight.rbBlack +} + +func rbMoveLeft(h *lruNode) *lruNode { + rbFlip(h) + if rbIsRed(h.rbRight.rbLeft) { + h.rbRight = rbRotRight(h.rbRight) + h = rbRotLeft(h) + rbFlip(h) + } + return h +} + +func rbMoveRight(h *lruNode) *lruNode { + rbFlip(h) + if rbIsRed(h.rbLeft.rbLeft) { + h = rbRotRight(h) + rbFlip(h) + } + return h +} + +func rbFixup(h *lruNode) *lruNode { + if rbIsRed(h.rbRight) { + h = rbRotLeft(h) + } + + if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) { + h = rbRotRight(h) + } + + if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) { + rbFlip(h) + } + + return h +} + +func rbDeleteMin(h *lruNode) (hn, n *lruNode) { + if h == nil { + return nil, nil + } + if h.rbLeft == nil { + return nil, h + } + + if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) { + h = rbMoveLeft(h) + } + + h.rbLeft, n = rbDeleteMin(h.rbLeft) + + return rbFixup(h), n +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go new file mode 100644 index 0000000..a92d3a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go @@ -0,0 +1,75 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
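+
+// iComparer below orders internal keys by user key first (ascending, via
+// the wrapped user comparer) and then by sequence number, descending, so
+// the newest entry for a given user key sorts first. A worked example with
+// hypothetical notation (ikey(ukey, seq) stands for a user key plus its
+// 8-byte sequence/type trailer):
+//
+//	ikey("a", 9) < ikey("a", 7) < ikey("b", 100)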
+
+package leveldb
+
+import "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+
+type iComparer struct {
+	ucmp comparer.Comparer
+}
+
+func (icmp *iComparer) uName() string {
+	return icmp.ucmp.Name()
+}
+
+func (icmp *iComparer) uCompare(a, b []byte) int {
+	return icmp.ucmp.Compare(a, b)
+}
+
+func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
+	return icmp.ucmp.Separator(dst, a, b)
+}
+
+func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
+	return icmp.ucmp.Successor(dst, b)
+}
+
+func (icmp *iComparer) Name() string {
+	return icmp.uName()
+}
+
+func (icmp *iComparer) Compare(a, b []byte) int {
+	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+	if x == 0 {
+		if m, n := iKey(a).num(), iKey(b).num(); m > n {
+			x = -1
+		} else if m < n {
+			x = 1
+		}
+	}
+	return x
+}
+
+func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
+	ua, ub := iKey(a).ukey(), iKey(b).ukey()
+	dst = icmp.ucmp.Separator(dst, ua, ub)
+	if dst == nil {
+		return nil
+	}
+	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
+		dst = append(dst, kMaxNumBytes...)
+	} else {
+		// Could not shorten the user key; reuse the 8-byte sequence/type trailer of a.
+		dst = append(dst, a[len(a)-8:]...)
+	}
+	return dst
+}
+
+func (icmp *iComparer) Successor(dst, b []byte) []byte {
+	ub := iKey(b).ukey()
+	dst = icmp.ucmp.Successor(dst, ub)
+	if dst == nil {
+		return nil
+	}
+	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
+		dst = append(dst, kMaxNumBytes...)
+	} else {
+		// Could not shorten the user key; reuse the 8-byte sequence/type trailer of b.
+		dst = append(dst, b[len(b)-8:]...)
+	}
+	return dst
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
new file mode 100644
index 0000000..14dddf8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package comparer
+
+import "bytes"
+
+type bytesComparer struct{}
+
+func (bytesComparer) Compare(a, b []byte) int {
+	return bytes.Compare(a, b)
+}
+
+func (bytesComparer) Name() string {
+	return "leveldb.BytewiseComparator"
+}
+
+func (bytesComparer) Separator(dst, a, b []byte) []byte {
+	i, n := 0, len(a)
+	if n > len(b) {
+		n = len(b)
+	}
+	for ; i < n && a[i] == b[i]; i++ {
+	}
+	if i >= n {
+		// Do not shorten if one string is a prefix of the other.
+	} else if c := a[i]; c < 0xff && c+1 < b[i] {
+		dst = append(dst, a[:i+1]...)
+		dst[i]++
+		return dst
+	}
+	return nil
+}
+
+func (bytesComparer) Successor(dst, b []byte) []byte {
+	for i, c := range b {
+		if c != 0xff {
+			dst = append(dst, b[:i+1]...)
+			dst[i]++
+			return dst
+		}
+	}
+	return nil
+}
+
+// DefaultComparer is the default implementation of the Comparer interface.
+// It uses the natural byte ordering, consistent with bytes.Compare.
+var DefaultComparer = bytesComparer{}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
new file mode 100644
index 0000000..14a28f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package comparer provides the interface and implementations for ordering
+// sets of data.
+package comparer
+
+// BasicComparer is the interface that wraps the basic Compare method.
+type BasicComparer interface {
+	// Compare returns -1, 0, or +1 depending on whether a is 'less than',
+	// 'equal to' or 'greater than' b. The two arguments can only be 'equal'
+	// if their contents are exactly equal. Furthermore, the empty slice
+	// must be 'less than' any non-empty slice.
+	Compare(a, b []byte) int
+}
+
+// Comparer defines a total ordering over the space of []byte keys: a 'less
+// than' relationship.
+type Comparer interface {
+	BasicComparer
+
+	// Name returns the name of the comparer.
+	//
+	// The LevelDB on-disk format stores the comparer name, and opening a
+	// database with a different comparer from the one it was created with
+	// will result in an error.
+	//
+	// An implementation should switch to a new name whenever the comparer
+	// implementation changes in a way that will cause the relative ordering
+	// of any two keys to change.
+	//
+	// Names starting with "leveldb." are reserved and should not be used
+	// by any users of this package.
+	Name() string
+
+	// Below are advanced functions used to reduce the space requirements
+	// for internal data structures such as index blocks.
+
+	// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
+	// where 'less than' is consistent with Compare. An implementation should
+	// return nil if x is equal to a.
+	//
+	// The contents of a and b must not be modified in any way. Doing so
+	// may corrupt the internal state.
+	Separator(dst, a, b []byte) []byte
+
+	// Successor appends a sequence of bytes x to dst such that x >= b, where
+	// 'less than' is consistent with Compare. An implementation should return
+	// nil if x is equal to b.
+	//
+	// The contents of b must not be modified in any way. Doing so may
+	// corrupt the internal state.
+	Successor(dst, b []byte) []byte
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go
new file mode 100644
index 0000000..5110588
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+const (
+	kNumLevels = 7
+
+	// Level-0 compaction is started when we hit this many files.
+	kL0_CompactionTrigger float64 = 4
+
+	// Soft limit on the number of level-0 files. We slow down writes at this point.
+	kL0_SlowdownWritesTrigger = 8
+
+	// Maximum number of level-0 files. We stop writes at this point.
+	kL0_StopWritesTrigger = 12
+
+	// Maximum level to which a new compacted memdb is pushed if it
+	// does not create overlap. We try to push to level 2 to avoid the
+	// relatively expensive level 0=>1 compactions and to avoid some
+	// expensive manifest file operations. We do not push all the way to
+	// the largest level since that can generate a lot of wasted disk
+	// space if the same key space is being repeatedly overwritten.
+	kMaxMemCompactLevel = 2
+
+	// Maximum size of a table.
+ kMaxTableSize = 2 * 1048576 + + // Maximum bytes of overlaps in grandparent (i.e., level+2) before we + // stop building a single file in a level->level+1 compaction. + kMaxGrandParentOverlapBytes = 10 * kMaxTableSize + + // Maximum number of bytes in all compacted files. We avoid expanding + // the lower level file set of a compaction if it would make the + // total compaction cover more than this many bytes. + kExpCompactionMaxBytes = 25 * kMaxTableSize +) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go new file mode 100644 index 0000000..137bb1f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go @@ -0,0 +1,472 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "bytes" + "fmt" + "io" + "math/rand" + "testing" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" +) + +const ctValSize = 1000 + +type dbCorruptHarness struct { + dbHarness +} + +func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { + h := new(dbCorruptHarness) + h.init(t, o) + return h +} + +func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { + return newDbCorruptHarnessWopt(t, &opt.Options{ + BlockCache: cache.NewLRUCache(100), + Strict: opt.StrictJournalChecksum, + }) +} + +func (h *dbCorruptHarness) recover() { + p := &h.dbHarness + t := p.t + + var err error + p.db, err = Recover(h.stor, h.o) + if err != nil { + t.Fatal("Repair: got error: ", err) + } +} + +func (h *dbCorruptHarness) build(n int) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := 0; i < n; i++ { + batch.Reset() + batch.Put(tkey(i), tval(i, ctValSize)) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := range rnd.Perm(n) { + batch.Reset() + batch.Put(tkey(i), tval(i, ctValSize)) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := 0; i < n; i++ { + batch.Reset() + batch.Delete(tkey(rnd.Intn(max))) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) corrupt(ft storage.FileType, offset, n int) { + p := &h.dbHarness + t := p.t + + var file storage.File + ff, _ := p.stor.GetFiles(ft) + for _, f := range ff { + if file == nil || f.Num() > file.Num() { + file = f + } + } + if file == nil { + t.Fatalf("no such file with type %q", ft) + } + + r, err := file.Open() + if err != nil { + t.Fatal("cannot open file: ", err) + } + x, err := r.Seek(0, 2) + if err != nil { + t.Fatal("cannot query file size: ", err) + } + m := int(x) + if _, err := r.Seek(0, 0); err != nil { + t.Fatal(err) + } + + if offset < 0 { + if -offset > m { + offset = 0 + } else { + offset = m + offset + } + } + if offset > m { + offset = m 
+ } + if offset+n > m { + n = m - offset + } + + buf := make([]byte, m) + _, err = io.ReadFull(r, buf) + if err != nil { + t.Fatal("cannot read file: ", err) + } + r.Close() + + for i := 0; i < n; i++ { + buf[offset+i] ^= 0x80 + } + + err = file.Remove() + if err != nil { + t.Fatal("cannot remove old file: ", err) + } + w, err := file.Create() + if err != nil { + t.Fatal("cannot create new file: ", err) + } + _, err = w.Write(buf) + if err != nil { + t.Fatal("cannot write new file: ", err) + } + w.Close() +} + +func (h *dbCorruptHarness) removeAll(ft storage.FileType) { + ff, err := h.stor.GetFiles(ft) + if err != nil { + h.t.Fatal("get files: ", err) + } + for _, f := range ff { + if err := f.Remove(); err != nil { + h.t.Error("remove file: ", err) + } + } +} + +func (h *dbCorruptHarness) removeOne(ft storage.FileType) { + ff, err := h.stor.GetFiles(ft) + if err != nil { + h.t.Fatal("get files: ", err) + } + f := ff[rand.Intn(len(ff))] + h.t.Logf("removing file @%d", f.Num()) + if err := f.Remove(); err != nil { + h.t.Error("remove file: ", err) + } +} + +func (h *dbCorruptHarness) check(min, max int) { + p := &h.dbHarness + t := p.t + db := p.db + + var n, badk, badv, missed, good int + iter := db.NewIterator(nil, p.ro) + for iter.Next() { + k := 0 + fmt.Sscanf(string(iter.Key()), "%d", &k) + if k < n { + badk++ + continue + } + missed += k - n + n = k + 1 + if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { + badv++ + } else { + good++ + } + } + err := iter.Error() + iter.Release() + t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", + min, max, good, badk, badv, missed, err) + if good < min || good > max { + t.Errorf("good entries number not in range") + } +} + +func TestCorruptDB_Journal(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(100) + h.check(100, 100) + h.closeDB() + h.corrupt(storage.TypeJournal, 19, 1) + h.corrupt(storage.TypeJournal, 32*1024+1000, 1) + + h.openDB() + h.check(36, 36) + + h.close() +} + +func TestCorruptDB_Table(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(100) + h.compactMem() + h.compactRangeAt(0, "", "") + h.compactRangeAt(1, "", "") + h.closeDB() + h.corrupt(storage.TypeTable, 100, 1) + + h.openDB() + h.check(99, 99) + + h.close() +} + +func TestCorruptDB_TableIndex(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(10000) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, -2000, 500) + + h.openDB() + h.check(5000, 9999) + + h.close() +} + +func TestCorruptDB_MissingManifest(t *testing.T) { + rnd := rand.New(rand.NewSource(0x0badda7a)) + h := newDbCorruptHarnessWopt(t, &opt.Options{ + BlockCache: cache.NewLRUCache(100), + Strict: opt.StrictJournalChecksum, + WriteBuffer: 1000 * 60, + }) + + h.build(1000) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.deleteRand(500, 1000, rnd) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.deleteRand(500, 1000, rnd) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.closeDB() + + h.stor.SetIgnoreOpenErr(storage.TypeManifest) + h.removeAll(storage.TypeManifest) + h.openAssert(false) + h.stor.SetIgnoreOpenErr(0) + + h.recover() + h.check(1000, 1000) + h.build(1000) + h.compactMem() + h.compactRange("", "") + h.closeDB() + + h.recover() + h.check(1000, 1000) + + h.close() +} + +func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("foo", "v1") + h.put("foo", "v2") + h.put("foo", "v3") + h.put("foo", "v4") + h.put("foo", "v5") + h.closeDB() + + h.recover() + 
h.getVal("foo", "v5") + h.put("foo", "v6") + h.getVal("foo", "v6") + + h.reopenDB() + h.getVal("foo", "v6") + + h.close() +} + +func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("foo", "v1") + h.put("foo", "v2") + h.put("foo", "v3") + h.compactMem() + h.put("foo", "v4") + h.put("foo", "v5") + h.compactMem() + h.closeDB() + + h.recover() + h.getVal("foo", "v5") + h.put("foo", "v6") + h.getVal("foo", "v6") + + h.reopenDB() + h.getVal("foo", "v6") + + h.close() +} + +func TestCorruptDB_CorruptedManifest(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("foo", "hello") + h.compactMem() + h.compactRange("", "") + h.closeDB() + h.corrupt(storage.TypeManifest, 0, 1000) + h.openAssert(false) + + h.recover() + h.getVal("foo", "hello") + + h.close() +} + +func TestCorruptDB_CompactionInputError(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(10) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, 100, 1) + + h.openDB() + h.check(9, 9) + + h.build(10000) + h.check(10000, 10000) + + h.close() +} + +func TestCorruptDB_UnrelatedKeys(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(10) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, 100, 1) + + h.openDB() + h.put(string(tkey(1000)), string(tval(1000, ctValSize))) + h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) + h.compactMem() + h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) + + h.close() +} + +func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("a", "v2") + h.put("b", "v2") + h.compactMem() + h.put("a", "v3") + h.put("b", "v3") + h.compactMem() + h.put("c", "v0") + h.put("d", "v0") + h.compactMem() + h.compactRangeAt(1, "", "") + h.closeDB() + + h.recover() + h.getVal("a", "v3") + h.getVal("b", "v3") + h.getVal("c", "v0") + h.getVal("d", "v0") + + h.close() +} + +func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("a", "v2") + h.put("b", "v2") + h.compactMem() + h.put("a", "v3") + h.put("b", "v3") + h.compactMem() + h.put("c", "v0") + h.put("d", "v0") + h.compactMem() + h.compactRangeAt(0, "", "") + h.closeDB() + + h.recover() + h.getVal("a", "v3") + h.getVal("b", "v3") + h.getVal("c", "v0") + h.getVal("d", "v0") + + h.close() +} + +func TestCorruptDB_MissingTableFiles(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("c", "v2") + h.put("d", "v2") + h.compactMem() + h.put("e", "v3") + h.put("f", "v3") + h.closeDB() + + h.removeOne(storage.TypeTable) + h.openAssert(false) + + h.close() +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go new file mode 100644 index 0000000..aad44b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go @@ -0,0 +1,834 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package leveldb + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// DB is a LevelDB database. +type DB struct { + // Need 64-bit alignment. + seq uint64 + + // Session. + s *session + + // MemDB. + memMu sync.RWMutex + memPool chan *memdb.DB + mem, frozenMem *memDB + journal *journal.Writer + journalWriter storage.Writer + journalFile storage.File + frozenJournalFile storage.File + frozenSeq uint64 + + // Snapshot. + snapsMu sync.Mutex + snapsRoot snapshotElement + + // Stats. + aliveSnaps, aliveIters int32 + + // Write. + writeC chan *Batch + writeMergedC chan bool + writeLockC chan struct{} + writeAckC chan error + journalC chan *Batch + journalAckC chan error + + // Compaction. + tcompCmdC chan cCmd + tcompPauseC chan chan<- struct{} + tcompTriggerC chan struct{} + mcompCmdC chan cCmd + mcompTriggerC chan struct{} + compErrC chan error + compErrSetC chan error + compStats [kNumLevels]cStats + + // Close. + closeW sync.WaitGroup + closeC chan struct{} + closed uint32 + closer io.Closer +} + +func openDB(s *session) (*DB, error) { + s.log("db@open opening") + start := time.Now() + db := &DB{ + s: s, + // Initial sequence + seq: s.stSeq, + // MemDB + memPool: make(chan *memdb.DB, 1), + // Write + writeC: make(chan *Batch), + writeMergedC: make(chan bool), + writeLockC: make(chan struct{}, 1), + writeAckC: make(chan error), + journalC: make(chan *Batch), + journalAckC: make(chan error), + // Compaction + tcompCmdC: make(chan cCmd), + tcompPauseC: make(chan chan<- struct{}), + tcompTriggerC: make(chan struct{}, 1), + mcompCmdC: make(chan cCmd), + mcompTriggerC: make(chan struct{}, 1), + compErrC: make(chan error), + compErrSetC: make(chan error), + // Close + closeC: make(chan struct{}), + } + db.initSnapshot() + + if err := db.recoverJournal(); err != nil { + return nil, err + } + + // Remove any obsolete files. + if err := db.checkAndCleanFiles(); err != nil { + // Close journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return nil, err + } + + // Don't include compaction error goroutine into wait group. + go db.compactionError() + + db.closeW.Add(3) + go db.tCompaction() + go db.mCompaction() + go db.jWriter() + go db.mpoolDrain() + + s.logf("db@open done T·%v", time.Since(start)) + + runtime.SetFinalizer(db, (*DB).Close) + return db, nil +} + +// Open opens or creates a DB for the given storage. +// The DB will be created if not exist, unless ErrorIfMissing is true. +// Also, if ErrorIfExist is true and the DB exist Open will returns +// os.ErrExist error. +// +// Open will return an error with type of ErrCorrupted if corruption +// detected in the DB. Corrupted DB can be recovered with Recover +// function. +// +// The returned DB instance is goroutine-safe. 
+
+// The DB must be closed after use, by calling the Close method.
+func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+	s, err := newSession(stor, o)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			s.close()
+			s.release()
+		}
+	}()
+
+	err = s.recover()
+	if err != nil {
+		if !os.IsNotExist(err) || s.o.GetErrorIfMissing() {
+			return
+		}
+		err = s.create()
+		if err != nil {
+			return
+		}
+	} else if s.o.GetErrorIfExist() {
+		err = os.ErrExist
+		return
+	}
+
+	return openDB(s)
+}
+
+// OpenFile opens or creates a DB for the given path.
+// The DB will be created if it does not exist, unless ErrorIfMissing is
+// true. Also, if ErrorIfExist is true and the DB exists, OpenFile will
+// return an os.ErrExist error.
+//
+// OpenFile uses the standard file-system backed storage implementation
+// as described in the leveldb/storage package.
+//
+// OpenFile will return an error of type ErrCorrupted if corruption is
+// detected in the DB. A corrupted DB can be recovered with the Recover
+// function.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling the Close method.
+func OpenFile(path string, o *opt.Options) (db *DB, err error) {
+	stor, err := storage.OpenFile(path)
+	if err != nil {
+		return
+	}
+	db, err = Open(stor, o)
+	if err != nil {
+		stor.Close()
+	} else {
+		db.closer = stor
+	}
+	return
+}
+
+// Recover recovers and opens a DB with missing or corrupted manifest files
+// for the given storage. It will ignore any manifest files, valid or not.
+// The DB must already exist or Recover will return an error.
+// Also, Recover will ignore the ErrorIfMissing and ErrorIfExist options.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling the Close method.
+func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+	s, err := newSession(stor, o)
+	if err != nil {
+		return
+	}
+	defer func() {
+		if err != nil {
+			s.close()
+			s.release()
+		}
+	}()
+
+	err = recoverTable(s, o)
+	if err != nil {
+		return
+	}
+	return openDB(s)
+}
+
+// RecoverFile recovers and opens a DB with missing or corrupted manifest files
+// for the given path. It will ignore any manifest files, valid or not.
+// The DB must already exist or RecoverFile will return an error.
+// Also, RecoverFile will ignore the ErrorIfMissing and ErrorIfExist options.
+//
+// RecoverFile uses the standard file-system backed storage implementation
+// as described in the leveldb/storage package.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling the Close method.
+func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
+	stor, err := storage.OpenFile(path)
+	if err != nil {
+		return
+	}
+	db, err = Recover(stor, o)
+	if err != nil {
+		stor.Close()
+	} else {
+		db.closer = stor
+	}
+	return
+}
+
+func recoverTable(s *session, o *opt.Options) error {
+	// Get all tables and sort them by file number.
+	tableFiles_, err := s.getFiles(storage.TypeTable)
+	if err != nil {
+		return err
+	}
+	tableFiles := files(tableFiles_)
+	tableFiles.sort()
+
+	var mSeq uint64
+	var good, corrupted int
+	rec := new(sessionRecord)
+	bpool := util.NewBufferPool(o.GetBlockSize() + 5)
+	buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
+		tmp = s.newTemp()
+		writer, err := tmp.Create()
+		if err != nil {
+			return
+		}
+		defer func() {
+			writer.Close()
+			if err != nil {
+				tmp.Remove()
+				tmp = nil
+			}
+		}()
+
+		// Copy entries.
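+		// (Editor's note: only internal keys that parse cleanly via
+		// validIkey are copied into the rebuilt table below; entries
+		// that fail to parse are dropped, which is how corrupted
+		// records are shed during recovery.)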
+ tw := table.NewWriter(writer, o) + for iter.Next() { + key := iter.Key() + if validIkey(key) { + err = tw.Append(key, iter.Value()) + if err != nil { + return + } + } + } + err = iter.Error() + if err != nil { + return + } + err = tw.Close() + if err != nil { + return + } + err = writer.Sync() + if err != nil { + return + } + size = int64(tw.BytesLen()) + return + } + recoverTable := func(file storage.File) error { + s.logf("table@recovery recovering @%d", file.Num()) + reader, err := file.Open() + if err != nil { + return err + } + defer reader.Close() + + // Get file size. + size, err := reader.Seek(0, 2) + if err != nil { + return err + } + + var tSeq uint64 + var tgood, tcorrupted, blockerr int + var imin, imax []byte + tr := table.NewReader(reader, size, nil, bpool, o) + iter := tr.NewIterator(nil, nil) + iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) { + s.logf("table@recovery found error @%d %q", file.Num(), err) + blockerr++ + }) + + // Scan the table. + for iter.Next() { + key := iter.Key() + _, seq, _, ok := parseIkey(key) + if !ok { + tcorrupted++ + continue + } + tgood++ + if seq > tSeq { + tSeq = seq + } + if imin == nil { + imin = append([]byte{}, key...) + } + imax = append(imax[:0], key...) + } + if err := iter.Error(); err != nil { + iter.Release() + return err + } + iter.Release() + + if tgood > 0 { + if tcorrupted > 0 || blockerr > 0 { + // Rebuild the table. + s.logf("table@recovery rebuilding @%d", file.Num()) + iter := tr.NewIterator(nil, nil) + tmp, newSize, err := buildTable(iter) + iter.Release() + if err != nil { + return err + } + reader.Close() + if err := file.Replace(tmp); err != nil { + return err + } + size = newSize + } + if tSeq > mSeq { + mSeq = tSeq + } + // Add table to level 0. + rec.addTable(0, file.Num(), uint64(size), imin, imax) + s.logf("table@recovery recovered @%d N·%d C·%d B·%d S·%d Q·%d", file.Num(), tgood, tcorrupted, blockerr, size, tSeq) + } else { + s.logf("table@recovery unrecoverable @%d C·%d B·%d S·%d", file.Num(), tcorrupted, blockerr, size) + } + + good += tgood + corrupted += tcorrupted + + return nil + } + + // Recover all tables. + if len(tableFiles) > 0 { + s.logf("table@recovery F·%d", len(tableFiles)) + + // Mark file number as used. + s.markFileNum(tableFiles[len(tableFiles)-1].Num()) + + for _, file := range tableFiles { + if err := recoverTable(file); err != nil { + return err + } + } + + s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(tableFiles), good, corrupted, mSeq) + } + + // Set sequence number. + rec.setSeq(mSeq + 1) + + // Create new manifest. + if err := s.create(); err != nil { + return err + } + + // Commit. + return s.commit(rec) +} + +func (db *DB) recoverJournal() error { + // Get all tables and sort it by file number. + journalFiles_, err := db.s.getFiles(storage.TypeJournal) + if err != nil { + return err + } + journalFiles := files(journalFiles_) + journalFiles.sort() + + // Discard older journal. + prev := -1 + for i, file := range journalFiles { + if file.Num() >= db.s.stJournalNum { + if prev >= 0 { + i-- + journalFiles[i] = journalFiles[prev] + } + journalFiles = journalFiles[i:] + break + } else if file.Num() == db.s.stPrevJournalNum { + prev = i + } + } + + var jr *journal.Reader + var of storage.File + var mem *memdb.DB + batch := new(Batch) + cm := newCMem(db.s) + buf := new(util.Buffer) + // Options. 
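+	// (Editor's note: `strict` below decides whether a dropped or
+	// corrupted journal chunk aborts recovery instead of merely being
+	// reported through the dropper, and `checksum` toggles per-chunk
+	// checksum verification; both are read from the session options.)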
+ strict := db.s.o.GetStrict(opt.StrictJournal) + checksum := db.s.o.GetStrict(opt.StrictJournalChecksum) + writeBuffer := db.s.o.GetWriteBuffer() + recoverJournal := func(file storage.File) error { + db.logf("journal@recovery recovering @%d", file.Num()) + reader, err := file.Open() + if err != nil { + return err + } + defer reader.Close() + + // Create/reset journal reader instance. + if jr == nil { + jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum) + } else { + jr.Reset(reader, dropper{db.s, file}, strict, checksum) + } + + // Flush memdb and remove obsolete journal file. + if of != nil { + if mem.Len() > 0 { + if err := cm.flush(mem, 0); err != nil { + return err + } + } + if err := cm.commit(file.Num(), db.seq); err != nil { + return err + } + cm.reset() + of.Remove() + of = nil + } + + // Replay journal to memdb. + mem.Reset() + for { + r, err := jr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + buf.Reset() + if _, err := buf.ReadFrom(r); err != nil { + if err == io.ErrUnexpectedEOF { + continue + } else { + return err + } + } + if err := batch.decode(buf.Bytes()); err != nil { + return err + } + if err := batch.memReplay(mem); err != nil { + return err + } + + // Save sequence number. + db.seq = batch.seq + uint64(batch.len()) + + // Flush it if large enough. + if mem.Size() >= writeBuffer { + if err := cm.flush(mem, 0); err != nil { + return err + } + mem.Reset() + } + } + + of = file + return nil + } + + // Recover all journals. + if len(journalFiles) > 0 { + db.logf("journal@recovery F·%d", len(journalFiles)) + + // Mark file number as used. + db.s.markFileNum(journalFiles[len(journalFiles)-1].Num()) + + mem = memdb.New(db.s.icmp, writeBuffer) + for _, file := range journalFiles { + if err := recoverJournal(file); err != nil { + return err + } + } + + // Flush the last journal. + if mem.Len() > 0 { + if err := cm.flush(mem, 0); err != nil { + return err + } + } + } + + // Create a new journal. + if _, err := db.newMem(0); err != nil { + return err + } + + // Commit. + if err := cm.commit(db.journalFile.Num(), db.seq); err != nil { + // Close journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return err + } + + // Remove the last obsolete journal file. + if of != nil { + of.Remove() + } + + return nil +} + +func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { + ikey := newIKey(key, seq, tSeek) + + em, fm := db.getMems() + for _, m := range [...]*memDB{em, fm} { + if m == nil { + continue + } + defer m.decref() + + mk, mv, me := m.mdb.Find(ikey) + if me == nil { + ukey, _, t, ok := parseIkey(mk) + if ok && db.s.icmp.uCompare(ukey, key) == 0 { + if t == tDel { + return nil, ErrNotFound + } + return append([]byte{}, mv...), nil + } + } else if me != ErrNotFound { + return nil, me + } + } + + v := db.s.version() + value, cSched, err := v.get(ikey, ro) + v.release() + if cSched { + // Trigger table compaction. + db.compTrigger(db.tcompTriggerC) + } + return +} + +// Get gets the value for the given key. It returns ErrNotFound if the +// DB does not contain the key. +// +// The returned slice is its own copy, it is safe to modify the contents +// of the returned slice. +// It is safe to modify the contents of the argument after Get returns. 
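+//
+// Illustrative call (editor's sketch; the key is made up):
+//
+//	value, err := db.Get([]byte("foo"), nil)
+//	if err == ErrNotFound {
+//		// the key is absent
+//	}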
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	return db.get(key, db.getSeq(), ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the
+// underlying DB.
+// The returned iterator is not goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to contain only keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling the Release method.
+//
+// Also read the Iterator documentation of the leveldb/iterator package.
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := db.ok(); err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+
+	snap := db.newSnapshot()
+	defer snap.Release()
+	return snap.NewIterator(slice, ro)
+}
+
+// GetSnapshot returns the latest snapshot of the underlying DB. A snapshot
+// is a frozen snapshot of a DB state at a particular point in time. The
+// contents of a snapshot are guaranteed to be consistent.
+//
+// The snapshot must be released after use, by calling the Release method.
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+	if err := db.ok(); err != nil {
+		return nil, err
+	}
+
+	return db.newSnapshot(), nil
+}
+
+// GetProperty returns the value of the given property name.
+//
+// Property names:
+//	leveldb.num-files-at-level{n}
+//		Returns the number of files at level 'n'.
+//	leveldb.stats
+//		Returns statistics of the underlying DB.
+//	leveldb.sstables
+//		Returns sstables list for each level.
+//	leveldb.blockpool
+//		Returns block pool stats.
+//	leveldb.cachedblock
+//		Returns size of cached blocks.
+//	leveldb.openedtables
+//		Returns number of opened tables.
+//	leveldb.alivesnaps
+//		Returns number of alive snapshots.
+//	leveldb.aliveiters
+//		Returns number of alive iterators.
+func (db *DB) GetProperty(name string) (value string, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	const prefix = "leveldb."
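+	// Editor's note: every property name carries the "leveldb." prefix,
+	// which is stripped before matching, e.g.:
+	//
+	//	nf, _ := db.GetProperty("leveldb.num-files-at-level0")
+	//	stats, _ := db.GetProperty("leveldb.stats")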
+	if !strings.HasPrefix(name, prefix) {
+		return "", errors.New("leveldb: GetProperty: unknown property: " + name)
+	}
+	p := name[len(prefix):]
+
+	v := db.s.version()
+	defer v.release()
+
+	switch {
+	case strings.HasPrefix(p, "num-files-at-level"):
+		var level uint
+		var rest string
+		// Parse the level number out of the property name itself.
+		n, _ := fmt.Sscanf(p[len("num-files-at-level"):], "%d%s", &level, &rest)
+		if n != 1 || level >= kNumLevels {
+			err = errors.New("leveldb: GetProperty: invalid property: " + name)
+		} else {
+			value = fmt.Sprint(v.tLen(int(level)))
+		}
+	case p == "stats":
+		value = "Compactions\n" +
+			" Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)\n" +
+			"-------+------------+---------------+---------------+---------------+---------------\n"
+		for level, tables := range v.tables {
+			duration, read, write := db.compStats[level].get()
+			if len(tables) == 0 && duration == 0 {
+				continue
+			}
+			value += fmt.Sprintf(" %3d   | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
+				level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
+				float64(read)/1048576.0, float64(write)/1048576.0)
+		}
+	case p == "sstables":
+		for level, tables := range v.tables {
+			value += fmt.Sprintf("--- level %d ---\n", level)
+			for _, t := range tables {
+				value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
+			}
+		}
+	case p == "blockpool":
+		value = fmt.Sprintf("%v", db.s.tops.bpool)
+	case p == "cachedblock":
+		if bc := db.s.o.GetBlockCache(); bc != nil {
+			value = fmt.Sprintf("%d", bc.Size())
+		} else {
+			value = ""
+		}
+	case p == "openedtables":
+		value = fmt.Sprintf("%d", db.s.tops.cache.Size())
+	case p == "alivesnaps":
+		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
+	case p == "aliveiters":
+		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
+	default:
+		err = errors.New("leveldb: GetProperty: unknown property: " + name)
+	}
+
+	return
+}
+
+// SizeOf calculates approximate sizes of the given key ranges.
+// The length of the returned sizes slice equals the length of the given
+// ranges. The returned sizes measure storage space usage, so if the user
+// data compresses by a factor of ten, the returned sizes will be one-tenth
+// the size of the corresponding user data.
+// The results may not include the sizes of recently written data.
+func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
+	if err := db.ok(); err != nil {
+		return nil, err
+	}
+
+	v := db.s.version()
+	defer v.release()
+
+	sizes := make(Sizes, 0, len(ranges))
+	for _, r := range ranges {
+		imin := newIKey(r.Start, kMaxSeq, tSeek)
+		imax := newIKey(r.Limit, kMaxSeq, tSeek)
+		start, err := v.offsetOf(imin)
+		if err != nil {
+			return nil, err
+		}
+		limit, err := v.offsetOf(imax)
+		if err != nil {
+			return nil, err
+		}
+		var size uint64
+		if limit >= start {
+			size = limit - start
+		}
+		sizes = append(sizes, size)
+	}
+
+	return sizes, nil
+}
+
+// Close closes the DB. This will also release any outstanding snapshots
+// and abort any in-flight compactions.
+//
+// It is not safe to close a DB until all outstanding iterators are released.
+// It is valid to call Close multiple times. Other methods should not be
+// called after the DB has been closed.
+func (db *DB) Close() error {
+	if !db.setClosed() {
+		return ErrClosed
+	}
+
+	start := time.Now()
+	db.log("db@close closing")
+
+	// Clear the finalizer.
+	runtime.SetFinalizer(db, nil)
+
+	// Get compaction error.
+	var err error
+	select {
+	case err = <-db.compErrC:
+	default:
+	}
+
+	close(db.closeC)
+
+	// Wait for the close WaitGroup.
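+	// (closeC was closed above, so the background goroutines counted in
+	// closeW are unwinding; only after they drain is the write lock taken
+	// and the journal closed, avoiding a race with in-flight writes.)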
+ db.closeW.Wait() + + // Close journal. + db.writeLockC <- struct{}{} + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + + // Close session. + db.s.close() + db.logf("db@close done T·%v", time.Since(start)) + db.s.release() + + if db.closer != nil { + if err1 := db.closer.Close(); err == nil { + err = err1 + } + } + + // NIL'ing pointers. + db.s = nil + db.mem = nil + db.frozenMem = nil + db.journal = nil + db.journalWriter = nil + db.journalFile = nil + db.frozenJournalFile = nil + db.snapsRoot = snapshotElement{} + db.closer = nil + + return err +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go new file mode 100644 index 0000000..72163ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -0,0 +1,689 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync" + "time" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" +) + +var ( + errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") +) + +type cStats struct { + sync.Mutex + duration time.Duration + read uint64 + write uint64 +} + +func (p *cStats) add(n *cStatsStaging) { + p.Lock() + p.duration += n.duration + p.read += n.read + p.write += n.write + p.Unlock() +} + +func (p *cStats) get() (duration time.Duration, read, write uint64) { + p.Lock() + defer p.Unlock() + return p.duration, p.read, p.write +} + +type cStatsStaging struct { + start time.Time + duration time.Duration + on bool + read uint64 + write uint64 +} + +func (p *cStatsStaging) startTimer() { + if !p.on { + p.start = time.Now() + p.on = true + } +} + +func (p *cStatsStaging) stopTimer() { + if p.on { + p.duration += time.Since(p.start) + p.on = false + } +} + +type cMem struct { + s *session + level int + rec *sessionRecord +} + +func newCMem(s *session) *cMem { + return &cMem{s: s, rec: new(sessionRecord)} +} + +func (c *cMem) flush(mem *memdb.DB, level int) error { + s := c.s + + // Write memdb to table. + iter := mem.NewIterator(nil) + defer iter.Release() + t, n, err := s.tops.createFrom(iter) + if err != nil { + return err + } + + // Pick level. + if level < 0 { + level = s.version_NB().pickLevel(t.imin.ukey(), t.imax.ukey()) + } + c.rec.addTableFile(level, t) + + s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) + + c.level = level + return nil +} + +func (c *cMem) reset() { + c.rec = new(sessionRecord) +} + +func (c *cMem) commit(journal, seq uint64) error { + c.rec.setJournalNum(journal) + c.rec.setSeq(seq) + + // Commit changes. 
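+	// The record written here carries the new journal number and sequence,
+	// so recovery after a crash resumes from the correct point.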
+ return c.s.commit(c.rec) +} + +func (db *DB) compactionError() { + var err error +noerr: + for { + select { + case err = <-db.compErrSetC: + if err != nil { + goto haserr + } + case _, _ = <-db.closeC: + return + } + } +haserr: + for { + select { + case db.compErrC <- err: + case err = <-db.compErrSetC: + if err == nil { + goto noerr + } + case _, _ = <-db.closeC: + return + } + } +} + +type compactionTransactCounter int + +func (cnt *compactionTransactCounter) incr() { + *cnt++ +} + +func (db *DB) compactionTransact(name string, exec func(cnt *compactionTransactCounter) error, rollback func() error) { + defer func() { + if x := recover(); x != nil { + if x == errCompactionTransactExiting && rollback != nil { + if err := rollback(); err != nil { + db.logf("%s rollback error %q", name, err) + } + } + panic(x) + } + }() + + const ( + backoffMin = 1 * time.Second + backoffMax = 8 * time.Second + backoffMul = 2 * time.Second + ) + backoff := backoffMin + backoffT := time.NewTimer(backoff) + lastCnt := compactionTransactCounter(0) + for n := 0; ; n++ { + // Check wether the DB is closed. + if db.isClosed() { + db.logf("%s exiting", name) + db.compactionExitTransact() + } else if n > 0 { + db.logf("%s retrying N·%d", name, n) + } + + // Execute. + cnt := compactionTransactCounter(0) + err := exec(&cnt) + + // Set compaction error status. + select { + case db.compErrSetC <- err: + case _, _ = <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + if err == nil { + return + } + db.logf("%s error I·%d %q", name, cnt, err) + + // Reset backoff duration if counter is advancing. + if cnt > lastCnt { + backoff = backoffMin + lastCnt = cnt + } + + // Backoff. + backoffT.Reset(backoff) + if backoff < backoffMax { + backoff *= backoffMul + if backoff > backoffMax { + backoff = backoffMax + } + } + select { + case <-backoffT.C: + case _, _ = <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + } +} + +func (db *DB) compactionExitTransact() { + panic(errCompactionTransactExiting) +} + +func (db *DB) memCompaction() { + mem := db.getFrozenMem() + if mem == nil { + return + } + defer mem.decref() + + c := newCMem(db.s) + stats := new(cStatsStaging) + + db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size())) + + // Don't compact empty memdb. + if mem.mdb.Len() == 0 { + db.logf("mem@flush skipping") + // drop frozen mem + db.dropFrozenMem() + return + } + + // Pause table compaction. + ch := make(chan struct{}) + select { + case db.tcompPauseC <- (chan<- struct{})(ch): + case _, _ = <-db.closeC: + return + } + + db.compactionTransact("mem@flush", func(cnt *compactionTransactCounter) (err error) { + stats.startTimer() + defer stats.stopTimer() + return c.flush(mem.mdb, -1) + }, func() error { + for _, r := range c.rec.addedTables { + db.logf("mem@flush rollback @%d", r.num) + f := db.s.getTableFile(r.num) + if err := f.Remove(); err != nil { + return err + } + } + return nil + }) + + db.compactionTransact("mem@commit", func(cnt *compactionTransactCounter) (err error) { + stats.startTimer() + defer stats.stopTimer() + return c.commit(db.journalFile.Num(), db.frozenSeq) + }, nil) + + db.logf("mem@flush commited F·%d T·%v", len(c.rec.addedTables), stats.duration) + + for _, r := range c.rec.addedTables { + stats.write += r.size + } + db.compStats[c.level].add(stats) + + // Drop frozen mem. + db.dropFrozenMem() + + // Resume table compaction. + select { + case <-ch: + case _, _ = <-db.closeC: + return + } + + // Trigger table compaction. 
+ db.compTrigger(db.mcompTriggerC) +} + +func (db *DB) tableCompaction(c *compaction, noTrivial bool) { + rec := new(sessionRecord) + rec.addCompactionPointer(c.level, c.imax) + + if !noTrivial && c.trivial() { + t := c.tables[0][0] + db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) + rec.deleteTable(c.level, t.file.Num()) + rec.addTableFile(c.level+1, t) + db.compactionTransact("table@move", func(cnt *compactionTransactCounter) (err error) { + return db.s.commit(rec) + }, nil) + return + } + + var stats [2]cStatsStaging + for i, tables := range c.tables { + for _, t := range tables { + stats[i].read += t.size + // Insert deleted tables into record + rec.deleteTable(c.level+i, t.file.Num()) + } + } + sourceSize := int(stats[0].read + stats[1].read) + minSeq := db.minSeq() + db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) + + var snapUkey []byte + var snapHasUkey bool + var snapSeq uint64 + var snapIter int + var snapDropCnt int + var dropCnt int + db.compactionTransact("table@build", func(cnt *compactionTransactCounter) (err error) { + ukey := append([]byte{}, snapUkey...) + hasUkey := snapHasUkey + lseq := snapSeq + dropCnt = snapDropCnt + snapSched := snapIter == 0 + + var tw *tWriter + finish := func() error { + t, err := tw.finish() + if err != nil { + return err + } + rec.addTableFile(c.level+1, t) + stats[1].write += t.size + db.logf("table@build created L%d@%d N·%d S·%s %q:%q", c.level+1, t.file.Num(), tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) + return nil + } + + defer func() { + stats[1].stopTimer() + if tw != nil { + tw.drop() + tw = nil + } + }() + + stats[1].startTimer() + iter := c.newIterator() + defer iter.Release() + for i := 0; iter.Next(); i++ { + // Incr transact counter. + cnt.incr() + + // Skip until last state. + if i < snapIter { + continue + } + + ikey := iKey(iter.Key()) + + if c.shouldStopBefore(ikey) && tw != nil { + err = finish() + if err != nil { + return + } + snapSched = true + tw = nil + } + + // Scheduled for snapshot, snapshot will used to retry compaction + // if error occured. + if snapSched { + snapUkey = append(snapUkey[:0], ukey...) + snapHasUkey = hasUkey + snapSeq = lseq + snapIter = i + snapDropCnt = dropCnt + snapSched = false + } + + if seq, vt, ok := ikey.parseNum(); !ok { + // Don't drop error keys + ukey = ukey[:0] + hasUkey = false + lseq = kMaxSeq + } else { + if !hasUkey || db.s.icmp.uCompare(ikey.ukey(), ukey) != 0 { + // First occurrence of this user key + ukey = append(ukey[:0], ikey.ukey()...) + hasUkey = true + lseq = kMaxSeq + } + + drop := false + if lseq <= minSeq { + // Dropped because newer entry for same user key exist + drop = true // (A) + } else if vt == tDel && seq <= minSeq && c.baseLevelForKey(ukey) { + // For this user key: + // (1) there is no data in higher levels + // (2) data in lower levels will have larger seq numbers + // (3) data in layers that are being compacted here and have + // smaller seq numbers will be dropped in the next + // few iterations of this loop (by rule (A) above). + // Therefore this deletion marker is obsolete and can be dropped. + drop = true + } + + lseq = seq + if drop { + dropCnt++ + continue + } + } + + // Create new table if not already + if tw == nil { + // Check for pause event. + select { + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + case _, _ = <-db.closeC: + db.compactionExitTransact() + default: + } + + // Create new table. 
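+				// (Created lazily: a table writer exists only once an
+				// entry survives filtering, and the pause/close checks
+				// just above keep compaction responsive while building.)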
+ tw, err = db.s.tops.create() + if err != nil { + return + } + } + + // Write key/value into table + err = tw.append(ikey, iter.Value()) + if err != nil { + return + } + + // Finish table if it is big enough + if tw.tw.BytesLen() >= kMaxTableSize { + err = finish() + if err != nil { + return + } + snapSched = true + tw = nil + } + } + + err = iter.Error() + if err != nil { + return + } + + // Finish last table + if tw != nil && !tw.empty() { + err = finish() + if err != nil { + return + } + tw = nil + } + return + }, func() error { + for _, r := range rec.addedTables { + db.logf("table@build rollback @%d", r.num) + f := db.s.getTableFile(r.num) + if err := f.Remove(); err != nil { + return err + } + } + return nil + }) + + // Commit changes + db.compactionTransact("table@commit", func(cnt *compactionTransactCounter) (err error) { + stats[1].startTimer() + defer stats[1].stopTimer() + return db.s.commit(rec) + }, nil) + + resultSize := int(stats[1].write) + db.logf("table@compaction commited F%s S%s D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), dropCnt, stats[1].duration) + + // Save compaction stats + for i := range stats { + db.compStats[c.level+1].add(&stats[i]) + } +} + +func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { + db.logf("table@compaction range L%d %q:%q", level, umin, umax) + + if level >= 0 { + if c := db.s.getCompactionRange(level, umin, umax); c != nil { + db.tableCompaction(c, true) + } + } else { + v := db.s.version_NB() + + m := 1 + for i, t := range v.tables[1:] { + if t.overlaps(db.s.icmp, umin, umax, false) { + m = i + 1 + } + } + + for level := 0; level < m; level++ { + if c := db.s.getCompactionRange(level, umin, umax); c != nil { + db.tableCompaction(c, true) + } + } + } +} + +func (db *DB) tableAutoCompaction() { + if c := db.s.pickCompaction(); c != nil { + db.tableCompaction(c, false) + } +} + +func (db *DB) tableNeedCompaction() bool { + return db.s.version_NB().needCompaction() +} + +func (db *DB) pauseCompaction(ch chan<- struct{}) { + select { + case ch <- struct{}{}: + case _, _ = <-db.closeC: + db.compactionExitTransact() + } +} + +type cCmd interface { + ack(err error) +} + +type cIdle struct { + ackC chan<- error +} + +func (r cIdle) ack(err error) { + r.ackC <- err +} + +type cRange struct { + level int + min, max []byte + ackC chan<- error +} + +func (r cRange) ack(err error) { + defer func() { + recover() + }() + if r.ackC != nil { + r.ackC <- err + } +} + +func (db *DB) compSendIdle(compC chan<- cCmd) error { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cIdle{ch}: + case err := <-db.compErrC: + return err + case _, _ = <-db.closeC: + return ErrClosed + } + // Wait cmd. + return <-ch +} + +func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cRange{level, min, max, ch}: + case err := <-db.compErrC: + return err + case _, _ = <-db.closeC: + return ErrClosed + } + // Wait cmd. 
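+	// Unlike compSendIdle, the wait below also selects on compErrC so a
+	// failing compaction cannot leave the caller blocked on the ack channel.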
+ select { + case err = <-db.compErrC: + case err = <-ch: + } + return err +} + +func (db *DB) compTrigger(compTriggerC chan struct{}) { + select { + case compTriggerC <- struct{}{}: + default: + } +} + +func (db *DB) mCompaction() { + var x cCmd + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + select { + case x = <-db.mcompCmdC: + db.memCompaction() + x.ack(nil) + x = nil + case <-db.mcompTriggerC: + db.memCompaction() + case _, _ = <-db.closeC: + return + } + } +} + +func (db *DB) tCompaction() { + var x cCmd + var ackQ []cCmd + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + for i := range ackQ { + ackQ[i].ack(ErrClosed) + ackQ[i] = nil + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + if db.tableNeedCompaction() { + select { + case x = <-db.tcompCmdC: + case <-db.tcompTriggerC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case _, _ = <-db.closeC: + return + default: + } + } else { + for i := range ackQ { + ackQ[i].ack(nil) + ackQ[i] = nil + } + ackQ = ackQ[:0] + select { + case x = <-db.tcompCmdC: + case <-db.tcompTriggerC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case _, _ = <-db.closeC: + return + } + } + if x != nil { + switch cmd := x.(type) { + case cIdle: + ackQ = append(ackQ, x) + case cRange: + db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) + x.ack(nil) + } + x = nil + } + db.tableAutoCompaction() + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go new file mode 100644 index 0000000..f392c46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go @@ -0,0 +1,331 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "runtime" + "sync" + "sync/atomic" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") +) + +type memdbReleaser struct { + once sync.Once + m *memDB +} + +func (mr *memdbReleaser) Release() { + mr.once.Do(func() { + mr.m.decref() + }) +} + +func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + em, fm := db.getMems() + v := db.s.version() + + ti := v.getIterators(slice, ro) + n := len(ti) + 2 + i := make([]iterator.Iterator, 0, n) + emi := em.mdb.NewIterator(slice) + emi.SetReleaser(&memdbReleaser{m: em}) + i = append(i, emi) + if fm != nil { + fmi := fm.mdb.NewIterator(slice) + fmi.SetReleaser(&memdbReleaser{m: fm}) + i = append(i, fmi) + } + i = append(i, ti...) 
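+	// Merge order: effective memdb first, then the frozen memdb (if any),
+	// then the table iterators; the merged iterator below collapses them
+	// into a single view sorted by the internal-key comparer.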
+ strict := db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator) + mi := iterator.NewMergedIterator(i, db.s.icmp, strict) + mi.SetReleaser(&versionReleaser{v: v}) + return mi +} + +func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { + var islice *util.Range + if slice != nil { + islice = &util.Range{} + if slice.Start != nil { + islice.Start = newIKey(slice.Start, kMaxSeq, tSeek) + } + if slice.Limit != nil { + islice.Limit = newIKey(slice.Limit, kMaxSeq, tSeek) + } + } + rawIter := db.newRawIterator(islice, ro) + iter := &dbIter{ + db: db, + icmp: db.s.icmp, + iter: rawIter, + seq: seq, + strict: db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator), + key: make([]byte, 0), + value: make([]byte, 0), + } + atomic.AddInt32(&db.aliveIters, 1) + runtime.SetFinalizer(iter, (*dbIter).Release) + return iter +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +// dbIter represent an interator states over a database session. +type dbIter struct { + db *DB + icmp *iComparer + iter iterator.Iterator + seq uint64 + strict bool + + dir dir + key []byte + value []byte + err error + releaser util.Releaser +} + +func (i *dbIter) setErr(err error) { + i.err = err + i.key = nil + i.value = nil +} + +func (i *dbIter) iterErr() { + if err := i.iter.Error(); err != nil { + i.setErr(err) + } +} + +func (i *dbIter) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *dbIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.First() { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.Last() { + return i.prev() + } + i.dir = dirSOI + i.iterErr() + return false +} + +func (i *dbIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ikey := newIKey(key, i.seq, tSeek) + if i.iter.Seek(ikey) { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) next() bool { + for { + ukey, seq, t, ok := parseIkey(i.iter.Key()) + if ok { + if seq <= i.seq { + switch t { + case tDel: + // Skip deleted key. + i.key = append(i.key[:0], ukey...) + i.dir = dirForward + case tVal: + if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { + i.key = append(i.key[:0], ukey...) + i.value = append(i.value[:0], i.iter.Value()...) + i.dir = dirForward + return true + } + } + } + } else if i.strict { + i.setErr(errInvalidIkey) + break + } + if !i.iter.Next() { + i.dir = dirEOI + i.iterErr() + break + } + } + return false +} + +func (i *dbIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { + i.dir = dirEOI + i.iterErr() + return false + } + return i.next() +} + +func (i *dbIter) prev() bool { + i.dir = dirBackward + del := true + if i.iter.Valid() { + for { + ukey, seq, t, ok := parseIkey(i.iter.Key()) + if ok { + if seq <= i.seq { + if !del && i.icmp.uCompare(ukey, i.key) < 0 { + return true + } + del = (t == tDel) + if !del { + i.key = append(i.key[:0], ukey...) 
+ i.value = append(i.value[:0], i.iter.Value()...) + } + } + } else if i.strict { + i.setErr(errInvalidIkey) + return false + } + if !i.iter.Prev() { + break + } + } + } + if del { + i.dir = dirSOI + i.iterErr() + return false + } + return true +} + +func (i *dbIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + for i.iter.Prev() { + ukey, _, _, ok := parseIkey(i.iter.Key()) + if ok { + if i.icmp.uCompare(ukey, i.key) < 0 { + goto cont + } + } else if i.strict { + i.setErr(errInvalidIkey) + return false + } + } + i.dir = dirSOI + i.iterErr() + return false + } + +cont: + return i.prev() +} + +func (i *dbIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *dbIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *dbIter) Release() { + if i.dir != dirReleased { + // Clear the finalizer. + runtime.SetFinalizer(i, nil) + + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + + i.dir = dirReleased + i.key = nil + i.value = nil + i.iter.Release() + i.iter = nil + atomic.AddInt32(&i.db.aliveIters, -1) + i.db = nil + } +} + +func (i *dbIter) SetReleaser(releaser util.Releaser) { + if i.dir != dirReleased { + i.releaser = releaser + } +} + +func (i *dbIter) Error() error { + return i.err +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go new file mode 100644 index 0000000..c936175 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go @@ -0,0 +1,169 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "runtime" + "sync" + "sync/atomic" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type snapshotElement struct { + seq uint64 + ref int + // Next and previous pointers in the doubly-linked list of elements. + next, prev *snapshotElement +} + +// Initialize the snapshot. +func (db *DB) initSnapshot() { + db.snapsRoot.next = &db.snapsRoot + db.snapsRoot.prev = &db.snapsRoot +} + +// Acquires a snapshot, based on latest sequence. +func (db *DB) acquireSnapshot() *snapshotElement { + db.snapsMu.Lock() + seq := db.getSeq() + elem := db.snapsRoot.prev + if elem == &db.snapsRoot || elem.seq != seq { + at := db.snapsRoot.prev + next := at.next + elem = &snapshotElement{ + seq: seq, + prev: at, + next: next, + } + at.next = elem + next.prev = elem + } + elem.ref++ + db.snapsMu.Unlock() + return elem +} + +// Releases given snapshot element. +func (db *DB) releaseSnapshot(elem *snapshotElement) { + if !db.isClosed() { + db.snapsMu.Lock() + elem.ref-- + if elem.ref == 0 { + elem.prev.next = elem.next + elem.next.prev = elem.prev + elem.next = nil + elem.prev = nil + } else if elem.ref < 0 { + panic("leveldb: Snapshot: negative element reference") + } + db.snapsMu.Unlock() + } +} + +// Gets minimum sequence that not being snapshoted. 
+func (db *DB) minSeq() uint64 {
+	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+	elem := db.snapsRoot.prev
+	if elem != &db.snapsRoot {
+		return elem.seq
+	}
+	return db.getSeq()
+}
+
+// Snapshot is a DB snapshot.
+type Snapshot struct {
+	db *DB
+	elem *snapshotElement
+	mu sync.RWMutex
+	released bool
+}
+
+// Creates a new snapshot object.
+func (db *DB) newSnapshot() *Snapshot {
+	snap := &Snapshot{
+		db: db,
+		elem: db.acquireSnapshot(),
+	}
+	atomic.AddInt32(&db.aliveSnaps, 1)
+	runtime.SetFinalizer(snap, (*Snapshot).Release)
+	return snap
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.get(key, snap.elem.seq, ro)
+}
+
+// NewIterator returns an iterator for the snapshot of the underlying DB.
+// The returned iterator is not goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to contain only keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling the Release method.
+// Releasing the snapshot doesn't mean releasing the iterator too; the
+// iterator remains valid until released.
+//
+// Also read the Iterator documentation of the leveldb/iterator package.
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := snap.db.ok(); err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+	if snap.released {
+		return iterator.NewEmptyIterator(ErrSnapshotReleased)
+	}
+	// Since the iterator already holds a version ref, it doesn't need to
+	// hold a snapshot ref.
+	return snap.db.newIterator(snap.elem.seq, slice, ro)
+}
+
+// Release releases the snapshot. This will not release any returned
+// iterators; the iterators remain valid until released or until the
+// underlying DB is closed.
+//
+// Other methods should not be called after the snapshot has been released.
+func (snap *Snapshot) Release() {
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+
+	if !snap.released {
+		// Clear the finalizer.
+		runtime.SetFinalizer(snap, nil)
+
+		snap.released = true
+		snap.db.releaseSnapshot(snap.elem)
+		atomic.AddInt32(&snap.db.aliveSnaps, -1)
+		snap.db = nil
+		snap.elem = nil
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
new file mode 100644
index 0000000..6b53805
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -0,0 +1,202 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
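+
+// Editor's note: this file tracks the DB's mutable memdb state. memDB
+// wraps memdb.DB with a reference count so readers can pin the effective
+// or frozen memdb while the writer rotates journals, and standard-capacity
+// memdbs are recycled through the one-slot memPool.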
+
+package leveldb
+
+import (
+	"sync/atomic"
+	"time"
+
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
+)
+
+type memDB struct {
+	db *DB
+	mdb *memdb.DB
+	ref int32
+}
+
+func (m *memDB) incref() {
+	atomic.AddInt32(&m.ref, 1)
+}
+
+func (m *memDB) decref() {
+	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+		// Only put back memdb with std capacity.
+		if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
+			m.mdb.Reset()
+			m.db.mpoolPut(m.mdb)
+		}
+		m.db = nil
+		m.mdb = nil
+	} else if ref < 0 {
+		panic("negative memdb ref")
+	}
+}
+
+// Get the latest sequence number.
+func (db *DB) getSeq() uint64 {
+	return atomic.LoadUint64(&db.seq)
+}
+
+// Atomically adds delta to seq.
+func (db *DB) addSeq(delta uint64) {
+	atomic.AddUint64(&db.seq, delta)
+}
+
+func (db *DB) mpoolPut(mem *memdb.DB) {
+	defer func() {
+		recover()
+	}()
+	select {
+	case db.memPool <- mem:
+	default:
+	}
+}
+
+func (db *DB) mpoolGet() *memdb.DB {
+	select {
+	case mem := <-db.memPool:
+		return mem
+	default:
+		return nil
+	}
+}
+
+func (db *DB) mpoolDrain() {
+	ticker := time.NewTicker(30 * time.Second)
+	for {
+		select {
+		case <-ticker.C:
+			select {
+			case <-db.memPool:
+			default:
+			}
+		case _, _ = <-db.closeC:
+			close(db.memPool)
+			return
+		}
+	}
+}
+
+// Create a new memdb and freeze the old one; needs external synchronization.
+// newMem is only called synchronously by the writer.
+func (db *DB) newMem(n int) (mem *memDB, err error) {
+	num := db.s.allocFileNum()
+	file := db.s.getJournalFile(num)
+	w, err := file.Create()
+	if err != nil {
+		db.s.reuseFileNum(num)
+		return
+	}
+
+	db.memMu.Lock()
+	defer db.memMu.Unlock()
+
+	if db.frozenMem != nil {
+		panic("still has frozen mem")
+	}
+
+	if db.journal == nil {
+		db.journal = journal.NewWriter(w)
+	} else {
+		db.journal.Reset(w)
+		db.journalWriter.Close()
+		db.frozenJournalFile = db.journalFile
+	}
+	db.journalWriter = w
+	db.journalFile = file
+	db.frozenMem = db.mem
+	mdb := db.mpoolGet()
+	if mdb == nil || mdb.Capacity() < n {
+		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+	}
+	mem = &memDB{
+		db: db,
+		mdb: mdb,
+		ref: 2,
+	}
+	db.mem = mem
+	// The seq is only incremented by the writer, and whoever calls newMem
+	// must hold the write lock, so no additional synchronization is
+	// needed here.
+	db.frozenSeq = db.seq
+	return
+}
+
+// Get all memdbs.
+func (db *DB) getMems() (e, f *memDB) {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.mem == nil {
+		panic("nil effective mem")
+	}
+	db.mem.incref()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
+	return db.mem, db.frozenMem
+}
+
+// Get the effective (writable) memdb.
+func (db *DB) getEffectiveMem() *memDB {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.mem == nil {
+		panic("nil effective mem")
+	}
+	db.mem.incref()
+	return db.mem
+}
+
+// Check whether we have a frozen memdb.
+func (db *DB) hasFrozenMem() bool {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	return db.frozenMem != nil
+}
+
+// Get the frozen memdb.
+func (db *DB) getFrozenMem() *memDB {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
+	return db.frozenMem
+}
+
+// Drop the frozen memdb; assumes the frozen memdb is not nil.
+func (db *DB) dropFrozenMem() { + db.memMu.Lock() + if err := db.frozenJournalFile.Remove(); err != nil { + db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err) + } else { + db.logf("journal@remove removed @%d", db.frozenJournalFile.Num()) + } + db.frozenJournalFile = nil + db.frozenMem.decref() + db.frozenMem = nil + db.memMu.Unlock() +} + +// Set closed flag; return true if not already closed. +func (db *DB) setClosed() bool { + return atomic.CompareAndSwapUint32(&db.closed, 0, 1) +} + +// Check whether DB was closed. +func (db *DB) isClosed() bool { + return atomic.LoadUint32(&db.closed) != 0 +} + +// Check read ok status. +func (db *DB) ok() error { + if db.isClosed() { + return ErrClosed + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go new file mode 100644 index 0000000..35a4fe7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go @@ -0,0 +1,1890 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "math/rand" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + "unsafe" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func tkey(i int) []byte { + return []byte(fmt.Sprintf("%016d", i)) +} + +func tval(seed, n int) []byte { + r := rand.New(rand.NewSource(int64(seed))) + return randomString(r, n) +} + +type dbHarness struct { + t *testing.T + + stor *testStorage + db *DB + o *opt.Options + ro *opt.ReadOptions + wo *opt.WriteOptions +} + +func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { + h := new(dbHarness) + h.init(t, o) + return h +} + +func newDbHarness(t *testing.T) *dbHarness { + return newDbHarnessWopt(t, &opt.Options{}) +} + +func (h *dbHarness) init(t *testing.T, o *opt.Options) { + h.t = t + h.stor = newTestStorage(t) + h.o = o + h.ro = nil + h.wo = nil + + if err := h.openDB0(); err != nil { + // So that it will come after fatal message. 
+ defer h.stor.Close() + h.t.Fatal("Open (init): got error: ", err) + } +} + +func (h *dbHarness) openDB0() (err error) { + h.t.Log("opening DB") + h.db, err = Open(h.stor, h.o) + return +} + +func (h *dbHarness) openDB() { + if err := h.openDB0(); err != nil { + h.t.Fatal("Open: got error: ", err) + } +} + +func (h *dbHarness) closeDB0() error { + h.t.Log("closing DB") + return h.db.Close() +} + +func (h *dbHarness) closeDB() { + if err := h.closeDB0(); err != nil { + h.t.Error("Close: got error: ", err) + } + h.stor.CloseCheck() + runtime.GC() +} + +func (h *dbHarness) reopenDB() { + h.closeDB() + h.openDB() +} + +func (h *dbHarness) close() { + h.closeDB0() + h.db = nil + h.stor.Close() + h.stor = nil + runtime.GC() +} + +func (h *dbHarness) openAssert(want bool) { + db, err := Open(h.stor, h.o) + if err != nil { + if want { + h.t.Error("Open: assert: got error: ", err) + } else { + h.t.Log("Open: assert: got error (expected): ", err) + } + } else { + if !want { + h.t.Error("Open: assert: expect error") + } + db.Close() + } +} + +func (h *dbHarness) write(batch *Batch) { + if err := h.db.Write(batch, h.wo); err != nil { + h.t.Error("Write: got error: ", err) + } +} + +func (h *dbHarness) put(key, value string) { + if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { + h.t.Error("Put: got error: ", err) + } +} + +func (h *dbHarness) putMulti(n int, low, hi string) { + for i := 0; i < n; i++ { + h.put(low, "begin") + h.put(hi, "end") + h.compactMem() + } +} + +func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { + t := h.t + db := h.db + + var res uint64 + v := db.s.version() + for i, tt := range v.tables[1 : len(v.tables)-1] { + level := i + 1 + next := v.tables[level+1] + for _, t := range tt { + r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) + sum := r.size() + if sum > res { + res = sum + } + } + } + v.release() + + if res > want { + t.Errorf("next level overlapping bytes is more than %d, got=%d", want, res) + } +} + +func (h *dbHarness) delete(key string) { + t := h.t + db := h.db + + err := db.Delete([]byte(key), h.wo) + if err != nil { + t.Error("Delete: got error: ", err) + } +} + +func (h *dbHarness) assertNumKeys(want int) { + iter := h.db.NewIterator(nil, h.ro) + defer iter.Release() + got := 0 + for iter.Next() { + got++ + } + if err := iter.Error(); err != nil { + h.t.Error("assertNumKeys: ", err) + } + if want != got { + h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) + } +} + +func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { + t := h.t + v, err := db.Get([]byte(key), h.ro) + switch err { + case ErrNotFound: + if expectFound { + t.Errorf("Get: key '%s' not found, want found", key) + } + case nil: + found = true + if !expectFound { + t.Errorf("Get: key '%s' found, want not found", key) + } + default: + t.Error("Get: got error: ", err) + } + return +} + +func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { + return h.getr(h.db, key, expectFound) +} + +func (h *dbHarness) getValr(db Reader, key, value string) { + t := h.t + found, r := h.getr(db, key, true) + if !found { + return + } + rval := string(r) + if rval != value { + t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value) + } +} + +func (h *dbHarness) getVal(key, value string) { + h.getValr(h.db, key, value) +} + +func (h *dbHarness) allEntriesFor(key, want string) { + t := h.t + db := h.db + s := db.s + + ikey := newIKey([]byte(key), kMaxSeq, tVal) + iter := db.newRawIterator(nil, 
nil) + if !iter.Seek(ikey) && iter.Error() != nil { + t.Error("AllEntries: error during seek, err: ", iter.Error()) + return + } + res := "[ " + first := true + for iter.Valid() { + rkey := iKey(iter.Key()) + if _, t, ok := rkey.parseNum(); ok { + if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 { + break + } + if !first { + res += ", " + } + first = false + switch t { + case tVal: + res += string(iter.Value()) + case tDel: + res += "DEL" + } + } else { + if !first { + res += ", " + } + first = false + res += "CORRUPTED" + } + iter.Next() + } + if !first { + res += " " + } + res += "]" + if res != want { + t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) + } +} + +// Return a string that contains all key,value pairs in order, +// formatted like "(k1->v1)(k2->v2)". +func (h *dbHarness) getKeyVal(want string) { + t := h.t + db := h.db + + s, err := db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + res := "" + iter := s.NewIterator(nil, nil) + for iter.Next() { + res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) + } + iter.Release() + + if res != want { + t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) + } + s.Release() +} + +func (h *dbHarness) waitCompaction() { + t := h.t + db := h.db + if err := db.compSendIdle(db.tcompCmdC); err != nil { + t.Error("compaction error: ", err) + } +} + +func (h *dbHarness) waitMemCompaction() { + t := h.t + db := h.db + + if err := db.compSendIdle(db.mcompCmdC); err != nil { + t.Error("compaction error: ", err) + } +} + +func (h *dbHarness) compactMem() { + t := h.t + db := h.db + + db.writeLockC <- struct{}{} + defer func() { + <-db.writeLockC + }() + + if _, err := db.rotateMem(0); err != nil { + t.Error("compaction error: ", err) + } + if err := db.compSendIdle(db.mcompCmdC); err != nil { + t.Error("compaction error: ", err) + } + + if h.totalTables() == 0 { + t.Error("zero tables after mem compaction") + } +} + +func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { + t := h.t + db := h.db + + var _min, _max []byte + if min != "" { + _min = []byte(min) + } + if max != "" { + _max = []byte(max) + } + + if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil { + if wanterr { + t.Log("CompactRangeAt: got error (expected): ", err) + } else { + t.Error("CompactRangeAt: got error: ", err) + } + } else if wanterr { + t.Error("CompactRangeAt: expect error") + } +} + +func (h *dbHarness) compactRangeAt(level int, min, max string) { + h.compactRangeAtErr(level, min, max, false) +} + +func (h *dbHarness) compactRange(min, max string) { + t := h.t + db := h.db + + var r util.Range + if min != "" { + r.Start = []byte(min) + } + if max != "" { + r.Limit = []byte(max) + } + if err := db.CompactRange(r); err != nil { + t.Error("CompactRange: got error: ", err) + } +} + +func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) { + t := h.t + db := h.db + + s, err := db.SizeOf([]util.Range{ + {[]byte(start), []byte(limit)}, + }) + if err != nil { + t.Error("SizeOf: got error: ", err) + } + if s.Sum() < low || s.Sum() > hi { + t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d", + shorten(start), shorten(limit), low, hi, s.Sum()) + } +} + +func (h *dbHarness) getSnapshot() (s *Snapshot) { + s, err := h.db.GetSnapshot() + if err != nil { + h.t.Fatal("GetSnapshot: got error: ", err) + } + return +} +func (h *dbHarness) tablesPerLevel(want string) { + res := "" + nz := 0 + v := h.db.s.version() + 
for level, tt := range v.tables { + if level > 0 { + res += "," + } + res += fmt.Sprint(len(tt)) + if len(tt) > 0 { + nz = len(res) + } + } + v.release() + res = res[:nz] + if res != want { + h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) + } +} + +func (h *dbHarness) totalTables() (n int) { + v := h.db.s.version() + for _, tt := range v.tables { + n += len(tt) + } + v.release() + return +} + +type keyValue interface { + Key() []byte + Value() []byte +} + +func testKeyVal(t *testing.T, kv keyValue, want string) { + res := string(kv.Key()) + "->" + string(kv.Value()) + if res != want { + t.Errorf("invalid key/value, want=%q, got=%q", want, res) + } +} + +func numKey(num int) string { + return fmt.Sprintf("key%06d", num) +} + +var _bloom_filter = filter.NewBloomFilter(10) + +func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { + for i := 0; i < 4; i++ { + func() { + switch i { + case 0: + case 1: + if o == nil { + o = &opt.Options{Filter: _bloom_filter} + } else { + old := o + o = &opt.Options{} + *o = *old + o.Filter = _bloom_filter + } + case 2: + if o == nil { + o = &opt.Options{Compression: opt.NoCompression} + } else { + old := o + o = &opt.Options{} + *o = *old + o.Compression = opt.NoCompression + } + } + h := newDbHarnessWopt(t, o) + defer h.close() + switch i { + case 3: + h.reopenDB() + } + f(h) + }() + } +} + +func trun(t *testing.T, f func(h *dbHarness)) { + truno(t, nil, f) +} + +func testAligned(t *testing.T, name string, offset uintptr) { + if offset%8 != 0 { + t.Errorf("field %s offset is not 64-bit aligned", name) + } +} + +func Test_FieldsAligned(t *testing.T) { + p1 := new(DB) + testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) + p2 := new(session) + testAligned(t, "session.stFileNum", unsafe.Offsetof(p2.stFileNum)) + testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) + testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) + testAligned(t, "session.stSeq", unsafe.Offsetof(p2.stSeq)) +} + +func TestDb_Locking(t *testing.T) { + h := newDbHarness(t) + defer h.stor.Close() + h.openAssert(false) + h.closeDB() + h.openAssert(true) +} + +func TestDb_Empty(t *testing.T) { + trun(t, func(h *dbHarness) { + h.get("foo", false) + + h.reopenDB() + h.get("foo", false) + }) +} + +func TestDb_ReadWrite(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.getVal("foo", "v1") + h.put("bar", "v2") + h.put("foo", "v3") + h.getVal("foo", "v3") + h.getVal("bar", "v2") + + h.reopenDB() + h.getVal("foo", "v3") + h.getVal("bar", "v2") + }) +} + +func TestDb_PutDeleteGet(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.getVal("foo", "v1") + h.put("foo", "v2") + h.getVal("foo", "v2") + h.delete("foo") + h.get("foo", false) + + h.reopenDB() + h.get("foo", false) + }) +} + +func TestDb_EmptyBatch(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.get("foo", false) + err := h.db.Write(new(Batch), h.wo) + if err != nil { + t.Error("writing empty batch yield error: ", err) + } + h.get("foo", false) +} + +func TestDb_GetFromFrozen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100}) + defer h.close() + + h.put("foo", "v1") + h.getVal("foo", "v1") + + h.stor.DelaySync(storage.TypeTable) // Block sync calls + h.put("k1", strings.Repeat("x", 100000)) // Fill memtable + h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction + for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { + time.Sleep(10 * time.Microsecond) + } + if h.db.getFrozenMem() == 
nil { + h.stor.ReleaseSync(storage.TypeTable) + t.Fatal("No frozen mem") + } + h.getVal("foo", "v1") + h.stor.ReleaseSync(storage.TypeTable) // Release sync calls + + h.reopenDB() + h.getVal("foo", "v1") + h.get("k1", true) + h.get("k2", true) +} + +func TestDb_GetFromTable(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.compactMem() + h.getVal("foo", "v1") + }) +} + +func TestDb_GetSnapshot(t *testing.T) { + trun(t, func(h *dbHarness) { + bar := strings.Repeat("b", 200) + h.put("foo", "v1") + h.put(bar, "v1") + + snap, err := h.db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + + h.put("foo", "v2") + h.put(bar, "v2") + + h.getVal("foo", "v2") + h.getVal(bar, "v2") + h.getValr(snap, "foo", "v1") + h.getValr(snap, bar, "v1") + + h.compactMem() + + h.getVal("foo", "v2") + h.getVal(bar, "v2") + h.getValr(snap, "foo", "v1") + h.getValr(snap, bar, "v1") + + snap.Release() + + h.reopenDB() + h.getVal("foo", "v2") + h.getVal(bar, "v2") + }) +} + +func TestDb_GetLevel0Ordering(t *testing.T) { + trun(t, func(h *dbHarness) { + for i := 0; i < 4; i++ { + h.put("bar", fmt.Sprintf("b%d", i)) + h.put("foo", fmt.Sprintf("v%d", i)) + h.compactMem() + } + h.getVal("foo", "v3") + h.getVal("bar", "b3") + + v := h.db.s.version() + t0len := v.tLen(0) + v.release() + if t0len < 2 { + t.Errorf("level-0 tables is less than 2, got %d", t0len) + } + + h.reopenDB() + h.getVal("foo", "v3") + h.getVal("bar", "b3") + }) +} + +func TestDb_GetOrderedByLevels(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.compactMem() + h.compactRange("a", "z") + h.getVal("foo", "v1") + h.put("foo", "v2") + h.compactMem() + h.getVal("foo", "v2") + }) +} + +func TestDb_GetPicksCorrectFile(t *testing.T) { + trun(t, func(h *dbHarness) { + // Arrange to have multiple files in a non-level-0 level. + h.put("a", "va") + h.compactMem() + h.compactRange("a", "b") + h.put("x", "vx") + h.compactMem() + h.compactRange("x", "y") + h.put("f", "vf") + h.compactMem() + h.compactRange("f", "g") + + h.getVal("a", "va") + h.getVal("f", "vf") + h.getVal("x", "vx") + + h.compactRange("", "") + h.getVal("a", "va") + h.getVal("f", "vf") + h.getVal("x", "vx") + }) +} + +func TestDb_GetEncountersEmptyLevel(t *testing.T) { + trun(t, func(h *dbHarness) { + // Arrange for the following to happen: + // * sstable A in level 0 + // * nothing in level 1 + // * sstable B in level 2 + // Then do enough Get() calls to arrange for an automatic compaction + // of sstable A. A bug would cause the compaction to be marked as + // occuring at level 1 (instead of the correct level 0). + + // Step 1: First place sstables in levels 0 and 2 + for i := 0; ; i++ { + if i >= 100 { + t.Fatal("could not fill levels-0 and level-2") + } + v := h.db.s.version() + if v.tLen(0) > 0 && v.tLen(2) > 0 { + v.release() + break + } + v.release() + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + + h.getVal("a", "begin") + h.getVal("z", "end") + } + + // Step 2: clear level 1 if necessary. 
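+		// Compacting the whole key range at level 1 pushes any level-1
+		// table down a level, producing the "1,0,1" shape asserted below.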
+ h.compactRangeAt(1, "", "") + h.tablesPerLevel("1,0,1") + + h.getVal("a", "begin") + h.getVal("z", "end") + + // Step 3: read a bunch of times + for i := 0; i < 200; i++ { + h.get("missing", false) + } + + // Step 4: Wait for compaction to finish + h.waitCompaction() + + v := h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + v.release() + + h.getVal("a", "begin") + h.getVal("z", "end") + }) +} + +func TestDb_IterMultiWithDelete(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("a", "va") + h.put("b", "vb") + h.put("c", "vc") + h.delete("b") + h.get("b", false) + + iter := h.db.NewIterator(nil, nil) + iter.Seek([]byte("c")) + testKeyVal(t, iter, "c->vc") + iter.Prev() + testKeyVal(t, iter, "a->va") + iter.Release() + + h.compactMem() + + iter = h.db.NewIterator(nil, nil) + iter.Seek([]byte("c")) + testKeyVal(t, iter, "c->vc") + iter.Prev() + testKeyVal(t, iter, "a->va") + iter.Release() + }) +} + +func TestDb_IteratorPinsRef(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "hello") + + // Get iterator that will yield the current contents of the DB. + iter := h.db.NewIterator(nil, nil) + + // Write to force compactions + h.put("foo", "newvalue1") + for i := 0; i < 100; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } + h.put("foo", "newvalue2") + + iter.First() + testKeyVal(t, iter, "foo->hello") + if iter.Next() { + t.Errorf("expect eof") + } + iter.Release() +} + +func TestDb_Recover(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.put("baz", "v5") + + h.reopenDB() + h.getVal("foo", "v1") + + h.getVal("foo", "v1") + h.getVal("baz", "v5") + h.put("bar", "v2") + h.put("foo", "v3") + + h.reopenDB() + h.getVal("foo", "v3") + h.put("foo", "v4") + h.getVal("foo", "v4") + h.getVal("bar", "v2") + h.getVal("baz", "v5") + }) +} + +func TestDb_RecoverWithEmptyJournal(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.put("foo", "v2") + + h.reopenDB() + h.reopenDB() + h.put("foo", "v3") + + h.reopenDB() + h.getVal("foo", "v3") + }) +} + +func TestDb_RecoverDuringMemtableCompaction(t *testing.T) { + truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) { + + h.stor.DelaySync(storage.TypeTable) + h.put("big1", strings.Repeat("x", 10000000)) + h.put("big2", strings.Repeat("y", 1000)) + h.put("bar", "v2") + h.stor.ReleaseSync(storage.TypeTable) + + h.reopenDB() + h.getVal("bar", "v2") + h.getVal("big1", strings.Repeat("x", 10000000)) + h.getVal("big2", strings.Repeat("y", 1000)) + }) +} + +func TestDb_MinorCompactionsHappen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000}) + defer h.close() + + n := 500 + + key := func(i int) string { + return fmt.Sprintf("key%06d", i) + } + + for i := 0; i < n; i++ { + h.put(key(i), key(i)+strings.Repeat("v", 1000)) + } + + for i := 0; i < n; i++ { + h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) + } + + h.reopenDB() + for i := 0; i < n; i++ { + h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) + } +} + +func TestDb_RecoverWithLargeJournal(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("big1", strings.Repeat("1", 200000)) + h.put("big2", strings.Repeat("2", 200000)) + h.put("small3", strings.Repeat("3", 10)) + h.put("small4", strings.Repeat("4", 10)) + h.tablesPerLevel("") + + // Make sure that if we re-open with a small write buffer size that + // we flush table files in the middle of a large journal file. 
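+	// The journal written above holds roughly 400KB of values, so a 100KB
+	// write buffer must produce more than one level-0 table on recovery.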
+ h.o.WriteBuffer = 100000 + h.reopenDB() + h.getVal("big1", strings.Repeat("1", 200000)) + h.getVal("big2", strings.Repeat("2", 200000)) + h.getVal("small3", strings.Repeat("3", 10)) + h.getVal("small4", strings.Repeat("4", 10)) + v := h.db.s.version() + if v.tLen(0) <= 1 { + t.Errorf("tables-0 less than one") + } + v.release() +} + +func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + WriteBuffer: 10000000, + Compression: opt.NoCompression, + }) + defer h.close() + + v := h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + v.release() + + n := 80 + + // Write 8MB (80 values, each 100K) + for i := 0; i < n; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } + + // Reopening moves updates to level-0 + h.reopenDB() + h.compactRangeAt(0, "", "") + + v = h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + if v.tLen(1) <= 1 { + t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) + } + v.release() + + for i := 0; i < n; i++ { + h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } +} + +func TestDb_RepeatedWritesToSameKey(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) + defer h.close() + + maxTables := kNumLevels + kL0_StopWritesTrigger + + value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) + for i := 0; i < 5*maxTables; i++ { + h.put("key", value) + n := h.totalTables() + if n > maxTables { + t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) + } + } +} + +func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) + defer h.close() + + h.reopenDB() + + maxTables := kNumLevels + kL0_StopWritesTrigger + + value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) + for i := 0; i < 5*maxTables; i++ { + h.put("key", value) + n := h.totalTables() + if n > maxTables { + t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) + } + } +} + +func TestDb_SparseMerge(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + defer h.close() + + h.putMulti(kNumLevels, "A", "Z") + + // Suppose there is: + // small amount of data with prefix A + // large amount of data with prefix B + // small amount of data with prefix C + // and that recent updates have made small changes to all three prefixes. + // Check that we do not do a compaction that merges all of B in one shot. 
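+	// Write ~100MB of "B" keys (100000 values of 1KB each) between two
+	// single keys, so merging all of B in one compaction would far exceed
+	// the overlap bound checked by maxNextLevelOverlappingBytes below.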
+ h.put("A", "va") + value := strings.Repeat("x", 1000) + for i := 0; i < 100000; i++ { + h.put(fmt.Sprintf("B%010d", i), value) + } + h.put("C", "vc") + h.compactMem() + h.compactRangeAt(0, "", "") + h.waitCompaction() + + // Make sparse update + h.put("A", "va2") + h.put("B100", "bvalue2") + h.put("C", "vc2") + h.compactMem() + + h.maxNextLevelOverlappingBytes(20 * 1048576) + h.compactRangeAt(0, "", "") + h.waitCompaction() + h.maxNextLevelOverlappingBytes(20 * 1048576) + h.compactRangeAt(1, "", "") + h.waitCompaction() + h.maxNextLevelOverlappingBytes(20 * 1048576) +} + +func TestDb_SizeOf(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + Compression: opt.NoCompression, + WriteBuffer: 10000000, + }) + defer h.close() + + h.sizeAssert("", "xyz", 0, 0) + h.reopenDB() + h.sizeAssert("", "xyz", 0, 0) + + // Write 8MB (80 values, each 100K) + n := 80 + s1 := 100000 + s2 := 105000 + + for i := 0; i < n; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) + } + + // 0 because SizeOf() does not account for memtable space + h.sizeAssert("", numKey(50), 0, 0) + + for r := 0; r < 3; r++ { + h.reopenDB() + + for cs := 0; cs < n; cs += 10 { + for i := 0; i < n; i += 10 { + h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) + h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) + h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) + } + + h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) + h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) + + h.compactRangeAt(0, numKey(cs), numKey(cs+9)) + } + + v := h.db.s.version() + if v.tLen(0) != 0 { + t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) + } + if v.tLen(1) == 0 { + t.Error("level-1 tables was zero") + } + v.release() + } +} + +func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + defer h.close() + + sizes := []uint64{ + 10000, + 10000, + 100000, + 10000, + 100000, + 10000, + 300000, + 10000, + } + + for i, n := range sizes { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) + } + + for r := 0; r < 3; r++ { + h.reopenDB() + + var x uint64 + for i, n := range sizes { + y := x + if i > 0 { + y += 1000 + } + h.sizeAssert("", numKey(i), x, y) + x += n + } + + h.sizeAssert(numKey(3), numKey(5), 110000, 111000) + + h.compactRangeAt(0, "", "") + } +} + +func TestDb_Snapshot(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + s1 := h.getSnapshot() + h.put("foo", "v2") + s2 := h.getSnapshot() + h.put("foo", "v3") + s3 := h.getSnapshot() + h.put("foo", "v4") + + h.getValr(s1, "foo", "v1") + h.getValr(s2, "foo", "v2") + h.getValr(s3, "foo", "v3") + h.getVal("foo", "v4") + + s3.Release() + h.getValr(s1, "foo", "v1") + h.getValr(s2, "foo", "v2") + h.getVal("foo", "v4") + + s1.Release() + h.getValr(s2, "foo", "v2") + h.getVal("foo", "v4") + + s2.Release() + h.getVal("foo", "v4") + }) +} + +func TestDb_HiddenValuesAreRemoved(t *testing.T) { + trun(t, func(h *dbHarness) { + s := h.db.s + + h.put("foo", "v1") + h.compactMem() + m := kMaxMemCompactLevel + v := s.version() + num := v.tLen(m) + v.release() + if num != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, num) + } + + // Place a table at level last-1 to prevent merging with preceding mutation + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + v = s.version() + if v.tLen(m) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) + } + if v.tLen(m-1) != 1 { + 
t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) + } + v.release() + + h.delete("foo") + h.put("foo", "v2") + h.allEntriesFor("foo", "[ v2, DEL, v1 ]") + h.compactMem() + h.allEntriesFor("foo", "[ v2, DEL, v1 ]") + h.compactRangeAt(m-2, "", "z") + // DEL eliminated, but v1 remains because we aren't compacting that level + // (DEL can be eliminated because v2 hides v1). + h.allEntriesFor("foo", "[ v2, v1 ]") + h.compactRangeAt(m-1, "", "") + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). + h.allEntriesFor("foo", "[ v2 ]") + }) +} + +func TestDb_DeletionMarkers2(t *testing.T) { + h := newDbHarness(t) + defer h.close() + s := h.db.s + + h.put("foo", "v1") + h.compactMem() + m := kMaxMemCompactLevel + v := s.version() + num := v.tLen(m) + v.release() + if num != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, num) + } + + // Place a table at level last-1 to prevent merging with preceding mutation + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + v = s.version() + if v.tLen(m) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) + } + if v.tLen(m-1) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) + } + v.release() + + h.delete("foo") + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactMem() // Moves to level last-2 + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactRangeAt(m-2, "", "") + // DEL kept: "last" file overlaps + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactRangeAt(m-1, "", "") + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). + h.allEntriesFor("foo", "[ ]") +} + +func TestDb_CompactionTableOpenError(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1}) + defer h.close() + + im := 10 + jm := 10 + for r := 0; r < 2; r++ { + for i := 0; i < im; i++ { + for j := 0; j < jm; j++ { + h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) + } + h.compactMem() + } + } + + if n := h.totalTables(); n != im*2 { + t.Errorf("total tables is %d, want %d", n, im) + } + + h.stor.SetOpenErr(storage.TypeTable) + go h.db.CompactRange(util.Range{}) + if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil { + t.Log("compaction error: ", err) + } + h.closeDB0() + h.openDB() + h.stor.SetOpenErr(0) + + for i := 0; i < im; i++ { + for j := 0; j < jm; j++ { + h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) + } + } +} + +func TestDb_OverlapInLevel0(t *testing.T) { + trun(t, func(h *dbHarness) { + if kMaxMemCompactLevel != 2 { + t.Fatal("fix test to reflect the config") + } + + // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. + h.put("100", "v100") + h.put("999", "v999") + h.compactMem() + h.delete("100") + h.delete("999") + h.compactMem() + h.tablesPerLevel("0,1,1") + + // Make files spanning the following ranges in level-0: + // files[0] 200 .. 900 + // files[1] 300 .. 500 + // Note that files are sorted by min key. + h.put("300", "v300") + h.put("500", "v500") + h.compactMem() + h.put("200", "v200") + h.put("600", "v600") + h.put("900", "v900") + h.compactMem() + h.tablesPerLevel("2,1,1") + + // Compact away the placeholder files we created initially + h.compactRangeAt(1, "", "") + h.compactRangeAt(2, "", "") + h.tablesPerLevel("2") + + // Do a memtable compaction. Before bug-fix, the compaction would + // not detect the overlap with level-0 files and would incorrectly place + // the deletion in a deeper level. 
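+		// The tombstone must land in level 0, where it overlaps the files
+		// written above; tablesPerLevel("3") verifies it stayed there.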
+ h.delete("600") + h.compactMem() + h.tablesPerLevel("3") + h.get("600", false) + }) +} + +func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.reopenDB() + h.put("b", "v") + h.reopenDB() + h.delete("b") + h.delete("a") + h.reopenDB() + h.delete("a") + h.reopenDB() + h.put("a", "v") + h.reopenDB() + h.reopenDB() + h.getKeyVal("(a->v)") + h.waitCompaction() + h.getKeyVal("(a->v)") +} + +func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.reopenDB() + h.put("", "") + h.reopenDB() + h.delete("e") + h.put("", "") + h.reopenDB() + h.put("c", "cv") + h.reopenDB() + h.put("", "") + h.reopenDB() + h.put("", "") + h.waitCompaction() + h.reopenDB() + h.put("d", "dv") + h.reopenDB() + h.put("", "") + h.reopenDB() + h.delete("d") + h.delete("b") + h.reopenDB() + h.getKeyVal("(->)(c->cv)") + h.waitCompaction() + h.getKeyVal("(->)(c->cv)") +} + +func TestDb_SingleEntryMemCompaction(t *testing.T) { + trun(t, func(h *dbHarness) { + for i := 0; i < 10; i++ { + h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) + h.compactMem() + h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) + h.compactMem() + h.put("k", "v") + h.compactMem() + h.put("", "") + h.compactMem() + h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) + h.compactMem() + } + }) +} + +func TestDb_ManifestWriteError(t *testing.T) { + for i := 0; i < 2; i++ { + func() { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "bar") + h.getVal("foo", "bar") + + // Mem compaction (will succeed) + h.compactMem() + h.getVal("foo", "bar") + v := h.db.s.version() + if n := v.tLen(kMaxMemCompactLevel); n != 1 { + t.Errorf("invalid total tables, want=1 got=%d", n) + } + v.release() + + if i == 0 { + h.stor.SetWriteErr(storage.TypeManifest) + } else { + h.stor.SetSyncErr(storage.TypeManifest) + } + + // Merging compaction (will fail) + h.compactRangeAtErr(kMaxMemCompactLevel, "", "", true) + + h.db.Close() + h.stor.SetWriteErr(0) + h.stor.SetSyncErr(0) + + // Should not lose data + h.openDB() + h.getVal("foo", "bar") + }() + } +} + +func assertErr(t *testing.T, err error, wanterr bool) { + if err != nil { + if wanterr { + t.Log("AssertErr: got error (expected): ", err) + } else { + t.Error("AssertErr: got error: ", err) + } + } else if wanterr { + t.Error("AssertErr: expect error") + } +} + +func TestDb_ClosedIsClosed(t *testing.T) { + h := newDbHarness(t) + db := h.db + + var iter, iter2 iterator.Iterator + var snap *Snapshot + func() { + defer h.close() + + h.put("k", "v") + h.getVal("k", "v") + + iter = db.NewIterator(nil, h.ro) + iter.Seek([]byte("k")) + testKeyVal(t, iter, "k->v") + + var err error + snap, err = db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + + h.getValr(snap, "k", "v") + + iter2 = snap.NewIterator(nil, h.ro) + iter2.Seek([]byte("k")) + testKeyVal(t, iter2, "k->v") + + h.put("foo", "v2") + h.delete("foo") + + // closing DB + iter.Release() + iter2.Release() + }() + + assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) + _, err := db.Get([]byte("k"), h.ro) + assertErr(t, err, true) + + if iter.Valid() { + t.Errorf("iter.Valid should false") + } + assertErr(t, iter.Error(), false) + testKeyVal(t, iter, "->") + if iter.Seek([]byte("k")) { + t.Errorf("iter.Seek should false") + } + assertErr(t, iter.Error(), true) + + assertErr(t, iter2.Error(), false) + + _, err = snap.Get([]byte("k"), h.ro) + assertErr(t, err, true) + + _, err = db.GetSnapshot() + assertErr(t, err, 
true) + + iter3 := db.NewIterator(nil, h.ro) + assertErr(t, iter3.Error(), true) + + iter3 = snap.NewIterator(nil, h.ro) + assertErr(t, iter3.Error(), true) + + assertErr(t, db.Delete([]byte("k"), h.wo), true) + + _, err = db.GetProperty("leveldb.stats") + assertErr(t, err, true) + + _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) + assertErr(t, err, true) + + assertErr(t, db.CompactRange(util.Range{}), true) + + assertErr(t, db.Close(), true) +} + +type numberComparer struct{} + +func (numberComparer) num(x []byte) (n int) { + fmt.Sscan(string(x[1:len(x)-1]), &n) + return +} + +func (numberComparer) Name() string { + return "test.NumberComparer" +} + +func (p numberComparer) Compare(a, b []byte) int { + return p.num(a) - p.num(b) +} + +func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } +func (numberComparer) Successor(dst, b []byte) []byte { return nil } + +func TestDb_CustomComparer(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + Comparer: numberComparer{}, + WriteBuffer: 1000, + }) + defer h.close() + + h.put("[10]", "ten") + h.put("[0x14]", "twenty") + for i := 0; i < 2; i++ { + h.getVal("[10]", "ten") + h.getVal("[0xa]", "ten") + h.getVal("[20]", "twenty") + h.getVal("[0x14]", "twenty") + h.get("[15]", false) + h.get("[0xf]", false) + h.compactMem() + h.compactRange("[0]", "[9999]") + } + + for n := 0; n < 2; n++ { + for i := 0; i < 100; i++ { + v := fmt.Sprintf("[%d]", i*10) + h.put(v, v) + } + h.compactMem() + h.compactRange("[0]", "[1000000]") + } +} + +func TestDb_ManualCompaction(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + if kMaxMemCompactLevel != 2 { + t.Fatal("fix test to reflect the config") + } + + h.putMulti(3, "p", "q") + h.tablesPerLevel("1,1,1") + + // Compaction range falls before files + h.compactRange("", "c") + h.tablesPerLevel("1,1,1") + + // Compaction range falls after files + h.compactRange("r", "z") + h.tablesPerLevel("1,1,1") + + // Compaction range overlaps files + h.compactRange("p1", "p9") + h.tablesPerLevel("0,0,1") + + // Populate a different range + h.putMulti(3, "c", "e") + h.tablesPerLevel("1,1,2") + + // Compact just the new range + h.compactRange("b", "f") + h.tablesPerLevel("0,0,2") + + // Compact all + h.putMulti(1, "a", "z") + h.tablesPerLevel("0,1,2") + h.compactRange("", "") + h.tablesPerLevel("0,0,1") +} + +func TestDb_BloomFilter(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + BlockCache: opt.NoCache, + Filter: filter.NewBloomFilter(10), + }) + defer h.close() + + key := func(i int) string { + return fmt.Sprintf("key%06d", i) + } + + const ( + n = 10000 + indexOverhead = 19898 + filterOverhead = 19799 + ) + + // Populate multiple layers + for i := 0; i < n; i++ { + h.put(key(i), key(i)) + } + h.compactMem() + h.compactRange("a", "z") + for i := 0; i < n; i += 100 { + h.put(key(i), key(i)) + } + h.compactMem() + + // Prevent auto compactions triggered by seeks + h.stor.DelaySync(storage.TypeTable) + + // Lookup present keys. Should rarely read from small sstable. + h.stor.SetReadCounter(storage.TypeTable) + for i := 0; i < n; i++ { + h.getVal(key(i), key(i)) + } + cnt := int(h.stor.ReadCounter()) + t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) + + if min, max := n+indexOverhead+filterOverhead, n+indexOverhead+filterOverhead+2*n/100; cnt < min || cnt > max { + t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) + } + + // Lookup missing keys. Should rarely read from either sstable. 
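+	// With 10 bits per key a bloom filter yields roughly a 1% false-positive
+	// rate per table, hence the 3*n/100 read allowance over two sstables.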
+ h.stor.ResetReadCounter() + for i := 0; i < n; i++ { + h.get(key(i)+".missing", false) + } + cnt = int(h.stor.ReadCounter()) + t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) + if max := 3*n/100 + indexOverhead + filterOverhead; cnt > max { + t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) + } + + h.stor.ReleaseSync(storage.TypeTable) +} + +func TestDb_Concurrent(t *testing.T) { + const n, secs, maxkey = 4, 2, 1000 + + runtime.GOMAXPROCS(n) + trun(t, func(h *dbHarness) { + var closeWg sync.WaitGroup + var stop uint32 + var cnt [n]uint32 + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + var put, get, found uint + defer func() { + t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", + i, cnt[i], put, get, found, get-found) + closeWg.Done() + }() + + rnd := rand.New(rand.NewSource(int64(1000 + i))) + for atomic.LoadUint32(&stop) == 0 { + x := cnt[i] + + k := rnd.Intn(maxkey) + kstr := fmt.Sprintf("%016d", k) + + if (rnd.Int() % 2) > 0 { + put++ + h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) + } else { + get++ + v, err := h.db.Get([]byte(kstr), h.ro) + if err == nil { + found++ + rk, ri, rx := 0, -1, uint32(0) + fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) + if rk != k { + t.Errorf("invalid key want=%d got=%d", k, rk) + } + if ri < 0 || ri >= n { + t.Error("invalid goroutine number: ", ri) + } else { + tx := atomic.LoadUint32(&(cnt[ri])) + if rx > tx { + t.Errorf("invalid seq number, %d > %d ", rx, tx) + } + } + } else if err != ErrNotFound { + t.Error("Get: got error: ", err) + return + } + } + atomic.AddUint32(&cnt[i], 1) + } + }(i) + } + + time.Sleep(secs * time.Second) + atomic.StoreUint32(&stop, 1) + closeWg.Wait() + }) + + runtime.GOMAXPROCS(1) +} + +func TestDb_Concurrent2(t *testing.T) { + const n, n2 = 4, 4000 + + runtime.GOMAXPROCS(n*2 + 2) + truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) { + var closeWg sync.WaitGroup + var stop uint32 + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + for k := 0; atomic.LoadUint32(&stop) == 0; k++ { + h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) + } + closeWg.Done() + }(i) + } + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { + h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) + } + closeWg.Done() + }(i) + } + + cmp := comparer.DefaultComparer + for i := 0; i < n2; i++ { + closeWg.Add(1) + go func(i int) { + it := h.db.NewIterator(nil, nil) + var pk []byte + for it.Next() { + kk := it.Key() + if cmp.Compare(kk, pk) <= 0 { + t.Errorf("iter %d: %q is successor of %q", i, pk, kk) + } + pk = append(pk[:0], kk...) 
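+					// Decode the "k<num>" key and "<num>.<goroutine>." value
+					// written by the writer goroutines above and cross-check them.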
+ var k, vk, vi int + if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { + t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) + } else if n < 1 { + t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) + } + if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { + t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) + } else if n < 2 { + t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) + } + + if vk != k { + t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) + } + } + if err := it.Error(); err != nil { + t.Errorf("iter %d: Got error: %v", i, err) + } + it.Release() + closeWg.Done() + }(i) + } + + atomic.StoreUint32(&stop, 1) + closeWg.Wait() + }) + + runtime.GOMAXPROCS(1) +} + +func TestDb_CreateReopenDbOnFile(t *testing.T) { + dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) + if err := os.RemoveAll(dbpath); err != nil { + t.Fatal("cannot remove old db: ", err) + } + defer os.RemoveAll(dbpath) + + for i := 0; i < 3; i++ { + stor, err := storage.OpenFile(dbpath) + if err != nil { + t.Fatalf("(%d) cannot open storage: %s", i, err) + } + db, err := Open(stor, nil) + if err != nil { + t.Fatalf("(%d) cannot open db: %s", i, err) + } + if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { + t.Fatalf("(%d) cannot write to db: %s", i, err) + } + if err := db.Close(); err != nil { + t.Fatalf("(%d) cannot close db: %s", i, err) + } + if err := stor.Close(); err != nil { + t.Fatalf("(%d) cannot close storage: %s", i, err) + } + } +} + +func TestDb_CreateReopenDbOnFile2(t *testing.T) { + dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) + if err := os.RemoveAll(dbpath); err != nil { + t.Fatal("cannot remove old db: ", err) + } + defer os.RemoveAll(dbpath) + + for i := 0; i < 3; i++ { + db, err := OpenFile(dbpath, nil) + if err != nil { + t.Fatalf("(%d) cannot open db: %s", i, err) + } + if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { + t.Fatalf("(%d) cannot write to db: %s", i, err) + } + if err := db.Close(); err != nil { + t.Fatalf("(%d) cannot close db: %s", i, err) + } + } +} + +func TestDb_DeletionMarkersOnMemdb(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "v1") + h.compactMem() + h.delete("foo") + h.get("foo", false) + h.getKeyVal("") +} + +func TestDb_LeveldbIssue178(t *testing.T) { + nKeys := (kMaxTableSize / 30) * 5 + key1 := func(i int) string { + return fmt.Sprintf("my_key_%d", i) + } + key2 := func(i int) string { + return fmt.Sprintf("my_key_%d_xxx", i) + } + + // Disable compression since it affects the creation of layers and the + // code below is trying to test against a very specific scenario. + h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + defer h.close() + + // Create first key range. + batch := new(Batch) + for i := 0; i < nKeys; i++ { + batch.Put([]byte(key1(i)), []byte("value for range 1 key")) + } + h.write(batch) + + // Create second key range. + batch.Reset() + for i := 0; i < nKeys; i++ { + batch.Put([]byte(key2(i)), []byte("value for range 2 key")) + } + h.write(batch) + + // Delete second key range. + batch.Reset() + for i := 0; i < nKeys; i++ { + batch.Delete([]byte(key2(i))) + } + h.write(batch) + h.waitMemCompaction() + + // Run manual compaction. + h.compactRange(key1(0), key1(nKeys-1)) + + // Checking the keys. 
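+	// Only the nKeys entries of the first range should remain; the second
+	// range was deleted and then compacted away above.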
+	h.assertNumKeys(nKeys)
+}
+
+func TestDb_LeveldbIssue200(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.put("1", "b")
+	h.put("2", "c")
+	h.put("3", "d")
+	h.put("4", "e")
+	h.put("5", "f")
+
+	iter := h.db.NewIterator(nil, h.ro)
+
+	// Add an element that should not be reflected in the iterator.
+	h.put("25", "cd")
+
+	iter.Seek([]byte("5"))
+	assertBytes(t, []byte("5"), iter.Key())
+	iter.Prev()
+	assertBytes(t, []byte("4"), iter.Key())
+	iter.Prev()
+	assertBytes(t, []byte("3"), iter.Key())
+	iter.Next()
+	assertBytes(t, []byte("4"), iter.Key())
+	iter.Next()
+	assertBytes(t, []byte("5"), iter.Key())
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
new file mode 100644
index 0000000..8a95a6c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -0,0 +1,97 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"errors"
+
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader is the interface that wraps basic Get and NewIterator methods.
+// This interface is implemented by both DB and Snapshot.
+type Reader interface {
+	Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
+	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
+}
+
+type Sizes []uint64
+
+// Sum returns the sum of the sizes.
+func (p Sizes) Sum() (n uint64) {
+	for _, s := range p {
+		n += s
+	}
+	return n
+}
+
+// Logging.
+func (db *DB) log(v ...interface{})                 { db.s.log(v...) }
+func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
+// Check and clean files.
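+// checkAndCleanFiles removes obsolete manifest, journal and table files, and
+// reports corruption if a table referenced by the current version is missing
+// from storage.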
+func (db *DB) checkAndCleanFiles() error { + v := db.s.version_NB() + tablesMap := make(map[uint64]bool) + for _, tables := range v.tables { + for _, t := range tables { + tablesMap[t.file.Num()] = false + } + } + + files, err := db.s.getFiles(storage.TypeAll) + if err != nil { + return err + } + + var nTables int + var rem []storage.File + for _, f := range files { + keep := true + switch f.Type() { + case storage.TypeManifest: + keep = f.Num() >= db.s.manifestFile.Num() + case storage.TypeJournal: + if db.frozenJournalFile != nil { + keep = f.Num() >= db.frozenJournalFile.Num() + } else { + keep = f.Num() >= db.journalFile.Num() + } + case storage.TypeTable: + _, keep = tablesMap[f.Num()] + if keep { + tablesMap[f.Num()] = true + nTables++ + } + } + + if !keep { + rem = append(rem, f) + } + } + + if nTables != len(tablesMap) { + for num, present := range tablesMap { + if !present { + db.logf("db@janitor table missing @%d", num) + } + } + return ErrCorrupted{Type: MissingFiles, Err: errors.New("leveldb: table files missing")} + } + + db.logf("db@janitor F·%d G·%d", len(files), len(rem)) + for _, f := range rem { + db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) + if err := f.Remove(); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go new file mode 100644 index 0000000..838f107 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go @@ -0,0 +1,290 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "time" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func (db *DB) writeJournal(b *Batch) error { + w, err := db.journal.Next() + if err != nil { + return err + } + if _, err := w.Write(b.encode()); err != nil { + return err + } + if err := db.journal.Flush(); err != nil { + return err + } + if b.sync { + return db.journalWriter.Sync() + } + return nil +} + +func (db *DB) jWriter() { + defer db.closeW.Done() + for { + select { + case b := <-db.journalC: + if b != nil { + db.journalAckC <- db.writeJournal(b) + } + case _, _ = <-db.closeC: + return + } + } +} + +func (db *DB) rotateMem(n int) (mem *memDB, err error) { + // Wait for pending memdb compaction. + err = db.compSendIdle(db.mcompCmdC) + if err != nil { + return + } + + // Create new memdb and journal. + mem, err = db.newMem(n) + if err != nil { + return + } + + // Schedule memdb compaction. 
+ db.compTrigger(db.mcompTriggerC) + return +} + +func (db *DB) flush(n int) (mem *memDB, nn int, err error) { + delayed := false + flush := func() (retry bool) { + v := db.s.version() + defer v.release() + mem = db.getEffectiveMem() + defer func() { + if retry { + mem.decref() + mem = nil + } + }() + nn = mem.mdb.Free() + switch { + case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed: + delayed = true + time.Sleep(time.Millisecond) + case nn >= n: + return false + case v.tLen(0) >= kL0_StopWritesTrigger: + delayed = true + err = db.compSendIdle(db.tcompCmdC) + if err != nil { + return false + } + default: + // Allow memdb to grow if it has no entry. + if mem.mdb.Len() == 0 { + nn = n + } else { + mem.decref() + mem, err = db.rotateMem(n) + if err == nil { + nn = mem.mdb.Free() + } else { + nn = 0 + } + } + return false + } + return true + } + start := time.Now() + for flush() { + } + if delayed { + db.logf("db@write delayed T·%v", time.Since(start)) + } + return +} + +// Write apply the given batch to the DB. The batch will be applied +// sequentially. +// +// It is safe to modify the contents of the arguments after Write returns. +func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { + err = db.ok() + if err != nil || b == nil || b.len() == 0 { + return + } + + b.init(wo.GetSync()) + + // The write happen synchronously. +retry: + select { + case db.writeC <- b: + if <-db.writeMergedC { + return <-db.writeAckC + } + goto retry + case db.writeLockC <- struct{}{}: + case _, _ = <-db.closeC: + return ErrClosed + } + + merged := 0 + defer func() { + <-db.writeLockC + for i := 0; i < merged; i++ { + db.writeAckC <- err + } + }() + + mem, memFree, err := db.flush(b.size()) + if err != nil { + return + } + defer mem.decref() + + // Calculate maximum size of the batch. + m := 1 << 20 + if x := b.size(); x <= 128<<10 { + m = x + (128 << 10) + } + m = minInt(m, memFree) + + // Merge with other batch. +drain: + for b.size() < m && !b.sync { + select { + case nb := <-db.writeC: + if b.size()+nb.size() <= m { + b.append(nb) + db.writeMergedC <- true + merged++ + } else { + db.writeMergedC <- false + break drain + } + default: + break drain + } + } + + // Set batch first seq number relative from last seq. + b.seq = db.seq + 1 + + // Write journal concurrently if it is large enough. + if b.size() >= (128 << 10) { + // Push the write batch to the journal writer + select { + case _, _ = <-db.closeC: + err = ErrClosed + return + case db.journalC <- b: + // Write into memdb + b.memReplay(mem.mdb) + } + // Wait for journal writer + select { + case _, _ = <-db.closeC: + err = ErrClosed + return + case err = <-db.journalAckC: + if err != nil { + // Revert memdb if error detected + b.revertMemReplay(mem.mdb) + return + } + } + } else { + err = db.writeJournal(b) + if err != nil { + return + } + b.memReplay(mem.mdb) + } + + // Set last seq number. + db.addSeq(uint64(b.len())) + + if b.size() >= memFree { + db.rotateMem(0) + } + return +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// +// It is safe to modify the contents of the arguments after Put returns. +func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { + b := new(Batch) + b.Put(key, value) + return db.Write(b, wo) +} + +// Delete deletes the value for the given key. It returns ErrNotFound if +// the DB does not contain the key. +// +// It is safe to modify the contents of the arguments after Delete returns. 
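+//
+// Delete is implemented as a Write of a one-record batch, so it shares the
+// merging and journaling path of Write.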
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
+	b := new(Batch)
+	b.Delete(key)
+	return db.Write(b, wo)
+}
+
+func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
+	iter := mem.NewIterator(nil)
+	defer iter.Release()
+	return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
+		(min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
+}
+
+// CompactRange compacts the underlying DB for the given key range.
+// In particular, deleted and overwritten versions are discarded,
+// and the data is rearranged to reduce the cost of operations
+// needed to access the data. This operation should typically only
+// be invoked by users who understand the underlying implementation.
+//
+// A nil Range.Start is treated as a key before all keys in the DB,
+// and a nil Range.Limit is treated as a key after all keys in the DB.
+// Therefore, if both are nil, the entire DB will be compacted.
+func (db *DB) CompactRange(r util.Range) error {
+	if err := db.ok(); err != nil {
+		return err
+	}
+
+	select {
+	case db.writeLockC <- struct{}{}:
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+
+	// Check for overlaps in memdb.
+	mem := db.getEffectiveMem()
+	defer mem.decref()
+	if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
+		// Memdb compaction.
+		if _, err := db.rotateMem(0); err != nil {
+			<-db.writeLockC
+			return err
+		}
+		<-db.writeLockC
+		if err := db.compSendIdle(db.mcompCmdC); err != nil {
+			return err
+		}
+	} else {
+		<-db.writeLockC
+	}
+
+	// Table compaction.
+	return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
new file mode 100644
index 0000000..53f13bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package leveldb provides an implementation of the LevelDB key/value
+// database.
+//
+// Create or open a database:
+//
+//	db, err := leveldb.OpenFile("path/to/db", nil)
+//	...
+//	defer db.Close()
+//	...
+//
+// Read or modify the database content:
+//
+//	// Remember that the contents of the returned slice should not be modified.
+//	data, err := db.Get([]byte("key"), nil)
+//	...
+//	err = db.Put([]byte("key"), []byte("value"), nil)
+//	...
+//	err = db.Delete([]byte("key"), nil)
+//	...
+//
+// Iterate over database content:
+//
+//	iter := db.NewIterator(nil, nil)
+//	for iter.Next() {
+//		// Remember that the contents of the returned slice should not be
+//		// modified, and are only valid until the next call to Next.
+//		key := iter.Key()
+//		value := iter.Value()
+//		...
+//	}
+//	iter.Release()
+//	err = iter.Error()
+//	...
+//
+// Iterate over a subset of database content with a particular prefix:
+//
+//	iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+//	for iter.Next() {
+//		// Use key/value.
+//		...
+//	}
+//	iter.Release()
+//	err = iter.Error()
+//	...
+//
+// Seek-then-Iterate:
+//
+//	iter := db.NewIterator(nil, nil)
+//	for ok := iter.Seek(key); ok; ok = iter.Next() {
+//		// Use key/value.
+//		...
+//	}
+//	iter.Release()
+//	err = iter.Error()
+//	...
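+//
+// Iterating backward works the same way; a minimal sketch using the reverse
+// positioning methods:
+//
+//	iter := db.NewIterator(nil, nil)
+//	for ok := iter.Last(); ok; ok = iter.Prev() {
+//		// Use key/value.
+//		...
+//	}
+//	iter.Release()
+//	err = iter.Error()
+//	...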
+// +// Iterate over subset of database content: +// +// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Batch writes: +// +// batch := new(leveldb.Batch) +// batch.Put([]byte("foo"), []byte("value")) +// batch.Put([]byte("bar"), []byte("another value")) +// batch.Delete([]byte("baz")) +// err = db.Write(batch, nil) +// ... +// +// Use bloom filter: +// +// o := &opt.Options{ +// Filter: filter.NewBloomFilter(10), +// } +// db, err := leveldb.OpenFile("path/to/db", o) +// ... +// defer db.Close() +// ... +package leveldb diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go new file mode 100644 index 0000000..f852334 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go @@ -0,0 +1,38 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrNotFound = util.ErrNotFound + ErrSnapshotReleased = errors.New("leveldb: snapshot released") + ErrIterReleased = errors.New("leveldb: iterator released") + ErrClosed = errors.New("leveldb: closed") +) + +type CorruptionType int + +const ( + CorruptedManifest CorruptionType = iota + MissingFiles +) + +// ErrCorrupted is the type that wraps errors that indicate corruption in +// the database. +type ErrCorrupted struct { + Type CorruptionType + Err error +} + +func (e ErrCorrupted) Error() string { + return e.Err.Error() +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go new file mode 100644 index 0000000..3056724 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go @@ -0,0 +1,57 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +var _ = testutil.Defer(func() { + Describe("Leveldb external", func() { + o := &opt.Options{ + BlockCache: opt.NoCache, + BlockRestartInterval: 5, + BlockSize: 50, + Compression: opt.NoCompression, + CachedOpenFiles: -1, + Strict: opt.StrictAll, + WriteBuffer: 1000, + } + + Describe("write test", func() { + It("should do write correctly", func(done Done) { + db := newTestingDB(o, nil, nil) + t := testutil.DBTesting{ + DB: db, + Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(), + } + testutil.DoDBTesting(&t) + db.TestClose() + done <- true + }, 20.0) + }) + + Describe("read test", func() { + testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB { + // Building the DB. 
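+				// Insert the generated pairs in shuffled order so the read
+				// tests do not depend on insertion order.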
+ db := newTestingDB(o, nil, nil) + kv.IterateShuffled(nil, func(i int, key, value []byte) { + err := db.TestPut(key, value) + Expect(err).NotTo(HaveOccurred()) + }) + + return db + }, func(db testutil.DB) { + db.(*testingDB).TestClose() + }) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go new file mode 100644 index 0000000..684393b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go @@ -0,0 +1,31 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" +) + +type iFilter struct { + filter.Filter +} + +func (f iFilter) Contains(filter, key []byte) bool { + return f.Filter.Contains(filter, iKey(key).ukey()) +} + +func (f iFilter) NewGenerator() filter.FilterGenerator { + return iFilterGenerator{f.Filter.NewGenerator()} +} + +type iFilterGenerator struct { + filter.FilterGenerator +} + +func (g iFilterGenerator) Add(key []byte) { + g.FilterGenerator.Add(iKey(key).ukey()) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go new file mode 100644 index 0000000..ea9570e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go @@ -0,0 +1,116 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package filter + +import ( + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func bloomHash(key []byte) uint32 { + return util.Hash(key, 0xbc9f1d34) +} + +type bloomFilter int + +// The bloom filter serializes its parameters and is backward compatible +// with respect to them. Therefor, its parameters are not added to its +// name. +func (bloomFilter) Name() string { + return "leveldb.BuiltinBloomFilter" +} + +func (f bloomFilter) Contains(filter, key []byte) bool { + nBytes := len(filter) - 1 + if nBytes < 1 { + return false + } + nBits := uint32(nBytes * 8) + + // Use the encoded k so that we can read filters generated by + // bloom filters created using different parameters. + k := filter[nBytes] + if k > 30 { + // Reserved for potentially new encodings for short bloom filters. + // Consider it a match. + return true + } + + kh := bloomHash(key) + delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits + for j := uint8(0); j < k; j++ { + bitpos := kh % nBits + if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { + return false + } + kh += delta + } + return true +} + +func (f bloomFilter) NewGenerator() FilterGenerator { + // Round down to reduce probing cost a little bit. + k := uint8(f * 69 / 100) // 0.69 =~ ln(2) + if k < 1 { + k = 1 + } else if k > 30 { + k = 30 + } + return &bloomFilterGenerator{ + n: int(f), + k: k, + } +} + +type bloomFilterGenerator struct { + n int + k uint8 + + keyHashes []uint32 +} + +func (g *bloomFilterGenerator) Add(key []byte) { + // Use double-hashing to generate a sequence of hash values. + // See analysis in [Kirsch,Mitzenmacher 2006]. 
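+	// Only the 32-bit base hash is stored; Generate derives the k probe
+	// positions as h, h+delta, h+2*delta, ... with delta = rotate-right(h, 17).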
+ g.keyHashes = append(g.keyHashes, bloomHash(key)) +} + +func (g *bloomFilterGenerator) Generate(b Buffer) { + // Compute bloom filter size (in both bits and bytes) + nBits := uint32(len(g.keyHashes) * g.n) + // For small n, we can see a very high false positive rate. Fix it + // by enforcing a minimum bloom filter length. + if nBits < 64 { + nBits = 64 + } + nBytes := (nBits + 7) / 8 + nBits = nBytes * 8 + + dest := b.Alloc(int(nBytes) + 1) + dest[nBytes] = g.k + for _, kh := range g.keyHashes { + delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits + for j := uint8(0); j < g.k; j++ { + bitpos := kh % nBits + dest[bitpos/8] |= (1 << (bitpos % 8)) + kh += delta + } + } + + g.keyHashes = g.keyHashes[:0] +} + +// NewBloomFilter creates a new initialized bloom filter for given +// bitsPerKey. +// +// Since bitsPerKey is persisted individually for each bloom filter +// serialization, bloom filters are backwards compatible with respect to +// changing bitsPerKey. This means that no big performance penalty will +// be experienced when changing the parameter. See documentation for +// opt.Options.Filter for more information. +func NewBloomFilter(bitsPerKey int) Filter { + return bloomFilter(bitsPerKey) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go new file mode 100644 index 0000000..e0182e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package filter + +import ( + "encoding/binary" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" + "testing" +) + +type harness struct { + t *testing.T + + bloom Filter + generator FilterGenerator + filter []byte +} + +func newHarness(t *testing.T) *harness { + bloom := NewBloomFilter(10) + return &harness{ + t: t, + bloom: bloom, + generator: bloom.NewGenerator(), + } +} + +func (h *harness) add(key []byte) { + h.generator.Add(key) +} + +func (h *harness) addNum(key uint32) { + var b [4]byte + binary.LittleEndian.PutUint32(b[:], key) + h.add(b[:]) +} + +func (h *harness) build() { + b := &util.Buffer{} + h.generator.Generate(b) + h.filter = b.Bytes() +} + +func (h *harness) reset() { + h.filter = nil +} + +func (h *harness) filterLen() int { + return len(h.filter) +} + +func (h *harness) assert(key []byte, want, silent bool) bool { + got := h.bloom.Contains(h.filter, key) + if !silent && got != want { + h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) + } + return got +} + +func (h *harness) assertNum(key uint32, want, silent bool) bool { + var b [4]byte + binary.LittleEndian.PutUint32(b[:], key) + return h.assert(b[:], want, silent) +} + +func TestBloomFilter_Empty(t *testing.T) { + h := newHarness(t) + h.build() + h.assert([]byte("hello"), false, false) + h.assert([]byte("world"), false, false) +} + +func TestBloomFilter_Small(t *testing.T) { + h := newHarness(t) + h.add([]byte("hello")) + h.add([]byte("world")) + h.build() + h.assert([]byte("hello"), true, false) + h.assert([]byte("world"), true, false) + h.assert([]byte("x"), false, false) + h.assert([]byte("foo"), false, false) +} + +func nextN(n int) int { + switch { + case n < 10: + n += 1 + case n < 100: + n += 10 + case n < 1000: + n += 100 + default: + 
n += 1000
+	}
+	return n
+}
+
+func TestBloomFilter_VaryingLengths(t *testing.T) {
+	h := newHarness(t)
+	var mediocre, good int
+	for n := 1; n < 10000; n = nextN(n) {
+		h.reset()
+		for i := 0; i < n; i++ {
+			h.addNum(uint32(i))
+		}
+		h.build()
+
+		got := h.filterLen()
+		want := (n * 10 / 8) + 40
+		if got > want {
+			t.Errorf("filter len test failed, '%d' > '%d'", got, want)
+		}
+
+		for i := 0; i < n; i++ {
+			h.assertNum(uint32(i), true, false)
+		}
+
+		var rate float32
+		for i := 0; i < 10000; i++ {
+			if h.assertNum(uint32(i+1000000000), true, true) {
+				rate++
+			}
+		}
+		rate /= 10000
+		if rate > 0.02 {
+			t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n)
+		}
+		if rate > 0.0125 {
+			mediocre++
+		} else {
+			good++
+		}
+	}
+	t.Logf("false positive rate: %d good, %d mediocre", good, mediocre)
+	if mediocre > good/5 {
+		t.Error("mediocre false positive rate is more than expected")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go
new file mode 100644
index 0000000..7a925c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package filter provides an interface and implementations of probabilistic
+// data structures.
+//
+// A filter is responsible for creating a small filter from a set of keys.
+// This filter is then used to test whether a key is a member of the set.
+// In many cases, a filter can cut down the number of disk seeks from a
+// handful to a single disk seek per DB.Get call.
+package filter
+
+// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods.
+type Buffer interface {
+	// Alloc allocates an n-byte slice from the buffer, advancing the
+	// write offset.
+	Alloc(n int) []byte
+
+	// Write appends the contents of p to the buffer.
+	Write(p []byte) (n int, err error)
+
+	// WriteByte appends the byte c to the buffer.
+	WriteByte(c byte) error
+}
+
+// Filter is the filter policy.
+type Filter interface {
+	// Name returns the name of this policy.
+	//
+	// Note that if the filter encoding changes in an incompatible way,
+	// the name returned by this method must be changed. Otherwise, old
+	// incompatible filters may be passed to methods of this type.
+	Name() string
+
+	// NewGenerator creates a new filter generator.
+	NewGenerator() FilterGenerator
+
+	// Contains returns true if the filter contains the given key.
+	//
+	// The filter argument is a filter generated by the filter generator.
+	Contains(filter, key []byte) bool
+}
+
+// FilterGenerator is the filter generator.
+type FilterGenerator interface {
+	// Add adds a key to the filter generator.
+	//
+	// The key may become invalid after this method returns, therefore the
+	// implementation must copy the key if it needs to keep the key for
+	// later use. The key must not be modified directly; doing so may
+	// cause undefined results.
+	Add(key []byte)
+
+	// Generate generates a filter based on the keys passed so far. After a
+	// call to Generate the filter generator may be reset, depending on the
+	// implementation.
+ Generate(b Buffer) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go new file mode 100644 index 0000000..e76657e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go @@ -0,0 +1,58 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build go1.3 + +package leveldb + +import ( + "sync/atomic" + "testing" +) + +func BenchmarkDBReadConcurrent(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + defer p.close() + + b.ResetTimer() + b.SetBytes(116) + + b.RunParallel(func(pb *testing.PB) { + iter := p.newIter() + defer iter.Release() + for pb.Next() && iter.Next() { + } + }) +} + +func BenchmarkDBReadConcurrent2(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + defer p.close() + + b.ResetTimer() + b.SetBytes(116) + + var dir uint32 + b.RunParallel(func(pb *testing.PB) { + iter := p.newIter() + defer iter.Release() + if atomic.AddUint32(&dir, 1)%2 == 0 { + for pb.Next() && iter.Next() { + } + } else { + if pb.Next() && iter.Last() { + for pb.Next() && iter.Prev() { + } + } + } + }) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go new file mode 100644 index 0000000..846aa11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go @@ -0,0 +1,158 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator + +import ( + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// BasicArray is the interface that wraps basic Len and Search method. +type BasicArray interface { + // Len returns length of the array. + Len() int + + // Search finds smallest index that point to a key that is greater + // than or equal to the given key. + Search(key []byte) int +} + +// Array is the interface that wraps BasicArray and basic Index method. +type Array interface { + BasicArray + + // Index returns key/value pair with index of i. + Index(i int) (key, value []byte) +} + +// Array is the interface that wraps BasicArray and basic Get method. +type ArrayIndexer interface { + BasicArray + + // Get returns a new data iterator with index of i. 
+ Get(i int) Iterator +} + +type basicArrayIterator struct { + util.BasicReleaser + array BasicArray + pos int +} + +func (i *basicArrayIterator) Valid() bool { + return i.pos >= 0 && i.pos < i.array.Len() +} + +func (i *basicArrayIterator) First() bool { + if i.array.Len() == 0 { + i.pos = -1 + return false + } + i.pos = 0 + return true +} + +func (i *basicArrayIterator) Last() bool { + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = n - 1 + return true +} + +func (i *basicArrayIterator) Seek(key []byte) bool { + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = i.array.Search(key) + if i.pos >= n { + return false + } + return true +} + +func (i *basicArrayIterator) Next() bool { + i.pos++ + if n := i.array.Len(); i.pos >= n { + i.pos = n + return false + } + return true +} + +func (i *basicArrayIterator) Prev() bool { + i.pos-- + if i.pos < 0 { + i.pos = -1 + return false + } + return true +} + +func (i *basicArrayIterator) Error() error { return nil } + +type arrayIterator struct { + basicArrayIterator + array Array + pos int + key, value []byte +} + +func (i *arrayIterator) updateKV() { + if i.pos == i.basicArrayIterator.pos { + return + } + i.pos = i.basicArrayIterator.pos + if i.Valid() { + i.key, i.value = i.array.Index(i.pos) + } else { + i.key = nil + i.value = nil + } +} + +func (i *arrayIterator) Key() []byte { + i.updateKV() + return i.key +} + +func (i *arrayIterator) Value() []byte { + i.updateKV() + return i.value +} + +type arrayIteratorIndexer struct { + basicArrayIterator + array ArrayIndexer +} + +func (i *arrayIteratorIndexer) Get() Iterator { + if i.Valid() { + return i.array.Get(i.basicArrayIterator.pos) + } + return nil +} + +// NewArrayIterator returns an iterator from the given array. +func NewArrayIterator(array Array) Iterator { + return &arrayIterator{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + pos: -1, + } +} + +// NewArrayIndexer returns an index iterator from the given array. +func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { + return &arrayIteratorIndexer{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go new file mode 100644 index 0000000..296004b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go @@ -0,0 +1,30 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator_test + +import ( + . "github.com/onsi/ginkgo" + + . "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +var _ = testutil.Defer(func() { + Describe("Array iterator", func() { + It("Should iterates and seeks correctly", func() { + // Build key/value. + kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3) + + // Test the iterator. 
+ t := testutil.IteratorTesting{ + KeyValue: kv.Clone(), + Iter: NewArrayIterator(kv), + } + testutil.DoIteratorTesting(&t) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go new file mode 100644 index 0000000..da07f82 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go @@ -0,0 +1,221 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator + +import ( + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// IteratorIndexer is the interface that wraps CommonIterator and basic Get +// method. IteratorIndexer provides index for indexed iterator. +type IteratorIndexer interface { + CommonIterator + + // Get returns a new data iterator for the current position, or nil if + // done. + Get() Iterator +} + +type indexedIterator struct { + util.BasicReleaser + index IteratorIndexer + strict bool + strictGet bool + + data Iterator + err error + errf func(err error) +} + +func (i *indexedIterator) setData() { + if i.data != nil { + i.data.Release() + } + i.data = i.index.Get() + if i.strictGet { + if err := i.data.Error(); err != nil { + i.err = err + } + } +} + +func (i *indexedIterator) clearData() { + if i.data != nil { + i.data.Release() + } + i.data = nil +} + +func (i *indexedIterator) dataErr() bool { + if i.errf != nil { + if err := i.data.Error(); err != nil { + i.errf(err) + } + } + if i.strict { + if err := i.data.Error(); err != nil { + i.err = err + return true + } + } + return false +} + +func (i *indexedIterator) Valid() bool { + return i.data != nil && i.data.Valid() +} + +func (i *indexedIterator) First() bool { + if i.err != nil { + return false + } + + if !i.index.First() { + i.clearData() + return false + } + i.setData() + return i.Next() +} + +func (i *indexedIterator) Last() bool { + if i.err != nil { + return false + } + + if !i.index.Last() { + i.clearData() + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + return true +} + +func (i *indexedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } + + if !i.index.Seek(key) { + i.clearData() + return false + } + i.setData() + if !i.data.Seek(key) { + if i.dataErr() { + return false + } + i.clearData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Next() bool { + if i.err != nil { + return false + } + + switch { + case i.data != nil && !i.data.Next(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Next() { + return false + } + i.setData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Prev() bool { + if i.err != nil { + return false + } + + switch { + case i.data != nil && !i.data.Prev(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Prev() { + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + } + return true +} + +func (i *indexedIterator) Key() []byte { + if i.data == nil { + return nil + } + return i.data.Key() +} + +func (i *indexedIterator) Value() []byte { + if i.data == nil { + return nil + } + return 
i.data.Value() +} + +func (i *indexedIterator) Release() { + i.clearData() + i.index.Release() + i.BasicReleaser.Release() +} + +func (i *indexedIterator) Error() error { + if i.err != nil { + return i.err + } + if err := i.index.Error(); err != nil { + return err + } + return nil +} + +func (i *indexedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewIndexedIterator returns an indexed iterator. An index is iterator +// that returns another iterator, a data iterator. A data iterator is the +// iterator that contains actual key/value pairs. +// +// If strict is true then error yield by data iterator will halt the indexed +// iterator, on contrary if strict is false then the indexed iterator will +// ignore those error and move on to the next index. If strictGet is true and +// index.Get() yield an 'error iterator' then the indexed iterator will be halted. +// An 'error iterator' is iterator which its Error() method always return non-nil +// even before any 'seeks method' is called. +func NewIndexedIterator(index IteratorIndexer, strict, strictGet bool) Iterator { + return &indexedIterator{index: index, strict: strict, strictGet: strictGet} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go new file mode 100644 index 0000000..eb6ce78 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go @@ -0,0 +1,83 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator_test + +import ( + "sort" + + . "github.com/onsi/ginkgo" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + . "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +type keyValue struct { + key []byte + testutil.KeyValue +} + +type keyValueIndex []keyValue + +func (x keyValueIndex) Search(key []byte) int { + return sort.Search(x.Len(), func(i int) bool { + return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 + }) +} + +func (x keyValueIndex) Len() int { return len(x) } +func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } +func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } + +var _ = testutil.Defer(func() { + Describe("Indexed iterator", func() { + Test := func(n ...int) func() { + if len(n) == 0 { + rnd := testutil.NewRand() + n = make([]int, rnd.Intn(17)+3) + for i := range n { + n[i] = rnd.Intn(19) + 1 + } + } + + return func() { + It("Should iterates and seeks correctly", func(done Done) { + // Build key/value. + index := make(keyValueIndex, len(n)) + sum := 0 + for _, x := range n { + sum += x + } + kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4) + for i, j := 0, 0; i < len(n); i++ { + for x := n[i]; x > 0; x-- { + key, value := kv.Index(j) + index[i].key = key + index[i].Put(key, value) + j++ + } + } + + // Test the iterator. 
+ t := testutil.IteratorTesting{
+ KeyValue: kv.Clone(),
+ Iter: NewIndexedIterator(NewArrayIndexer(index), true, true),
+ }
+ testutil.DoIteratorTesting(&t)
+ done <- true
+ }, 1.5)
+ }
+ }
+
+ Describe("with 100 keys", Test(100))
+ Describe("with 50-50 keys", Test(50, 50))
+ Describe("with 50-1 keys", Test(50, 1))
+ Describe("with 50-1-50 keys", Test(50, 1, 50))
+ Describe("with 1-50 keys", Test(1, 50))
+ Describe("with random N-keys", Test())
+ })
+})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
new file mode 100644
index 0000000..0cc6f96
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package iterator provides an interface and implementations for traversing
+// the contents of a database.
+package iterator
+
+import (
+ "errors"
+
+ "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// IteratorSeeker is the interface that wraps the 'seeks method'.
+type IteratorSeeker interface {
+ // First moves the iterator to the first key/value pair. If the iterator
+ // only contains one key/value pair then First and Last would move
+ // to the same key/value pair.
+ // It returns whether such a pair exists.
+ First() bool
+
+ // Last moves the iterator to the last key/value pair. If the iterator
+ // only contains one key/value pair then First and Last would move
+ // to the same key/value pair.
+ // It returns whether such a pair exists.
+ Last() bool
+
+ // Seek moves the iterator to the first key/value pair whose key is greater
+ // than or equal to the given key.
+ // It returns whether such a pair exists.
+ //
+ // It is safe to modify the contents of the argument after Seek returns.
+ Seek(key []byte) bool
+
+ // Next moves the iterator to the next key/value pair.
+ // It returns false if the iterator is exhausted.
+ Next() bool
+
+ // Prev moves the iterator to the previous key/value pair.
+ // It returns false if the iterator is exhausted.
+ Prev() bool
+}
+
+// CommonIterator is the interface that wraps common iterator methods.
+type CommonIterator interface {
+ IteratorSeeker
+
+ // util.Releaser is the interface that wraps basic Release method.
+ // When called, Release releases any resources associated with the
+ // iterator.
+ util.Releaser
+
+ // util.ReleaseSetter is the interface that wraps the basic SetReleaser
+ // method.
+ util.ReleaseSetter
+
+ // TODO: Remove this when ready.
+ Valid() bool
+
+ // Error returns any accumulated error. Exhausting all the key/value pairs
+ // is not considered to be an error.
+ Error() error
+}
+
+// Iterator iterates over a DB's key/value pairs in key order.
+//
+// When an error is encountered, any 'seeks method' will return false and
+// yield no key/value pairs. The error can be queried by calling the Error
+// method. Calling Release is still necessary.
+//
+// An iterator must be released after use, but it is not necessary to read
+// an iterator until exhaustion.
+// Also, an iterator is not necessarily goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+type Iterator interface {
+ CommonIterator
+
+ // Key returns the key of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seeks method'.
+ Key() []byte
+
+ // Value returns the value of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seeks method'.
+ Value() []byte
+}
+
+// ErrorCallbackSetter is the interface that wraps the basic SetErrorCallback
+// method.
+//
+// ErrorCallbackSetter is implemented by the indexed and merged iterators.
+type ErrorCallbackSetter interface {
+ // SetErrorCallback sets an error callback for the corresponding
+ // iterator. Use nil to clear the callback.
+ SetErrorCallback(f func(err error))
+}
+
+type emptyIterator struct {
+ releaser util.Releaser
+ released bool
+ err error
+}
+
+func (i *emptyIterator) rErr() {
+ if i.err == nil && i.released {
+ i.err = errors.New("leveldb/iterator: iterator released")
+ }
+}
+
+func (i *emptyIterator) Release() {
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ i.released = true
+}
+
+func (i *emptyIterator) SetReleaser(releaser util.Releaser) {
+ if !i.released {
+ i.releaser = releaser
+ }
+}
+
+func (*emptyIterator) Valid() bool { return false }
+func (i *emptyIterator) First() bool { i.rErr(); return false }
+func (i *emptyIterator) Last() bool { i.rErr(); return false }
+func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
+func (i *emptyIterator) Next() bool { i.rErr(); return false }
+func (i *emptyIterator) Prev() bool { i.rErr(); return false }
+func (*emptyIterator) Key() []byte { return nil }
+func (*emptyIterator) Value() []byte { return nil }
+func (i *emptyIterator) Error() error { return i.err }
+
+// NewEmptyIterator creates an empty iterator. The err parameter can be
+// nil, but if not nil the given err will be returned by the Error method.
+func NewEmptyIterator(err error) Iterator {
+ return &emptyIterator{err: err}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
new file mode 100644
index 0000000..8dd1b48
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
@@ -0,0 +1,17 @@
+package iterator_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+func TestIterator(t *testing.T) {
+ testutil.RunDefer()
+
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Iterator Suite")
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
new file mode 100644
index 0000000..ad98a32
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -0,0 +1,307 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
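+
+// Implementation note: the merged iterator below keeps the current key of
+// every input iterator in a keys slice; next picks the smallest non-nil key
+// with a linear scan and prev the largest, so each step costs O(k) in the
+// number of input iterators.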
+ +package iterator + +import ( + "errors" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrIterReleased = errors.New("leveldb/iterator: iterator released") +) + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type mergedIterator struct { + cmp comparer.Comparer + iters []Iterator + strict bool + + keys [][]byte + index int + dir dir + err error + errf func(err error) + releaser util.Releaser +} + +func assertKey(key []byte) []byte { + if key == nil { + panic("leveldb/iterator: nil key") + } + return key +} + +func (i *mergedIterator) iterErr(iter Iterator) bool { + if i.errf != nil { + if err := iter.Error(); err != nil { + i.errf(err) + } + } + if i.strict { + if err := iter.Error(); err != nil { + i.err = err + return true + } + } + return false +} + +func (i *mergedIterator) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *mergedIterator) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.First(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirEOI + return i.prev() +} + +func (i *mergedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Seek(key): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) next() bool { + var key []byte + if i.dir == dirForward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirEOI + return false + } + i.dir = dirForward + return true +} + +func (i *mergedIterator) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirSOI: + return i.First() + case dirBackward: + key := append([]byte{}, i.keys[i.index]...) 
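+ // The previous direction was backward: re-seek every input iterator
+ // to the saved current key, then step forward past it.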
+ if !i.Seek(key) { + return false + } + return i.Next() + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Next(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.next() +} + +func (i *mergedIterator) prev() bool { + var key []byte + if i.dir == dirBackward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirSOI + return false + } + i.dir = dirBackward + return true +} + +func (i *mergedIterator) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + key := append([]byte{}, i.keys[i.index]...) + for x, iter := range i.iters { + if x == i.index { + continue + } + seek := iter.Seek(key) + switch { + case seek && iter.Prev(), !seek && iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Prev(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.prev() +} + +func (i *mergedIterator) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.keys[i.index] +} + +func (i *mergedIterator) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.iters[i.index].Value() +} + +func (i *mergedIterator) Release() { + if i.dir != dirReleased { + i.dir = dirReleased + for _, iter := range i.iters { + iter.Release() + } + i.iters = nil + i.keys = nil + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *mergedIterator) SetReleaser(releaser util.Releaser) { + if i.dir != dirReleased { + i.releaser = releaser + } +} + +func (i *mergedIterator) Error() error { + return i.err +} + +func (i *mergedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewMergedIterator returns an iterator that merges its input. Walking the +// resultant iterator will return all key/value pairs of all input iterators +// in strictly increasing key order, as defined by cmp. +// The input's key ranges may overlap, but there are assumed to be no duplicate +// keys: if iters[i] contains a key k then iters[j] will not contain that key k. +// None of the iters may be nil. +// +// If strict is true then error yield by any iterators will halt the merged +// iterator, on contrary if strict is false then the merged iterator will +// ignore those error and move on to the next iterator. +func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { + return &mergedIterator{ + iters: iters, + cmp: cmp, + strict: strict, + keys: make([][]byte, len(iters)), + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go new file mode 100644 index 0000000..25f2aa8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go @@ -0,0 +1,60 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator_test + +import ( + . 
"github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + . "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +var _ = testutil.Defer(func() { + Describe("Merged iterator", func() { + Test := func(filled int, empty int) func() { + return func() { + It("Should iterates and seeks correctly", func(done Done) { + rnd := testutil.NewRand() + + // Build key/value. + filledKV := make([]testutil.KeyValue, filled) + kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4) + kv.Iterate(func(i int, key, value []byte) { + filledKV[rnd.Intn(filled)].Put(key, value) + }) + + // Create itearators. + iters := make([]Iterator, filled+empty) + for i := range iters { + if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) { + filled-- + Expect(filledKV[filled].Len()).ShouldNot(BeZero()) + iters[i] = NewArrayIterator(filledKV[filled]) + } else { + empty-- + iters[i] = NewEmptyIterator(nil) + } + } + + // Test the iterator. + t := testutil.IteratorTesting{ + KeyValue: kv.Clone(), + Iter: NewMergedIterator(iters, comparer.DefaultComparer, true), + } + testutil.DoIteratorTesting(&t) + done <- true + }, 1.5) + } + } + + Describe("with three, all filled iterators", Test(3, 0)) + Describe("with one filled, one empty iterators", Test(1, 1)) + Describe("with one filled, two empty iterators", Test(1, 2)) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go new file mode 100644 index 0000000..e23d3c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go @@ -0,0 +1,520 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 +// License, authors and contributors informations can be found at bellow URLs respectively: +// https://code.google.com/p/leveldb-go/source/browse/LICENSE +// https://code.google.com/p/leveldb-go/source/browse/AUTHORS +// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS + +// Package journal reads and writes sequences of journals. Each journal is a stream +// of bytes that completes before the next journal starts. +// +// When reading, call Next to obtain an io.Reader for the next journal. Next will +// return io.EOF when there are no more journals. It is valid to call Next +// without reading the current journal to exhaustion. +// +// When writing, call Next to obtain an io.Writer for the next journal. Calling +// Next finishes the current journal. Call Close to finish the final journal. +// +// Optionally, call Flush to finish the current journal and flush the underlying +// writer without starting a new journal. To start a new journal after flushing, +// call Next. +// +// Neither Readers or Writers are safe to use concurrently. 
+//
+// Example code:
+//	func read(r io.Reader) ([]string, error) {
+//		var ss []string
+//		journals := journal.NewReader(r, nil, true, true)
+//		for {
+//			j, err := journals.Next()
+//			if err == io.EOF {
+//				break
+//			}
+//			if err != nil {
+//				return nil, err
+//			}
+//			s, err := ioutil.ReadAll(j)
+//			if err != nil {
+//				return nil, err
+//			}
+//			ss = append(ss, string(s))
+//		}
+//		return ss, nil
+//	}
+//
+//	func write(w io.Writer, ss []string) error {
+//		journals := journal.NewWriter(w)
+//		for _, s := range ss {
+//			j, err := journals.Next()
+//			if err != nil {
+//				return err
+//			}
+//			if _, err := j.Write([]byte(s)); err != nil {
+//				return err
+//			}
+//		}
+//		return journals.Close()
+//	}
+//
+// The wire format is that the stream is divided into 32KiB blocks, and each
+// block contains a number of tightly packed chunks. Chunks cannot cross block
+// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
+// block must be zero.
+//
+// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4
+// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type)
+// followed by a payload. The checksum is over the chunk type and the payload.
+//
+// There are four chunk types: whether the chunk is the full journal, or the
+// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
+// has one first chunk, zero or more middle chunks, and one last chunk.
+//
+// The wire format allows for limited recovery in the face of data corruption:
+// on a format error (such as a checksum mismatch), the reader moves to the
+// next block and looks for the next full or first chunk.
+package journal
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// These constants are part of the wire format and should not be changed.
+const (
+ fullChunkType = 1
+ firstChunkType = 2
+ middleChunkType = 3
+ lastChunkType = 4
+)
+
+const (
+ blockSize = 32 * 1024
+ headerSize = 7
+)
+
+type flusher interface {
+ Flush() error
+}
+
+// ErrCorrupted is the error type generated for a corrupted block or chunk.
+type ErrCorrupted struct {
+ Size int
+ Reason string
+}
+
+func (e ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
+}
+
+// Dropper is the interface that wraps the simple Drop method. The Drop
+// method is called when the journal reader drops a block or chunk.
type Dropper interface {
+ Drop(err error)
+}
+
+// Reader reads journals from an underlying io.Reader.
+type Reader struct {
+ // r is the underlying reader.
+ r io.Reader
+ // the dropper.
+ dropper Dropper
+ // strict flag.
+ strict bool
+ // checksum flag.
+ checksum bool
+ // seq is the sequence number of the current journal.
+ seq int
+ // buf[i:j] is the unread portion of the current chunk's payload.
+ // The low bound, i, excludes the chunk header.
+ i, j int
+ // n is the number of bytes of buf that are valid. Once reading has started,
+ // only the final block can have n < blockSize.
+ n int
+ // last is whether the current chunk is the last chunk of the journal.
+ last bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewReader returns a new reader. The dropper may be nil, and if
+// strict is true then a corrupted or invalid chunk will halt the journal
+// reader entirely.
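+//
+// A sketch of a dropper that merely logs dropped chunks (logDropper is a
+// hypothetical helper, not part of this package):
+//
+//	type logDropper struct{}
+//
+//	func (logDropper) Drop(err error) { log.Println("journal:", err) }
+//
+//	r := NewReader(f, logDropper{}, false, true)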
+func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { + return &Reader{ + r: r, + dropper: dropper, + strict: strict, + checksum: checksum, + last: true, + } +} + +var errSkip = errors.New("leveldb/journal: skipped") + +func (r *Reader) corrupt(n int, reason string, skip bool) error { + if r.dropper != nil { + r.dropper.Drop(ErrCorrupted{n, reason}) + } + if r.strict && !skip { + r.err = ErrCorrupted{n, reason} + return r.err + } + return errSkip +} + +// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the +// next block into the buffer if necessary. +func (r *Reader) nextChunk(first bool) error { + for { + if r.j+headerSize <= r.n { + checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) + length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) + chunkType := r.buf[r.j+6] + + if checksum == 0 && length == 0 && chunkType == 0 { + // Drop entire block. + m := r.n - r.j + r.i = r.n + r.j = r.n + return r.corrupt(m, "zero header", false) + } else { + m := r.n - r.j + r.i = r.j + headerSize + r.j = r.j + headerSize + int(length) + if r.j > r.n { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(m, "chunk length overflows block", false) + } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(m, "checksum mismatch", false) + } + } + if first && chunkType != fullChunkType && chunkType != firstChunkType { + m := r.j - r.i + r.i = r.j + // Report the error, but skip it. + return r.corrupt(m+headerSize, "orphan chunk", true) + } + r.last = chunkType == fullChunkType || chunkType == lastChunkType + return nil + } + + // The last block. + if r.n < blockSize && r.n > 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + + // Read block. + n, err := io.ReadFull(r.r, r.buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return err + } + if n == 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + r.i, r.j, r.n = 0, 0, n + } +} + +// Next returns a reader for the next journal. It returns io.EOF if there are no +// more journals. The reader returned becomes stale after the next Next call, +// and should no longer be used. If strict is false, the reader will returns +// io.ErrUnexpectedEOF error when found corrupted journal. +func (r *Reader) Next() (io.Reader, error) { + r.seq++ + if r.err != nil { + return nil, r.err + } + r.i = r.j + for { + if err := r.nextChunk(true); err == nil { + break + } else if err != errSkip { + return nil, err + } + } + return &singleReader{r, r.seq, nil}, nil +} + +// Reset resets the journal reader, allows reuse of the journal reader. Reset returns +// last accumulated error. 
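+//
+// For example (a sketch; f1 and f2 are hypothetical io.Readers):
+//
+//	r := NewReader(f1, nil, true, true)
+//	// ... read journals from f1 ...
+//	if err := r.Reset(f2, nil, true, true); err != nil {
+//		// err is the error accumulated while reading f1
+//	}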
+func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { + r.seq++ + err := r.err + r.r = reader + r.dropper = dropper + r.strict = strict + r.checksum = checksum + r.i = 0 + r.j = 0 + r.n = 0 + r.last = true + r.err = nil + return err +} + +type singleReader struct { + r *Reader + seq int + err error +} + +func (x *singleReader) Read(p []byte) (int, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + n := copy(p, r.buf[r.i:r.j]) + r.i += n + return n, nil +} + +func (x *singleReader) ReadByte() (byte, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + c := r.buf[r.i] + r.i++ + return c, nil +} + +// Writer writes journals to an underlying io.Writer. +type Writer struct { + // w is the underlying writer. + w io.Writer + // seq is the sequence number of the current journal. + seq int + // f is w as a flusher. + f flusher + // buf[i:j] is the bytes that will become the current chunk. + // The low bound, i, includes the chunk header. + i, j int + // buf[:written] has already been written to w. + // written is zero unless Flush has been called. + written int + // first is whether the current chunk is the first chunk of the journal. + first bool + // pending is whether a chunk is buffered but not yet written. + pending bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewWriter returns a new Writer. +func NewWriter(w io.Writer) *Writer { + f, _ := w.(flusher) + return &Writer{ + w: w, + f: f, + } +} + +// fillHeader fills in the header for the pending chunk. +func (w *Writer) fillHeader(last bool) { + if w.i+headerSize > w.j || w.j > blockSize { + panic("leveldb/journal: bad writer state") + } + if last { + if w.first { + w.buf[w.i+6] = fullChunkType + } else { + w.buf[w.i+6] = lastChunkType + } + } else { + if w.first { + w.buf[w.i+6] = firstChunkType + } else { + w.buf[w.i+6] = middleChunkType + } + } + binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) + binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) +} + +// writeBlock writes the buffered block to the underlying writer, and reserves +// space for the next chunk's header. +func (w *Writer) writeBlock() { + _, w.err = w.w.Write(w.buf[w.written:]) + w.i = 0 + w.j = headerSize + w.written = 0 +} + +// writePending finishes the current journal and writes the buffer to the +// underlying writer. +func (w *Writer) writePending() { + if w.err != nil { + return + } + if w.pending { + w.fillHeader(true) + w.pending = false + } + _, w.err = w.w.Write(w.buf[w.written:w.j]) + w.written = w.j +} + +// Close finishes the current journal and closes the writer. 
+func (w *Writer) Close() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + w.err = errors.New("leveldb/journal: closed Writer") + return nil +} + +// Flush finishes the current journal, writes to the underlying writer, and +// flushes it if that writer implements interface{ Flush() error }. +func (w *Writer) Flush() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + if w.f != nil { + w.err = w.f.Flush() + return w.err + } + return nil +} + +// Reset resets the journal writer, allows reuse of the journal writer. Reset +// will also closes the journal writer if not already. +func (w *Writer) Reset(writer io.Writer) (err error) { + w.seq++ + if w.err == nil { + w.writePending() + err = w.err + } + w.w = writer + w.f, _ = writer.(flusher) + w.i = 0 + w.j = 0 + w.written = 0 + w.first = false + w.pending = false + w.err = nil + return +} + +// Next returns a writer for the next journal. The writer returned becomes stale +// after the next Close, Flush or Next call, and should no longer be used. +func (w *Writer) Next() (io.Writer, error) { + w.seq++ + if w.err != nil { + return nil, w.err + } + if w.pending { + w.fillHeader(true) + } + w.i = w.j + w.j = w.j + headerSize + // Check if there is room in the block for the header. + if w.j > blockSize { + // Fill in the rest of the block with zeroes. + for k := w.i; k < blockSize; k++ { + w.buf[k] = 0 + } + w.writeBlock() + if w.err != nil { + return nil, w.err + } + } + w.first = true + w.pending = true + return singleWriter{w, w.seq}, nil +} + +type singleWriter struct { + w *Writer + seq int +} + +func (x singleWriter) Write(p []byte) (int, error) { + w := x.w + if w.seq != x.seq { + return 0, errors.New("leveldb/journal: stale writer") + } + if w.err != nil { + return 0, w.err + } + n0 := len(p) + for len(p) > 0 { + // Write a block, if it is full. + if w.j == blockSize { + w.fillHeader(false) + w.writeBlock() + if w.err != nil { + return 0, w.err + } + w.first = false + } + // Copy bytes into the buffer. + n := copy(w.buf[w.j:], p) + w.j += n + p = p[n:] + } + return n0, nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go new file mode 100644 index 0000000..0fcf225 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go @@ -0,0 +1,818 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135 +// License, authors and contributors informations can be found at bellow URLs respectively: +// https://code.google.com/p/leveldb-go/source/browse/LICENSE +// https://code.google.com/p/leveldb-go/source/browse/AUTHORS +// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS + +package journal + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math/rand" + "strings" + "testing" +) + +type dropper struct { + t *testing.T +} + +func (d dropper) Drop(err error) { + d.t.Log(err) +} + +func short(s string) string { + if len(s) < 64 { + return s + } + return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) +} + +// big returns a string of length n, composed of repetitions of partial. 
+func big(partial string, n int) string { + return strings.Repeat(partial, n/len(partial)+1)[:n] +} + +func TestEmpty(t *testing.T) { + buf := new(bytes.Buffer) + r := NewReader(buf, dropper{t}, true, true) + if _, err := r.Next(); err != io.EOF { + t.Fatalf("got %v, want %v", err, io.EOF) + } +} + +func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { + buf := new(bytes.Buffer) + + reset() + w := NewWriter(buf) + for { + s, ok := gen() + if !ok { + break + } + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write([]byte(s)); err != nil { + t.Fatal(err) + } + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + reset() + r := NewReader(buf, dropper{t}, true, true) + for { + s, ok := gen() + if !ok { + break + } + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + x, err := ioutil.ReadAll(rr) + if err != nil { + t.Fatal(err) + } + if string(x) != s { + t.Fatalf("got %q, want %q", short(string(x)), short(s)) + } + } + if _, err := r.Next(); err != io.EOF { + t.Fatalf("got %v, want %v", err, io.EOF) + } +} + +func testLiterals(t *testing.T, s []string) { + var i int + reset := func() { + i = 0 + } + gen := func() (string, bool) { + if i == len(s) { + return "", false + } + i++ + return s[i-1], true + } + testGenerator(t, reset, gen) +} + +func TestMany(t *testing.T) { + const n = 1e5 + var i int + reset := func() { + i = 0 + } + gen := func() (string, bool) { + if i == n { + return "", false + } + i++ + return fmt.Sprintf("%d.", i-1), true + } + testGenerator(t, reset, gen) +} + +func TestRandom(t *testing.T) { + const n = 1e2 + var ( + i int + r *rand.Rand + ) + reset := func() { + i, r = 0, rand.New(rand.NewSource(0)) + } + gen := func() (string, bool) { + if i == n { + return "", false + } + i++ + return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true + } + testGenerator(t, reset, gen) +} + +func TestBasic(t *testing.T) { + testLiterals(t, []string{ + strings.Repeat("a", 1000), + strings.Repeat("b", 97270), + strings.Repeat("c", 8000), + }) +} + +func TestBoundary(t *testing.T) { + for i := blockSize - 16; i < blockSize+16; i++ { + s0 := big("abcd", i) + for j := blockSize - 16; j < blockSize+16; j++ { + s1 := big("ABCDE", j) + testLiterals(t, []string{s0, s1}) + testLiterals(t, []string{s0, "", s1}) + testLiterals(t, []string{s0, "x", s1}) + } + } +} + +func TestFlush(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + // Write a couple of records. Everything should still be held + // in the record.Writer buffer, so that buf.Len should be 0. + w0, _ := w.Next() + w0.Write([]byte("0")) + w1, _ := w.Next() + w1.Write([]byte("11")) + if got, want := buf.Len(), 0; got != want { + t.Fatalf("buffer length #0: got %d want %d", got, want) + } + // Flush the record.Writer buffer, which should yield 17 bytes. + // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 17; got != want { + t.Fatalf("buffer length #1: got %d want %d", got, want) + } + // Do another write, one that isn't large enough to complete the block. + // The write should not have flowed through to buf. + w2, _ := w.Next() + w2.Write(bytes.Repeat([]byte("2"), 10000)) + if got, want := buf.Len(), 17; got != want { + t.Fatalf("buffer length #2: got %d want %d", got, want) + } + // Flushing should get us up to 10024 bytes written. + // 10024 = 17 + 7 + 10000. 
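+ // (7 being headerSize: a 4-byte checksum, a 2-byte length and a
+ // 1-byte chunk type.)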
+ if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 10024; got != want { + t.Fatalf("buffer length #3: got %d want %d", got, want) + } + // Do a bigger write, one that completes the current block. + // We should now have 32768 bytes (a complete block), without + // an explicit flush. + w3, _ := w.Next() + w3.Write(bytes.Repeat([]byte("3"), 40000)) + if got, want := buf.Len(), 32768; got != want { + t.Fatalf("buffer length #4: got %d want %d", got, want) + } + // Flushing should get us up to 50038 bytes written. + // 50038 = 10024 + 2*7 + 40000. There are two headers because + // the one record was split into two chunks. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 50038; got != want { + t.Fatalf("buffer length #5: got %d want %d", got, want) + } + // Check that reading those records give the right lengths. + r := NewReader(buf, dropper{t}, true, true) + wants := []int64{1, 2, 10000, 40000} + for i, want := range wants { + rr, _ := r.Next() + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #%d: %v", i, err) + } + if n != want { + t.Fatalf("read #%d: got %d bytes want %d", i, n, want) + } + } +} + +func TestNonExhaustiveRead(t *testing.T) { + const n = 100 + buf := new(bytes.Buffer) + p := make([]byte, 10) + rnd := rand.New(rand.NewSource(1)) + + w := NewWriter(buf) + for i := 0; i < n; i++ { + length := len(p) + rnd.Intn(3*blockSize) + s := string(uint8(i)) + "123456789abcdefgh" + ww, _ := w.Next() + ww.Write([]byte(big(s, length))) + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + r := NewReader(buf, dropper{t}, true, true) + for i := 0; i < n; i++ { + rr, _ := r.Next() + _, err := io.ReadFull(rr, p) + if err != nil { + t.Fatal(err) + } + want := string(uint8(i)) + "123456789" + if got := string(p); got != want { + t.Fatalf("read #%d: got %q want %q", i, got, want) + } + } +} + +func TestStaleReader(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w0, err := w.Next() + if err != nil { + t.Fatal(err) + } + w0.Write([]byte("0")) + w1, err := w.Next() + if err != nil { + t.Fatal(err) + } + w1.Write([]byte("11")) + if err := w.Close(); err != nil { + t.Fatal(err) + } + + r := NewReader(buf, dropper{t}, true, true) + r0, err := r.Next() + if err != nil { + t.Fatal(err) + } + r1, err := r.Next() + if err != nil { + t.Fatal(err) + } + p := make([]byte, 1) + if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale read #0: unexpected error: %v", err) + } + if _, err := r1.Read(p); err != nil { + t.Fatalf("fresh read #1: got %v want nil error", err) + } + if p[0] != '1' { + t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) + } +} + +func TestStaleWriter(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w0, err := w.Next() + if err != nil { + t.Fatal(err) + } + w1, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale write #0: unexpected error: %v", err) + } + if _, err := w1.Write([]byte("11")); err != nil { + t.Fatalf("fresh write #1: got %v want nil error", err) + } + if err := w.Flush(); err != nil { + t.Fatalf("flush: %v", err) + } + if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale write #1: unexpected error: %v", err) + } +} + +func TestCorrupt_MissingLastBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := 
NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // Cut the last block. + b := buf.Bytes()[:blockSize] + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read. + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if n != blockSize-1024 { + t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) + } + + // Second read. + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedFirstBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #0. + for i := 0; i < 1024; i++ { + b[i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (third record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize-headerSize) + 1; n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #1: %v", err) + } + if want := int64(blockSize-headerSize) + 2; n != want { + t.Fatalf("read #1: got %d bytes want %d", n, want) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. 
+ ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #1. + for i := 0; i < 1024; i++ { + b[blockSize+i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + // Third read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #2: %v", err) + } + if want := int64(blockSize-headerSize) + 2; n != want { + t.Fatalf("read #2: got %d bytes want %d", n, want) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedLastBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #3. + for i := len(b) - 1; i > len(b)-1024; i-- { + b[i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). 
+ rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #1: %v", err) + } + if want := int64(blockSize - headerSize); n != want { + t.Fatalf("read #1: got %d bytes want %d", n, want) + } + + // Third read (third record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #2: %v", err) + } + if want := int64(blockSize-headerSize) + 1; n != want { + t.Fatalf("read #2: got %d bytes want %d", n, want) + } + + // Fourth read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #3: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting record #1. + x := blockSize + binary.LittleEndian.PutUint16(b[x+4:], 0xffff) + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting record #1. + x := blockSize/2 + headerSize + binary.LittleEndian.PutUint16(b[x+4:], 0xffff) + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). 
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (third record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 0000000..b9acf93
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,139 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+type vType int
+
+func (t vType) String() string {
+	switch t {
+	case tDel:
+		return "d"
+	case tVal:
+		return "v"
+	}
+	return "x"
+}
+
+// Value types encoded as the last component of internal keys.
+// Don't modify; these values are saved to disk.
+const (
+	tDel vType = iota
+	tVal
+)
+
+// tSeek defines the vType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const tSeek = tVal
+
+const (
+	// Maximum value possible for a sequence number; the low 8 bits are
+	// used by the value type, so both can be packed together into a
+	// single 64-bit integer.
+	kMaxSeq uint64 = (uint64(1) << 56) - 1
+	// Maximum value possible for packed sequence number and type.
+	kMaxNum uint64 = (kMaxSeq << 8) | uint64(tSeek)
+)
+
+// Maximum number encoded in bytes.
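+// As an illustrative sketch of the packing (values are hypothetical):
+//
+//	newIKey([]byte("k"), 5, tVal)
+//	// => 'k' followed by the little-endian uint64 (5<<8)|1,
+//	//    i.e. the bytes 6b 01 05 00 00 00 00 00 00.
+//
+// kMaxNumBytes below is the same 8-byte trailer encoding of kMaxNum.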
+var kMaxNumBytes = make([]byte, 8) + +func init() { + binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) +} + +type iKey []byte + +func newIKey(ukey []byte, seq uint64, t vType) iKey { + if seq > kMaxSeq || t > tVal { + panic("invalid seq number or value type") + } + + b := make(iKey, len(ukey)+8) + copy(b, ukey) + binary.LittleEndian.PutUint64(b[len(ukey):], (seq<<8)|uint64(t)) + return b +} + +func parseIkey(p []byte) (ukey []byte, seq uint64, t vType, ok bool) { + if len(p) < 8 { + return + } + num := binary.LittleEndian.Uint64(p[len(p)-8:]) + seq, t = uint64(num>>8), vType(num&0xff) + if t > tVal { + return + } + ukey = p[:len(p)-8] + ok = true + return +} + +func validIkey(p []byte) bool { + _, _, _, ok := parseIkey(p) + return ok +} + +func (p iKey) assert() { + if p == nil { + panic("nil iKey") + } + if len(p) < 8 { + panic(fmt.Sprintf("invalid iKey %q, len=%d", []byte(p), len(p))) + } +} + +func (p iKey) ok() bool { + if len(p) < 8 { + return false + } + _, _, ok := p.parseNum() + return ok +} + +func (p iKey) ukey() []byte { + p.assert() + return p[:len(p)-8] +} + +func (p iKey) num() uint64 { + p.assert() + return binary.LittleEndian.Uint64(p[len(p)-8:]) +} + +func (p iKey) parseNum() (seq uint64, t vType, ok bool) { + if p == nil { + panic("nil iKey") + } + if len(p) < 8 { + return + } + num := p.num() + seq, t = uint64(num>>8), vType(num&0xff) + if t > tVal { + return 0, 0, false + } + ok = true + return +} + +func (p iKey) String() string { + if len(p) == 0 { + return "" + } + if seq, t, ok := p.parseNum(); ok { + return fmt.Sprintf("%s,%s%d", shorten(string(p.ukey())), t, seq) + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go new file mode 100644 index 0000000..918fcf7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go @@ -0,0 +1,123 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package leveldb + +import ( + "bytes" + "testing" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" +) + +var defaultIComparer = &iComparer{comparer.DefaultComparer} + +func ikey(key string, seq uint64, t vType) iKey { + return newIKey([]byte(key), uint64(seq), t) +} + +func shortSep(a, b []byte) []byte { + dst := make([]byte, len(a)) + dst = defaultIComparer.Separator(dst[:0], a, b) + if dst == nil { + return a + } + return dst +} + +func shortSuccessor(b []byte) []byte { + dst := make([]byte, len(b)) + dst = defaultIComparer.Successor(dst[:0], b) + if dst == nil { + return b + } + return dst +} + +func testSingleKey(t *testing.T, key string, seq uint64, vt vType) { + ik := ikey(key, seq, vt) + + if !bytes.Equal(ik.ukey(), []byte(key)) { + t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) + } + + if rseq, rt, ok := ik.parseNum(); ok { + if rseq != seq { + t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) + } + + if rt != vt { + t.Errorf("type does not equal, got %v, want %v", rt, vt) + } + } else { + t.Error("cannot parse seq and type") + } +} + +func TestIKey_EncodeDecode(t *testing.T) { + keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} + seqs := []uint64{ + 1, 2, 3, + (1 << 8) - 1, 1 << 8, (1 << 8) + 1, + (1 << 16) - 1, 1 << 16, (1 << 16) + 1, + (1 << 32) - 1, 1 << 32, (1 << 32) + 1, + } + for _, key := range keys { + for _, seq := range seqs { + testSingleKey(t, key, seq, tVal) + testSingleKey(t, "hello", 1, tDel) + } + } +} + +func assertBytes(t *testing.T, want, got []byte) { + if !bytes.Equal(got, want) { + t.Errorf("assert failed, got %v, want %v", got, want) + } +} + +func TestIKeyShortSeparator(t *testing.T) { + // When user keys are same + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foo", 99, tVal))) + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foo", 101, tVal))) + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foo", 100, tVal))) + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foo", 100, tDel))) + + // When user keys are misordered + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("bar", 99, tVal))) + + // When user keys are different, but correctly ordered + assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek), + shortSep(ikey("foo", 100, tVal), + ikey("hello", 200, tVal))) + + // When start user key is prefix of limit user key + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foobar", 200, tVal))) + + // When limit user key is prefix of start user key + assertBytes(t, ikey("foobar", 100, tVal), + shortSep(ikey("foobar", 100, tVal), + ikey("foo", 200, tVal))) +} + +func TestIKeyShortestSuccessor(t *testing.T) { + assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek), + shortSuccessor(ikey("foo", 100, tVal))) + assertBytes(t, ikey("\xff\xff", 100, tVal), + shortSuccessor(ikey("\xff\xff", 100, tVal))) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go new file mode 100644 index 0000000..e97041c --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go @@ -0,0 +1,20 @@ +package leveldb + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestLeveldb(t *testing.T) { + testutil.RunDefer() + + RegisterFailHandler(Fail) + RunSpecs(t, "Leveldb Suite") + + RegisterTestingT(t) + testutil.RunDefer("teardown") +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go new file mode 100644 index 0000000..d4bcea9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go @@ -0,0 +1,75 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package memdb + +import ( + "encoding/binary" + "math/rand" + "testing" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" +) + +func BenchmarkPut(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + b.ResetTimer() + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } +} + +func BenchmarkPutRandom(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) + } + + b.ResetTimer() + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } +} + +func BenchmarkGet(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } + + b.ResetTimer() + for i := range buf { + p.Get(buf[i][:]) + } +} + +func BenchmarkGetRandom(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + p.Get(buf[rand.Int()%b.N][:]) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go new file mode 100644 index 0000000..0f14965 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go @@ -0,0 +1,452 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package memdb provides in-memory key/value database implementation. 
+package memdb + +import ( + "math/rand" + "sync" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrNotFound = util.ErrNotFound +) + +const tMaxHeight = 12 + +type dbIter struct { + util.BasicReleaser + p *DB + slice *util.Range + node int + forward bool + key, value []byte +} + +func (i *dbIter) fill(checkStart, checkLimit bool) bool { + if i.node != 0 { + n := i.p.nodeData[i.node] + m := n + i.p.nodeData[i.node+nKey] + i.key = i.p.kvData[n:m] + if i.slice != nil { + switch { + case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: + fallthrough + case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: + i.node = 0 + goto bail + } + } + i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] + return true + } +bail: + i.key = nil + i.value = nil + return false +} + +func (i *dbIter) Valid() bool { + return i.node != 0 +} + +func (i *dbIter) First() bool { + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil { + i.node, _ = i.p.findGE(i.slice.Start, false) + } else { + i.node = i.p.nodeData[nNext] + } + return i.fill(false, true) +} + +func (i *dbIter) Last() bool { + if i.p == nil { + return false + } + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Limit != nil { + i.node = i.p.findLT(i.slice.Limit) + } else { + i.node = i.p.findLast() + } + return i.fill(true, false) +} + +func (i *dbIter) Seek(key []byte) bool { + if i.p == nil { + return false + } + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { + key = i.slice.Start + } + i.node, _ = i.p.findGE(key, false) + return i.fill(false, true) +} + +func (i *dbIter) Next() bool { + if i.p == nil { + return false + } + if i.node == 0 { + if !i.forward { + return i.First() + } + return false + } + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.nodeData[i.node+nNext] + return i.fill(false, true) +} + +func (i *dbIter) Prev() bool { + if i.p == nil { + return false + } + if i.node == 0 { + if i.forward { + return i.Last() + } + return false + } + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.findLT(i.key) + return i.fill(true, false) +} + +func (i *dbIter) Key() []byte { + return i.key +} + +func (i *dbIter) Value() []byte { + return i.value +} + +func (i *dbIter) Error() error { return nil } + +func (i *dbIter) Release() { + if i.p != nil { + i.p = nil + i.node = 0 + i.key = nil + i.value = nil + i.BasicReleaser.Release() + } +} + +const ( + nKV = iota + nKey + nVal + nHeight + nNext +) + +// DB is an in-memory key/value database. 
+type DB struct { + cmp comparer.BasicComparer + rnd *rand.Rand + + mu sync.RWMutex + kvData []byte + // Node data: + // [0] : KV offset + // [1] : Key length + // [2] : Value length + // [3] : Height + // [3..height] : Next nodes + nodeData []int + prevNode [tMaxHeight]int + maxHeight int + n int + kvSize int +} + +func (p *DB) randHeight() (h int) { + const branching = 4 + h = 1 + for h < tMaxHeight && p.rnd.Int()%branching == 0 { + h++ + } + return +} + +func (p *DB) findGE(key []byte, prev bool) (int, bool) { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + cmp := 1 + if next != 0 { + o := p.nodeData[next] + cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) + } + if cmp < 0 { + // Keep searching in this list + node = next + } else { + if prev { + p.prevNode[h] = node + } else if cmp == 0 { + return next, true + } + if h == 0 { + return next, cmp == 0 + } + h-- + } + } +} + +func (p *DB) findLT(key []byte) int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + o := p.nodeData[next] + if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +func (p *DB) findLast() int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + if next == 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// +// It is safe to modify the contents of the arguments after Put returns. +func (p *DB) Put(key []byte, value []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + if node, exact := p.findGE(key, true); exact { + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + p.nodeData[node] = kvOffset + m := p.nodeData[node+nVal] + p.nodeData[node+nVal] = len(value) + p.kvSize += len(value) - m + return nil + } + + h := p.randHeight() + if h > p.maxHeight { + for i := p.maxHeight; i < h; i++ { + p.prevNode[i] = 0 + } + p.maxHeight = h + } + + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + // Node + node := len(p.nodeData) + p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) + for i, n := range p.prevNode[:h] { + m := n + 4 + i + p.nodeData = append(p.nodeData, p.nodeData[m]) + p.nodeData[m] = node + } + + p.kvSize += len(key) + len(value) + p.n++ + return nil +} + +// Delete deletes the value for the given key. It returns ErrNotFound if +// the DB does not contain the key. +// +// It is safe to modify the contents of the arguments after Delete returns. +func (p *DB) Delete(key []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + node, exact := p.findGE(key, true) + if !exact { + return ErrNotFound + } + + h := p.nodeData[node+nHeight] + for i, n := range p.prevNode[:h] { + m := n + 4 + i + p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] + } + + p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] + p.n-- + return nil +} + +// Contains returns true if the given key are in the DB. +// +// It is safe to modify the contents of the arguments after Contains returns. +func (p *DB) Contains(key []byte) bool { + p.mu.RLock() + _, exact := p.findGE(key, false) + p.mu.RUnlock() + return exact +} + +// Get gets the value for the given key. It returns error.ErrNotFound if the +// DB does not contain the key. 
+// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Get returns. +func (p *DB) Get(key []byte) (value []byte, err error) { + p.mu.RLock() + if node, exact := p.findGE(key, false); exact { + o := p.nodeData[node] + p.nodeData[node+nKey] + value = p.kvData[o : o+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +// Find finds key/value pair whose key is greater than or equal to the +// given key. It returns ErrNotFound if the table doesn't contain +// such pair. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Find returns. +func (p *DB) Find(key []byte) (rkey, value []byte, err error) { + p.mu.RLock() + if node, _ := p.findGE(key, false); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +// NewIterator returns an iterator of the DB. +// The returned iterator is not goroutine-safe, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently with modifying its +// underlying DB. However, the resultant key/value pairs are not guaranteed +// to be a consistent snapshot of the DB at a particular point in time. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// The iterator must be released after use, by calling Release method. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { + return &dbIter{p: p, slice: slice} +} + +// Capacity returns keys/values buffer capacity. +func (p *DB) Capacity() int { + p.mu.RLock() + defer p.mu.RUnlock() + return cap(p.kvData) +} + +// Size returns sum of keys and values length. Note that deleted +// key/value will not be accouted for, but it will still consume +// the buffer, since the buffer is append only. +func (p *DB) Size() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.kvSize +} + +// Free returns keys/values free buffer before need to grow. +func (p *DB) Free() int { + p.mu.RLock() + defer p.mu.RUnlock() + return cap(p.kvData) - len(p.kvData) +} + +// Len returns the number of entries in the DB. +func (p *DB) Len() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.n +} + +// Reset resets the DB to initial empty state. Allows reuse the buffer. +func (p *DB) Reset() { + p.rnd = rand.New(rand.NewSource(0xdeadbeef)) + p.maxHeight = 1 + p.n = 0 + p.kvSize = 0 + p.kvData = p.kvData[:0] + p.nodeData = p.nodeData[:4+tMaxHeight] + p.nodeData[nKV] = 0 + p.nodeData[nKey] = 0 + p.nodeData[nVal] = 0 + p.nodeData[nHeight] = tMaxHeight + for n := 0; n < tMaxHeight; n++ { + p.nodeData[4+n] = 0 + p.prevNode[n] = 0 + } +} + +// New creates a new initalized in-memory key/value DB. The capacity +// is the initial key/value buffer capacity. The capacity is advisory, +// not enforced. +// +// The returned DB instance is goroutine-safe. 
+func New(cmp comparer.BasicComparer, capacity int) *DB { + p := &DB{ + cmp: cmp, + rnd: rand.New(rand.NewSource(0xdeadbeef)), + maxHeight: 1, + kvData: make([]byte, 0, capacity), + nodeData: make([]int, 4+tMaxHeight), + } + p.nodeData[nHeight] = tMaxHeight + return p +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go new file mode 100644 index 0000000..9e699ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go @@ -0,0 +1,17 @@ +package memdb + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestMemdb(t *testing.T) { + testutil.RunDefer() + + RegisterFailHandler(Fail) + RunSpecs(t, "Memdb Suite") +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go new file mode 100644 index 0000000..35386fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go @@ -0,0 +1,135 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package memdb + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { + p.mu.RLock() + if node := p.findLT(key); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +func (p *DB) TestFindLast() (rkey, value []byte, err error) { + p.mu.RLock() + if node := p.findLast(); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +func (p *DB) TestPut(key []byte, value []byte) error { + p.Put(key, value) + return nil +} + +func (p *DB) TestDelete(key []byte) error { + p.Delete(key) + return nil +} + +func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { + return p.Find(key) +} + +func (p *DB) TestGet(key []byte) (value []byte, err error) { + return p.Get(key) +} + +func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { + return p.NewIterator(slice) +} + +var _ = testutil.Defer(func() { + Describe("Memdb", func() { + Describe("write test", func() { + It("should do write correctly", func() { + db := New(comparer.DefaultComparer, 0) + t := testutil.DBTesting{ + DB: db, + Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), + PostFn: func(t *testutil.DBTesting) { + Expect(db.Len()).Should(Equal(t.Present.Len())) + Expect(db.Size()).Should(Equal(t.Present.Size())) + switch t.Act { + case testutil.DBPut, testutil.DBOverwrite: + 
Expect(db.Contains(t.ActKey)).Should(BeTrue()) + default: + Expect(db.Contains(t.ActKey)).Should(BeFalse()) + } + }, + } + testutil.DoDBTesting(&t) + }) + }) + + Describe("read test", func() { + testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { + // Building the DB. + db := New(comparer.DefaultComparer, 0) + kv.IterateShuffled(nil, func(i int, key, value []byte) { + db.Put(key, value) + }) + + if kv.Len() > 1 { + It("Should find correct keys with findLT", func() { + testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { + key_, key, _ := kv.IndexInexact(i + 1) + expectedKey, expectedValue := kv.Index(i) + + // Using key that exist. + rkey, rvalue, err := db.TestFindLT(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) + Expect(rkey).Should(Equal(expectedKey), "Key") + Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) + + // Using key that doesn't exist. + rkey, rvalue, err = db.TestFindLT(key_) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) + Expect(rkey).Should(Equal(expectedKey)) + Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) + }) + }) + } + + if kv.Len() > 0 { + It("Should find last key with findLast", func() { + key, value := kv.Index(kv.Len() - 1) + rkey, rvalue, err := db.TestFindLast() + Expect(err).ShouldNot(HaveOccurred()) + Expect(rkey).Should(Equal(key)) + Expect(rvalue).Should(Equal(value)) + }) + } + + return db + }, nil, nil) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go new file mode 100644 index 0000000..ed9df93 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go @@ -0,0 +1,326 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package opt provides sets of options used by LevelDB. +package opt + +import ( + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" +) + +const ( + KiB = 1024 + MiB = KiB * 1024 + GiB = MiB * 1024 +) + +const ( + DefaultBlockCacheSize = 8 * MiB + DefaultBlockRestartInterval = 16 + DefaultBlockSize = 4 * KiB + DefaultCompressionType = SnappyCompression + DefaultCachedOpenFiles = 500 + DefaultWriteBuffer = 4 * MiB +) + +type noCache struct{} + +func (noCache) SetCapacity(capacity int) {} +func (noCache) Capacity() int { return 0 } +func (noCache) Used() int { return 0 } +func (noCache) Size() int { return 0 } +func (noCache) NumObjects() int { return 0 } +func (noCache) GetNamespace(id uint64) cache.Namespace { return nil } +func (noCache) PurgeNamespace(id uint64, fin cache.PurgeFin) {} +func (noCache) ZapNamespace(id uint64) {} +func (noCache) Purge(fin cache.PurgeFin) {} +func (noCache) Zap() {} + +var NoCache cache.Cache = noCache{} + +// Compression is the per-block compression algorithm to use. 
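+// The zero value is DefaultCompression, which Options.GetCompression
+// resolves to DefaultCompressionType (snappy).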
+type Compression uint + +func (c Compression) String() string { + switch c { + case DefaultCompression: + return "default" + case NoCompression: + return "none" + case SnappyCompression: + return "snappy" + } + return "invalid" +} + +const ( + DefaultCompression Compression = iota + NoCompression + SnappyCompression + nCompression +) + +// Strict is the DB strict level. +type Strict uint + +const ( + // If present then a corrupted or invalid chunk or block in manifest + // journal will cause an error istead of being dropped. + StrictManifest Strict = 1 << iota + + // If present then a corrupted or invalid chunk or block in journal + // will cause an error istead of being dropped. + StrictJournal + + // If present then journal chunk checksum will be verified. + StrictJournalChecksum + + // If present then an invalid key/value pair will cause an error + // instead of being skipped. + StrictIterator + + // If present then 'sorted table' block checksum will be verified. + StrictBlockChecksum + + // StrictAll enables all strict flags. + StrictAll = StrictManifest | StrictJournal | StrictJournalChecksum | StrictIterator | StrictBlockChecksum + + // DefaultStrict is the default strict flags. Specify any strict flags + // will override default strict flags as whole (i.e. not OR'ed). + DefaultStrict = StrictJournalChecksum | StrictBlockChecksum + + // NoStrict disables all strict flags. Override default strict flags. + NoStrict = ^StrictAll +) + +// Options holds the optional parameters for the DB at large. +type Options struct { + // AltFilters defines one or more 'alternative filters'. + // 'alternative filters' will be used during reads if a filter block + // does not match with the 'effective filter'. + // + // The default value is nil + AltFilters []filter.Filter + + // BlockCache provides per-block caching for LevelDB. Specify NoCache to + // disable block caching. + // + // By default LevelDB will create LRU-cache with capacity of 8MiB. + BlockCache cache.Cache + + // BlockRestartInterval is the number of keys between restart points for + // delta encoding of keys. + // + // The default value is 16. + BlockRestartInterval int + + // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' + // block. + // + // The default value is 4KiB. + BlockSize int + + // CachedOpenFiles defines number of open files to kept around when not + // in-use, the counting includes still in-use files. + // Set this to negative value to disable caching. + // + // The default value is 500. + CachedOpenFiles int + + // Comparer defines a total ordering over the space of []byte keys: a 'less + // than' relationship. The same comparison algorithm must be used for reads + // and writes over the lifetime of the DB. + // + // The default value uses the same ordering as bytes.Compare. + Comparer comparer.Comparer + + // Compression defines the per-block compression to use. + // + // The default value (DefaultCompression) uses snappy compression. + Compression Compression + + // ErrorIfExist defines whether an error should returned if the DB already + // exist. + // + // The default value is false. + ErrorIfExist bool + + // ErrorIfMissing defines whether an error should returned if the DB is + // missing. If false then the database will be created if missing, otherwise + // an error will be returned. + // + // The default value is false. + ErrorIfMissing bool + + // Filter defines an 'effective filter' to use. An 'effective filter' + // if defined will be used to generate per-table filter block. 
+ // The filter name will be stored on disk. + // During reads LevelDB will try to find matching filter from + // 'effective filter' and 'alternative filters'. + // + // Filter can be changed after a DB has been created. It is recommended + // to put old filter to the 'alternative filters' to mitigate lack of + // filter during transition period. + // + // A filter is used to reduce disk reads when looking for a specific key. + // + // The default value is nil. + Filter filter.Filter + + // Strict defines the DB strict level. + Strict Strict + + // WriteBuffer defines maximum size of a 'memdb' before flushed to + // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk + // unsorted journal. + // + // LevelDB may held up to two 'memdb' at the same time. + // + // The default value is 4MiB. + WriteBuffer int +} + +func (o *Options) GetAltFilters() []filter.Filter { + if o == nil { + return nil + } + return o.AltFilters +} + +func (o *Options) GetBlockCache() cache.Cache { + if o == nil { + return nil + } + return o.BlockCache +} + +func (o *Options) GetBlockRestartInterval() int { + if o == nil || o.BlockRestartInterval <= 0 { + return DefaultBlockRestartInterval + } + return o.BlockRestartInterval +} + +func (o *Options) GetBlockSize() int { + if o == nil || o.BlockSize <= 0 { + return DefaultBlockSize + } + return o.BlockSize +} + +func (o *Options) GetCachedOpenFiles() int { + if o == nil || o.CachedOpenFiles == 0 { + return DefaultCachedOpenFiles + } else if o.CachedOpenFiles < 0 { + return 0 + } + return o.CachedOpenFiles +} + +func (o *Options) GetComparer() comparer.Comparer { + if o == nil || o.Comparer == nil { + return comparer.DefaultComparer + } + return o.Comparer +} + +func (o *Options) GetCompression() Compression { + if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { + return DefaultCompressionType + } + return o.Compression +} + +func (o *Options) GetErrorIfExist() bool { + if o == nil { + return false + } + return o.ErrorIfExist +} + +func (o *Options) GetErrorIfMissing() bool { + if o == nil { + return false + } + return o.ErrorIfMissing +} + +func (o *Options) GetFilter() filter.Filter { + if o == nil { + return nil + } + return o.Filter +} + +func (o *Options) GetStrict(strict Strict) bool { + if o == nil || o.Strict == 0 { + return DefaultStrict&strict != 0 + } + return o.Strict&strict != 0 +} + +func (o *Options) GetWriteBuffer() int { + if o == nil || o.WriteBuffer <= 0 { + return DefaultWriteBuffer + } + return o.WriteBuffer +} + +// ReadOptions holds the optional parameters for 'read operation'. The +// 'read operation' includes Get, Find and NewIterator. +type ReadOptions struct { + // DontFillCache defines whether block reads for this 'read operation' + // should be cached. If false then the block will be cached. This does + // not affects already cached block. + // + // The default value is false. + DontFillCache bool + + // Strict overrides global DB strict level. Only StrictIterator and + // StrictBlockChecksum that does have effects here. + Strict Strict +} + +func (ro *ReadOptions) GetDontFillCache() bool { + if ro == nil { + return false + } + return ro.DontFillCache +} + +func (ro *ReadOptions) GetStrict(strict Strict) bool { + if ro == nil { + return false + } + return ro.Strict&strict != 0 +} + +// WriteOptions holds the optional parameters for 'write operation'. The +// 'write operation' includes Write, Put and Delete. 
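+//
+// For example (a sketch; the DB write methods live elsewhere in this
+// package tree):
+//
+//	wo := &opt.WriteOptions{Sync: true}
+//	err := db.Put(key, value, wo)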
+type WriteOptions struct { + // Sync is whether to sync underlying writes from the OS buffer cache + // through to actual disk, if applicable. Setting Sync can result in + // slower writes. + // + // If false, and the machine crashes, then some recent writes may be lost. + // Note that if it is just the process that crashes (and the machine does + // not) then no writes will be lost. + // + // In other words, Sync being false has the same semantics as a write + // system call. Sync being true means write followed by fsync. + // + // The default value is false. + Sync bool +} + +func (wo *WriteOptions) GetSync() bool { + if wo == nil { + return false + } + return wo.Sync +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go new file mode 100644 index 0000000..bbe00f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go @@ -0,0 +1,41 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" +) + +func (s *session) setOptions(o *opt.Options) { + s.o = &opt.Options{} + if o != nil { + *s.o = *o + } + // Alternative filters. + if filters := o.GetAltFilters(); len(filters) > 0 { + s.o.AltFilters = make([]filter.Filter, len(filters)) + for i, filter := range filters { + s.o.AltFilters[i] = &iFilter{filter} + } + } + // Block cache. + switch o.GetBlockCache() { + case nil: + s.o.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize) + case opt.NoCache: + s.o.BlockCache = nil + } + // Comparer. + s.icmp = &iComparer{o.GetComparer()} + s.o.Comparer = s.icmp + // Filter. + if filter := o.GetFilter(); filter != nil { + s.o.Filter = &iFilter{filter} + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go new file mode 100644 index 0000000..060f6c6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go @@ -0,0 +1,396 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "io" + "os" + "sync" + "sync/atomic" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// session represent a persistent database session. +type session struct { + // Need 64-bit alignment. 
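+	// (The counters below are accessed with sync/atomic, which requires
+	// 64-bit alignment of the first field on 32-bit platforms.)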
+ stFileNum uint64 // current unused file number + stJournalNum uint64 // current journal file number; need external synchronization + stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stSeq uint64 // last mem compacted seq; need external synchronization + stTempFileNum uint64 + + stor storage.Storage + storLock util.Releaser + o *opt.Options + icmp *iComparer + tops *tOps + + manifest *journal.Writer + manifestWriter storage.Writer + manifestFile storage.File + + stCptrs [kNumLevels]iKey // compact pointers; need external synchronization + stVersion *version // current version + vmu sync.Mutex +} + +// Creates new initialized session instance. +func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { + if stor == nil { + return nil, os.ErrInvalid + } + storLock, err := stor.Lock() + if err != nil { + return + } + s = &session{ + stor: stor, + storLock: storLock, + } + s.setOptions(o) + s.tops = newTableOps(s, s.o.GetCachedOpenFiles()) + s.setVersion(&version{s: s}) + s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock D·DeletedEntry L·Level Q·SeqNum T·TimeElapsed") + return +} + +// Close session. +func (s *session) close() { + s.tops.close() + if bc := s.o.GetBlockCache(); bc != nil { + bc.Purge(nil) + } + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + s.manifest = nil + s.manifestWriter = nil + s.manifestFile = nil + s.stVersion = nil +} + +// Release session lock. +func (s *session) release() { + s.storLock.Release() +} + +// Create a new database session; need external synchronization. +func (s *session) create() error { + // create manifest + return s.newManifest(nil, nil) +} + +// Recover a database session; need external synchronization. +func (s *session) recover() (err error) { + defer func() { + if os.IsNotExist(err) { + // Don't return os.ErrNotExist if the underlying storage contains + // other files that belong to LevelDB. So the DB won't get trashed. 
+ if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { + err = ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest file missing")} + } + } + }() + + file, err := s.stor.GetManifest() + if err != nil { + return + } + + reader, err := file.Open() + if err != nil { + return + } + defer reader.Close() + strict := s.o.GetStrict(opt.StrictManifest) + jr := journal.NewReader(reader, dropper{s, file}, strict, true) + + staging := s.version_NB().newStaging() + rec := &sessionRecord{} + for { + var r io.Reader + r, err = jr.Next() + if err != nil { + if err == io.EOF { + err = nil + break + } + return + } + + err = rec.decode(r) + if err == nil { + // save compact pointers + for _, r := range rec.compactionPointers { + s.stCptrs[r.level] = iKey(r.ikey) + } + // commit record to version staging + staging.commit(rec) + } else if strict { + return ErrCorrupted{Type: CorruptedManifest, Err: err} + } else { + s.logf("manifest error: %v (skipped)", err) + } + rec.resetCompactionPointers() + rec.resetAddedTables() + rec.resetDeletedTables() + } + + switch { + case !rec.has(recComparer): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing comparer name")} + case rec.comparer != s.icmp.uName(): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.icmp.uName() + "', " + "got '" + rec.comparer + "'")} + case !rec.has(recNextNum): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing next file number")} + case !rec.has(recJournalNum): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing journal file number")} + case !rec.has(recSeq): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing seq number")} + } + + s.manifestFile = file + s.setVersion(staging.finish()) + s.setFileNum(rec.nextNum) + s.recordCommited(rec) + return nil +} + +// Commit session; need external synchronization. +func (s *session) commit(r *sessionRecord) (err error) { + // spawn new version based on current version + nv := s.version_NB().spawn(r) + + if s.manifest == nil { + // manifest journal writer not yet created, create one + err = s.newManifest(r, nv) + } else { + err = s.flushManifest(r) + } + + // finally, apply new version if no error rise + if err == nil { + s.setVersion(nv) + } + + return +} + +// Pick a compaction based on current state; need external synchronization. +func (s *session) pickCompaction() *compaction { + v := s.version_NB() + + var level int + var t0 tFiles + if v.cScore >= 1 { + level = v.cLevel + cptr := s.stCptrs[level] + tables := v.tables[level] + for _, t := range tables { + if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { + t0 = append(t0, t) + break + } + } + if len(t0) == 0 { + t0 = append(t0, tables[0]) + } + } else { + if p := atomic.LoadPointer(&v.cSeek); p != nil { + ts := (*tSet)(p) + level = ts.level + t0 = append(t0, ts.table) + } else { + return nil + } + } + + c := &compaction{s: s, v: v, level: level} + if level == 0 { + imin, imax := t0.getRange(s.icmp) + t0 = v.tables[0].getOverlaps(t0[:0], s.icmp, imin.ukey(), imax.ukey(), true) + } + + c.tables[0] = t0 + c.expand() + return c +} + +// Create compaction from given level and range; need external synchronization. 
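+// It returns nil when no table at that level overlaps the range. A
+// sketch of intended use (the user-key bounds are illustrative):
+//
+//	if c := s.getCompactionRange(0, []byte("a"), []byte("z")); c != nil {
+//		iter := c.newIterator()
+//		// ... merge the inputs and write out new tables ...
+//	}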
+func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { + v := s.version_NB() + + t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) + if len(t0) == 0 { + return nil + } + + // Avoid compacting too much in one shot in case the range is large. + // But we cannot do this for level-0 since level-0 files can overlap + // and we must not pick one file and drop another older file if the + // two files overlap. + if level > 0 { + limit := uint64(kMaxTableSize) + total := uint64(0) + for i, t := range t0 { + total += t.size + if total >= limit { + s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) + t0 = t0[:i+1] + break + } + } + } + + c := &compaction{s: s, v: v, level: level} + c.tables[0] = t0 + c.expand() + return c +} + +// compaction represent a compaction state. +type compaction struct { + s *session + v *version + + level int + tables [2]tFiles + + gp tFiles + gpidx int + seenKey bool + overlappedBytes uint64 + imin, imax iKey + + tPtrs [kNumLevels]int +} + +// Expand compacted tables; need external synchronization. +func (c *compaction) expand() { + level := c.level + vt0, vt1 := c.v.tables[level], c.v.tables[level+1] + + t0, t1 := c.tables[0], c.tables[1] + imin, imax := t0.getRange(c.s.icmp) + t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) + // Get entire range covered by compaction. + amin, amax := append(t0, t1...).getRange(c.s.icmp) + + // See if we can grow the number of inputs in "level" without + // changing the number of "level+1" files we pick up. + if len(t1) > 0 { + exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), level == 0) + if len(exp0) > len(t0) && t1.size()+exp0.size() < kExpCompactionMaxBytes { + xmin, xmax := exp0.getRange(c.s.icmp) + exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) + if len(exp1) == len(t1) { + c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", + level, level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), + len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) + imin, imax = xmin, xmax + t0, t1 = exp0, exp1 + amin, amax = append(t0, t1...).getRange(c.s.icmp) + } + } + } + + // Compute the set of grandparent files that overlap this compaction + // (parent == level+1; grandparent == level+2) + if level+2 < kNumLevels { + c.gp = c.v.tables[level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) + } + + c.tables[0], c.tables[1] = t0, t1 + c.imin, c.imax = imin, imax +} + +// Check whether compaction is trivial. +func (c *compaction) trivial() bool { + return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= kMaxGrandParentOverlapBytes +} + +func (c *compaction) baseLevelForKey(ukey []byte) bool { + for level, tables := range c.v.tables[c.level+2:] { + for c.tPtrs[level] < len(tables) { + t := tables[c.tPtrs[level]] + if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { + // We've advanced far enough. + if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + // Key falls in this file's range, so definitely not base level. 
+ return false + } + break + } + c.tPtrs[level]++ + } + } + return true +} + +func (c *compaction) shouldStopBefore(ikey iKey) bool { + for ; c.gpidx < len(c.gp); c.gpidx++ { + gp := c.gp[c.gpidx] + if c.s.icmp.Compare(ikey, gp.imax) <= 0 { + break + } + if c.seenKey { + c.overlappedBytes += gp.size + } + } + c.seenKey = true + + if c.overlappedBytes > kMaxGrandParentOverlapBytes { + // Too much overlap for current output; start new output. + c.overlappedBytes = 0 + return true + } + return false +} + +// Creates an iterator. +func (c *compaction) newIterator() iterator.Iterator { + // Creates iterator slice. + icap := len(c.tables) + if c.level == 0 { + // Special case for level-0 + icap = len(c.tables[0]) + 1 + } + its := make([]iterator.Iterator, 0, icap) + + // Options. + ro := &opt.ReadOptions{ + DontFillCache: true, + } + strict := c.s.o.GetStrict(opt.StrictIterator) + + for i, tables := range c.tables { + if len(tables) == 0 { + continue + } + + // Level-0 is not sorted and may overlaps each other. + if c.level+i == 0 { + for _, t := range tables { + its = append(its, c.s.tops.newIterator(t, nil, ro)) + } + } else { + it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict, true) + its = append(its, it) + } + } + + return iterator.NewMergedIterator(its, c.s.icmp, true) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go new file mode 100644 index 0000000..2721295 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go @@ -0,0 +1,308 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "bufio" + "encoding/binary" + "errors" + "io" +) + +var errCorruptManifest = errors.New("leveldb: corrupt manifest") + +type byteReader interface { + io.Reader + io.ByteReader +} + +// These numbers are written to disk and should not be changed. 
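+// Each manifest entry is a uvarint tag from this list followed by a
+// tag-specific payload of uvarints and length-prefixed byte strings;
+// see encode and decode below.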
+const (
+	recComparer          = 1
+	recJournalNum        = 2
+	recNextNum           = 3
+	recSeq               = 4
+	recCompactionPointer = 5
+	recDeletedTable      = 6
+	recNewTable          = 7
+	// 8 was used for large value refs
+	recPrevJournalNum = 9
+)
+
+type cpRecord struct {
+	level int
+	ikey  iKey
+}
+
+type ntRecord struct {
+	level int
+	num   uint64
+	size  uint64
+	imin  iKey
+	imax  iKey
+}
+
+func (r ntRecord) makeFile(s *session) *tFile {
+	return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax)
+}
+
+type dtRecord struct {
+	level int
+	num   uint64
+}
+
+type sessionRecord struct {
+	hasRec             int
+	comparer           string
+	journalNum         uint64
+	prevJournalNum     uint64
+	nextNum            uint64
+	seq                uint64
+	compactionPointers []cpRecord
+	addedTables        []ntRecord
+	deletedTables      []dtRecord
+	scratch            [binary.MaxVarintLen64]byte
+	err                error
+}
+
+func (p *sessionRecord) has(rec int) bool {
+	return p.hasRec&(1<<uint(rec)) != 0
+}
+
+func (p *sessionRecord) setComparer(name string) {
+	p.hasRec |= 1 << recComparer
+	p.comparer = name
+}
+
+func (p *sessionRecord) setJournalNum(num uint64) {
+	p.hasRec |= 1 << recJournalNum
+	p.journalNum = num
+}
+
+func (p *sessionRecord) setPrevJournalNum(num uint64) {
+	p.hasRec |= 1 << recPrevJournalNum
+	p.prevJournalNum = num
+}
+
+func (p *sessionRecord) setNextNum(num uint64) {
+	p.hasRec |= 1 << recNextNum
+	p.nextNum = num
+}
+
+func (p *sessionRecord) setSeq(seq uint64) {
+	p.hasRec |= 1 << recSeq
+	p.seq = seq
+}
+
+func (p *sessionRecord) addCompactionPointer(level int, ikey iKey) {
+	p.hasRec |= 1 << recCompactionPointer
+	p.compactionPointers = append(p.compactionPointers, cpRecord{level, ikey})
+}
+
+func (p *sessionRecord) resetCompactionPointers() {
+	p.hasRec &= ^(1 << recCompactionPointer)
+	p.compactionPointers = p.compactionPointers[:0]
+}
+
+func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
+	p.hasRec |= 1 << recNewTable
+	p.addedTables = append(p.addedTables, ntRecord{level, num, size, imin, imax})
+}
+
+func (p *sessionRecord) resetAddedTables() {
+	p.hasRec &= ^(1 << recNewTable)
+	p.addedTables = p.addedTables[:0]
+}
+
+func (p *sessionRecord) deleteTable(level int, num uint64) {
+	p.hasRec |= 1 << recDeletedTable
+	p.deletedTables = append(p.deletedTables, dtRecord{level, num})
+}
+
+func (p *sessionRecord) resetDeletedTables() {
+	p.hasRec &= ^(1 << recDeletedTable)
+	p.deletedTables = p.deletedTables[:0]
+}
+
+func (p *sessionRecord) putUvarint(w io.Writer, x uint64) {
+	if p.err != nil {
+		return
+	}
+	n := binary.PutUvarint(p.scratch[:], x)
+	_, p.err = w.Write(p.scratch[:n])
+}
+
+func (p *sessionRecord) putBytes(w io.Writer, x []byte) {
+	if p.err != nil {
+		return
+	}
+	p.putUvarint(w, uint64(len(x)))
+	if p.err != nil {
+		return
+	}
+	_, p.err = w.Write(x)
+}
+
+func (p *sessionRecord) encode(w io.Writer) error {
+	p.err = nil
+	if p.has(recComparer) {
+		p.putUvarint(w, recComparer)
+		p.putBytes(w, []byte(p.comparer))
+	}
+	if p.has(recJournalNum) {
+		p.putUvarint(w, recJournalNum)
+		p.putUvarint(w, p.journalNum)
+	}
+	if p.has(recPrevJournalNum) {
+		p.putUvarint(w, recPrevJournalNum)
+		p.putUvarint(w, p.prevJournalNum)
+	}
+	if p.has(recNextNum) {
+		p.putUvarint(w, recNextNum)
+		p.putUvarint(w, p.nextNum)
+	}
+	if p.has(recSeq) {
+		p.putUvarint(w, recSeq)
+		p.putUvarint(w, p.seq)
+	}
+	for _, r := range p.compactionPointers {
+		p.putUvarint(w, recCompactionPointer)
+		p.putUvarint(w, uint64(r.level))
+		p.putBytes(w, r.ikey)
+	}
+	for _, r := range p.deletedTables {
+		p.putUvarint(w, recDeletedTable)
+		p.putUvarint(w, uint64(r.level))
+		p.putUvarint(w, r.num)
+	}
+	for _, r := range p.addedTables {
+		p.putUvarint(w, recNewTable)
+		p.putUvarint(w, uint64(r.level))
+		p.putUvarint(w, r.num)
+		p.putUvarint(w, r.size)
+		p.putBytes(w, r.imin)
+		p.putBytes(w, r.imax)
+	}
+	return p.err
+}
+
+func (p *sessionRecord) readUvarint(r io.ByteReader) uint64 {
+	if p.err != nil {
+		return 0
+	}
+	x, err := binary.ReadUvarint(r)
+	if err != nil {
+		p.err = err
+		return 0
+	}
+	return x
+}
+
+func (p *sessionRecord) readBytes(r byteReader) []byte {
+	if p.err != nil {
+		return nil
+	}
+	n := p.readUvarint(r)
+	if p.err != nil {
+		return nil
+	}
+	x := make([]byte, n)
+	_, p.err = io.ReadFull(r, x)
+	if p.err != nil {
+		return nil
+	}
+	return x
+}
+
+func (p *sessionRecord) readLevel(r io.ByteReader) int {
+	if p.err != nil {
+		return 0
+	}
+	x := p.readUvarint(r)
+	if p.err != nil {
+		return 0
+	}
+	if x >= kNumLevels {
+		p.err = errCorruptManifest
+		return 0
+	}
+	return int(x)
+}
+
+func (p *sessionRecord) decode(r io.Reader) error {
+	br, ok := r.(byteReader)
+	if !ok {
+		br = bufio.NewReader(r)
+	}
+	p.err = nil
+	for p.err == nil {
+		rec, err := binary.ReadUvarint(br)
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			return err
+		}
+		switch rec {
+		case recComparer:
+			x := p.readBytes(br)
+			if p.err == nil {
+				p.setComparer(string(x))
+			}
+		case recJournalNum:
+			x := p.readUvarint(br)
+			if p.err == nil {
+				p.setJournalNum(x)
+			}
+		case recPrevJournalNum:
+			x := p.readUvarint(br)
+			if p.err == nil {
+				p.setPrevJournalNum(x)
+			}
+		case recNextNum:
+			x := p.readUvarint(br)
+			if p.err == nil {
+				p.setNextNum(x)
+			}
+		case recSeq:
+			x := p.readUvarint(br)
+			if p.err == nil {
+				p.setSeq(x)
+			}
+		case recCompactionPointer:
+			level := p.readLevel(br)
+			ikey := p.readBytes(br)
+			if p.err == nil {
+				p.addCompactionPointer(level, iKey(ikey))
+			}
+		case recNewTable:
+			level := p.readLevel(br)
+			num := p.readUvarint(br)
+			size := p.readUvarint(br)
+			imin := p.readBytes(br)
+			imax := p.readBytes(br)
+			if p.err == nil {
+				p.addTable(level, num, size, imin, imax)
+			}
+		case recDeletedTable:
+			level := p.readLevel(br)
+			num := p.readUvarint(br)
+			if p.err == nil {
+				p.deleteTable(level, num)
+			}
+		}
+	}
+
+	return p.err
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
new file mode 100644
index 0000000..029fabf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
@@ -0,0 +1,62 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bytes"
+	"testing"
+)
+
+func decodeEncode(v *sessionRecord) (res bool, err error) {
+	b := new(bytes.Buffer)
+	err = v.encode(b)
+	if err != nil {
+		return
+	}
+	// Keep a copy of the encoding; decoding below drains the buffer.
+	enc := append([]byte(nil), b.Bytes()...)
+	v2 := new(sessionRecord)
+	err = v2.decode(b)
+	if err != nil {
+		return
+	}
+	b2 := new(bytes.Buffer)
+	err = v2.encode(b2)
+	if err != nil {
+		return
+	}
+	return bytes.Equal(enc, b2.Bytes()), nil
+}
+
+func TestSessionRecord_EncodeDecode(t *testing.T) {
+	big := uint64(1) << 50
+	v := new(sessionRecord)
+	i := uint64(0)
+	test := func() {
+		res, err := decodeEncode(v)
+		if err != nil {
+			t.Fatalf("error when testing encode/decode sessionRecord: %v", err)
+		}
+		if !res {
+			t.Error("encode/decode test failed at iteration:", i)
+		}
+	}
+
+	for ; i < 4; i++ {
+		test()
+		v.addTable(3, big+300+i, big+400+i,
+			newIKey([]byte("foo"), big+500+1, tVal),
+			newIKey([]byte("zoo"), big+600+1, tDel))
+		v.deleteTable(4, big+700+i)
+		v.addCompactionPointer(int(i), newIKey([]byte("x"), big+900+1, tVal))
+	}
+
+	v.setComparer("foo")
+	v.setJournalNum(big + 100)
+	v.setPrevJournalNum(big + 99)
+	v.setNextNum(big + 200)
+	v.setSeq(big + 1000)
+	test()
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
new file mode 100644
index 0000000..814edc4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -0,0 +1,248 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// Logging.
+
+type dropper struct {
+	s    *session
+	file storage.File
+}
+
+func (d dropper) Drop(err error) {
+	if e, ok := err.(journal.ErrCorrupted); ok {
+		d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
+	} else {
+		d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)
+	}
+}
+
+func (s *session) log(v ...interface{})                 { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
+
+// File utils.
+
+func (s *session) getJournalFile(num uint64) storage.File {
+	return s.stor.GetFile(num, storage.TypeJournal)
+}
+
+func (s *session) getTableFile(num uint64) storage.File {
+	return s.stor.GetFile(num, storage.TypeTable)
+}
+
+func (s *session) getFiles(t storage.FileType) ([]storage.File, error) {
+	return s.stor.GetFiles(t)
+}
+
+func (s *session) newTemp() storage.File {
+	num := atomic.AddUint64(&s.stTempFileNum, 1) - 1
+	return s.stor.GetFile(num, storage.TypeTemp)
+}
+
+// Session state.
+
+// Get current version.
+func (s *session) version() *version {
+	s.vmu.Lock()
+	defer s.vmu.Unlock()
+	s.stVersion.ref++
+	return s.stVersion
+}
+
+// Get current version; no barrier.
+func (s *session) version_NB() *version {
+	return s.stVersion
+}
+
+// Set current version to v.
+func (s *session) setVersion(v *version) {
+	s.vmu.Lock()
+	v.ref = 1
+	if old := s.stVersion; old != nil {
+		v.ref++
+		old.next = v
+		old.release_NB()
+	}
+	s.stVersion = v
+	s.vmu.Unlock()
+}
+
+// Get current unused file number.
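+// The file-number helpers below coordinate through sync/atomic
+// operations on stFileNum.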
+func (s *session) fileNum() uint64 { + return atomic.LoadUint64(&s.stFileNum) +} + +// Get current unused file number to num. +func (s *session) setFileNum(num uint64) { + atomic.StoreUint64(&s.stFileNum, num) +} + +// Mark file number as used. +func (s *session) markFileNum(num uint64) { + num += 1 + for { + old, x := s.stFileNum, num + if old > x { + x = old + } + if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) { + break + } + } +} + +// Allocate a file number. +func (s *session) allocFileNum() (num uint64) { + return atomic.AddUint64(&s.stFileNum, 1) - 1 +} + +// Reuse given file number. +func (s *session) reuseFileNum(num uint64) { + for { + old, x := s.stFileNum, num + if old != x+1 { + x = old + } + if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) { + break + } + } +} + +// Manifest related utils. + +// Fill given session record obj with current states; need external +// synchronization. +func (s *session) fillRecord(r *sessionRecord, snapshot bool) { + r.setNextNum(s.fileNum()) + + if snapshot { + if !r.has(recJournalNum) { + r.setJournalNum(s.stJournalNum) + } + + if !r.has(recSeq) { + r.setSeq(s.stSeq) + } + + for level, ik := range s.stCptrs { + if ik != nil { + r.addCompactionPointer(level, ik) + } + } + + r.setComparer(s.icmp.uName()) + } +} + +// Mark if record has been commited, this will update session state; +// need external synchronization. +func (s *session) recordCommited(r *sessionRecord) { + if r.has(recJournalNum) { + s.stJournalNum = r.journalNum + } + + if r.has(recPrevJournalNum) { + s.stPrevJournalNum = r.prevJournalNum + } + + if r.has(recSeq) { + s.stSeq = r.seq + } + + for _, p := range r.compactionPointers { + s.stCptrs[p.level] = iKey(p.ikey) + } +} + +// Create a new manifest file; need external synchronization. +func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { + num := s.allocFileNum() + file := s.stor.GetFile(num, storage.TypeManifest) + writer, err := file.Create() + if err != nil { + return + } + jw := journal.NewWriter(writer) + + if v == nil { + v = s.version_NB() + } + if rec == nil { + rec = new(sessionRecord) + } + s.fillRecord(rec, true) + v.fillRecord(rec) + + defer func() { + if err == nil { + s.recordCommited(rec) + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + if s.manifestFile != nil { + s.manifestFile.Remove() + } + s.manifestFile = file + s.manifestWriter = writer + s.manifest = jw + } else { + writer.Close() + file.Remove() + s.reuseFileNum(num) + } + }() + + w, err := jw.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = jw.Flush() + if err != nil { + return + } + err = s.stor.SetManifest(file) + return +} + +// Flush record to disk. 
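+// The record is encoded into the current manifest journal, the journal
+// is flushed and the writer synced before the in-memory session state
+// is updated via recordCommited.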
+func (s *session) flushManifest(rec *sessionRecord) (err error) { + s.fillRecord(rec, false) + w, err := s.manifest.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = s.manifest.Flush() + if err != nil { + return + } + err = s.manifestWriter.Sync() + if err != nil { + return + } + s.recordCommited(rec) + return +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go new file mode 100644 index 0000000..b74753d --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -0,0 +1,534 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reservefs. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var errFileOpen = errors.New("leveldb/storage: file still open") + +type fileLock interface { + release() error +} + +type fileStorageLock struct { + fs *fileStorage +} + +func (lock *fileStorageLock) Release() { + fs := lock.fs + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.slock == lock { + fs.slock = nil + } + return +} + +// fileStorage is a file-system backed storage. +type fileStorage struct { + path string + + mu sync.Mutex + flock fileLock + slock *fileStorageLock + logw *os.File + buf []byte + // Opened file counter; if open < 0 means closed. + open int + day int +} + +// OpenFile returns a new filesytem-backed storage implementation with the given +// path. This also hold a file lock, so any subsequent attempt to open the same +// path will fail. +// +// The storage must be closed after use, by calling Close method. +func OpenFile(path string) (Storage, error) { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + flock, err := newFileLock(filepath.Join(path, "LOCK")) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + flock.release() + } + }() + + rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) + logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + + fs := &fileStorage{path: path, flock: flock, logw: logw} + runtime.SetFinalizer(fs, (*fileStorage).Close) + return fs, nil +} + +func (fs *fileStorage) Lock() (util.Releaser, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + if fs.slock != nil { + return nil, ErrLocked + } + fs.slock = &fileStorageLock{fs: fs} + return fs.slock, nil +} + +func itoa(buf []byte, i int, wid int) []byte { + var u uint = uint(i) + if u == 0 && wid <= 1 { + return append(buf, '0') + } + + // Assemble decimal in reverse order. + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + return append(buf, b[bp:]...) 
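+	// Editorial note (illustrative, not part of upstream goleveldb): wid is a
+	// minimum width, zero-padded on the left, not a cap; for example:
+	//
+	//	itoa(nil, 7, 2)   // -> "07"
+	//	itoa(nil, 123, 2) // -> "123"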
+} + +func (fs *fileStorage) printDay(t time.Time) { + if fs.day == t.Day() { + return + } + fs.day = t.Day() + fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) +} + +func (fs *fileStorage) doLog(t time.Time, str string) { + fs.printDay(t) + hour, min, sec := t.Clock() + msec := t.Nanosecond() / 1e3 + // time + fs.buf = itoa(fs.buf[:0], hour, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, min, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, sec, 2) + fs.buf = append(fs.buf, '.') + fs.buf = itoa(fs.buf, msec, 6) + fs.buf = append(fs.buf, ' ') + // write + fs.buf = append(fs.buf, []byte(str)...) + fs.buf = append(fs.buf, '\n') + fs.logw.Write(fs.buf) +} + +func (fs *fileStorage) Log(str string) { + t := time.Now() + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return + } + fs.doLog(t, str) +} + +func (fs *fileStorage) log(str string) { + fs.doLog(time.Now(), str) +} + +func (fs *fileStorage) GetFile(num uint64, t FileType) File { + return &file{fs: fs, num: num, t: t} +} + +func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return + } + fnn, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if err := dir.Close(); err != nil { + fs.log(fmt.Sprintf("close dir: %v", err)) + } + if err != nil { + return + } + f := &file{fs: fs} + for _, fn := range fnn { + if f.parse(fn) && (f.t&t) != 0 { + ff = append(ff, f) + f = &file{fs: fs} + } + } + return +} + +func (fs *fileStorage) GetManifest() (f File, err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return + } + fnn, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if err := dir.Close(); err != nil { + fs.log(fmt.Sprintf("close dir: %v", err)) + } + if err != nil { + return + } + // Find latest CURRENT file. + var rem []string + var pend bool + var cerr error + for _, fn := range fnn { + if strings.HasPrefix(fn, "CURRENT") { + pend1 := len(fn) > 7 + // Make sure it is valid name for a CURRENT file, otherwise skip it. + if pend1 { + if fn[7] != '.' || len(fn) < 9 { + fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) + continue + } + if _, e1 := strconv.ParseUint(fn[7:], 10, 0); e1 != nil { + fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) + continue + } + } + path := filepath.Join(fs.path, fn) + r, e1 := os.OpenFile(path, os.O_RDONLY, 0) + if e1 != nil { + return nil, e1 + } + b, e1 := ioutil.ReadAll(r) + if e1 != nil { + r.Close() + return nil, e1 + } + f1 := &file{fs: fs} + if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { + fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) + if pend1 { + rem = append(rem, fn) + } + if !pend1 || cerr == nil { + cerr = fmt.Errorf("leveldb/storage: corrupted or incomplete %s file", fn) + } + } else if f != nil && f1.Num() < f.Num() { + fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) + if pend1 { + rem = append(rem, fn) + } + } else { + f = f1 + pend = pend1 + } + if err := r.Close(); err != nil { + fs.log(fmt.Sprintf("close %s: %v", fn, err)) + } + } + } + // Don't remove any files if there is no valid CURRENT file. 
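+	// Editorial note (illustrative, not part of upstream goleveldb): a CURRENT
+	// file is simply the manifest file name plus a trailing newline, and a
+	// pending one is named "CURRENT." plus the manifest number (see SetManifest
+	// below); for a healthy database at a hypothetical dbpath,
+	//
+	//	b, _ := ioutil.ReadFile(filepath.Join(dbpath, "CURRENT"))
+	//	// b == []byte("MANIFEST-000002\n")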
+ if f == nil { + if cerr != nil { + err = cerr + } else { + err = os.ErrNotExist + } + return + } + // Rename pending CURRENT file to an effective CURRENT. + if pend { + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) + if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { + fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) + } + } + // Remove obsolete or incomplete pending CURRENT files. + for _, fn := range rem { + path := filepath.Join(fs.path, fn) + if err := os.Remove(path); err != nil { + fs.log(fmt.Sprintf("remove %s: %v", fn, err)) + } + } + return +} + +func (fs *fileStorage) SetManifest(f File) (err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + f2, ok := f.(*file) + if !ok || f2.t != TypeManifest { + return ErrInvalidFile + } + defer func() { + if err != nil { + fs.log(fmt.Sprintf("CURRENT: %v", err)) + } + }() + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) + w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + _, err = fmt.Fprintln(w, f2.name()) + // Close the file first. + if err := w.Close(); err != nil { + fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) + } + if err != nil { + return err + } + return rename(path, filepath.Join(fs.path, "CURRENT")) +} + +func (fs *fileStorage) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + // Clear the finalizer. + runtime.SetFinalizer(fs, nil) + + if fs.open > 0 { + fs.log(fmt.Sprintf("refuse to close, %d files still open", fs.open)) + return fmt.Errorf("leveldb/storage: cannot close, %d files still open", fs.open) + } + fs.open = -1 + e1 := fs.logw.Close() + err := fs.flock.release() + if err == nil { + err = e1 + } + return err +} + +type fileWrap struct { + *os.File + f *file +} + +func (fw fileWrap) Sync() error { + if err := fw.File.Sync(); err != nil { + return err + } + if fw.f.Type() == TypeManifest { + // Also sync parent directory if file type is manifest. + // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
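+		//
+		// Editorial note (illustrative, not part of upstream goleveldb):
+		// syncing the file alone does not persist its directory entry; the
+		// platform syncDir helpers below all amount to open-then-sync:
+		//
+		//	d, err := os.Open(dir) // the database directory
+		//	if err != nil {
+		//		return err
+		//	}
+		//	defer d.Close()
+		//	return d.Sync()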
+ if err := syncDir(fw.f.fs.path); err != nil { + return err + } + } + return nil +} + +func (fw fileWrap) Close() error { + f := fw.f + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if !f.open { + return ErrClosed + } + f.open = false + f.fs.open-- + err := fw.File.Close() + if err != nil { + f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) + } + return err +} + +type file struct { + fs *fileStorage + num uint64 + t FileType + open bool +} + +func (f *file) Open() (Reader, error) { + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if f.fs.open < 0 { + return nil, ErrClosed + } + if f.open { + return nil, errFileOpen + } + of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) + if err != nil { + if f.hasOldName() && os.IsNotExist(err) { + of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) + if err == nil { + goto ok + } + } + return nil, err + } +ok: + f.open = true + f.fs.open++ + return fileWrap{of, f}, nil +} + +func (f *file) Create() (Writer, error) { + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if f.fs.open < 0 { + return nil, ErrClosed + } + if f.open { + return nil, errFileOpen + } + of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return nil, err + } + f.open = true + f.fs.open++ + return fileWrap{of, f}, nil +} + +func (f *file) Replace(newfile File) error { + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if f.fs.open < 0 { + return ErrClosed + } + newfile2, ok := newfile.(*file) + if !ok { + return ErrInvalidFile + } + if f.open || newfile2.open { + return errFileOpen + } + return rename(newfile2.path(), f.path()) +} + +func (f *file) Type() FileType { + return f.t +} + +func (f *file) Num() uint64 { + return f.num +} + +func (f *file) Remove() error { + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if f.fs.open < 0 { + return ErrClosed + } + if f.open { + return errFileOpen + } + err := os.Remove(f.path()) + if err != nil { + f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) + } + // Also try remove file with old name, just in case. 
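+	// Editorial note (illustrative, not part of upstream goleveldb): the "old
+	// name" is the legacy ".sst" suffix that table files carried before the
+	// ".ldb" rename; see name/oldName below:
+	//
+	//	(&file{num: 42, t: TypeTable}).name()    // "000042.ldb"
+	//	(&file{num: 42, t: TypeTable}).oldName() // "000042.sst"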
+	if f.hasOldName() {
+		if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) {
+			f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), e1))
+			err = e1
+		}
+	}
+	return err
+}
+
+func (f *file) hasOldName() bool {
+	return f.t == TypeTable
+}
+
+func (f *file) oldName() string {
+	switch f.t {
+	case TypeTable:
+		return fmt.Sprintf("%06d.sst", f.num)
+	}
+	return f.name()
+}
+
+func (f *file) oldPath() string {
+	return filepath.Join(f.fs.path, f.oldName())
+}
+
+func (f *file) name() string {
+	switch f.t {
+	case TypeManifest:
+		return fmt.Sprintf("MANIFEST-%06d", f.num)
+	case TypeJournal:
+		return fmt.Sprintf("%06d.log", f.num)
+	case TypeTable:
+		return fmt.Sprintf("%06d.ldb", f.num)
+	case TypeTemp:
+		return fmt.Sprintf("%06d.tmp", f.num)
+	default:
+		panic("invalid file type")
+	}
+}
+
+func (f *file) path() string {
+	return filepath.Join(f.fs.path, f.name())
+}
+
+func (f *file) parse(name string) bool {
+	var num uint64
+	var tail string
+	_, err := fmt.Sscanf(name, "%d.%s", &num, &tail)
+	if err == nil {
+		switch tail {
+		case "log":
+			f.t = TypeJournal
+		case "ldb", "sst":
+			f.t = TypeTable
+		case "tmp":
+			f.t = TypeTemp
+		default:
+			return false
+		}
+		f.num = num
+		return true
+	}
+	n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail)
+	if n == 1 {
+		f.t = TypeManifest
+		f.num = num
+		return true
+	}
+
+	return false
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
new file mode 100644
index 0000000..42940d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+	"os"
+	"path/filepath"
+)
+
+type plan9FileLock struct {
+	f *os.File
+}
+
+func (fl *plan9FileLock) release() error {
+	return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
+	if err != nil {
+		return
+	}
+	fl = &plan9FileLock{f: f}
+	return
+}
+
+func rename(oldpath, newpath string) error {
+	if _, err := os.Stat(newpath); err == nil {
+		if err := os.Remove(newpath); err != nil {
+			return err
+		}
+	}
+
+	_, fname := filepath.Split(newpath)
+	return os.Rename(oldpath, fname)
+}
+
+func syncDir(name string) error {
+	f, err := os.Open(name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := f.Sync(); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
new file mode 100644
index 0000000..102031b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +// +build solaris + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string) (fl fileLock, err error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return + } + err = setFileLock(f, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, lock bool) error { + flock := syscall.Flock_t{ + Type: syscall.F_UNLCK, + Start: 0, + Len: 0, + Whence: 1, + } + if lock { + flock.Type = syscall.F_WRLCK + } + return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go new file mode 100644 index 0000000..92abcbb --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "fmt" + "os" + "path/filepath" + "testing" +) + +var cases = []struct { + oldName []string + name string + ftype FileType + num uint64 +}{ + {nil, "000100.log", TypeJournal, 100}, + {nil, "000000.log", TypeJournal, 0}, + {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, + {nil, "MANIFEST-000002", TypeManifest, 2}, + {nil, "MANIFEST-000007", TypeManifest, 7}, + {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, + {nil, "000100.tmp", TypeTemp, 100}, +} + +var invalidCases = []string{ + "", + "foo", + "foo-dx-100.log", + ".log", + "", + "manifest", + "CURREN", + "CURRENTX", + "MANIFES", + "MANIFEST", + "MANIFEST-", + "XMANIFEST-3", + "MANIFEST-3x", + "LOC", + "LOCKx", + "LO", + "LOGx", + "18446744073709551616.log", + "184467440737095516150.log", + "100", + "100.", + "100.lop", +} + +func TestFileStorage_CreateFileName(t *testing.T) { + for _, c := range cases { + f := &file{num: c.num, t: c.ftype} + if f.name() != c.name { + t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) + } + } +} + +func TestFileStorage_ParseFileName(t *testing.T) { + for _, c := range cases { + for _, name := range append([]string{c.name}, c.oldName...) 
{ + f := new(file) + if !f.parse(name) { + t.Errorf("cannot parse filename '%s'", name) + continue + } + if f.Type() != c.ftype { + t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) + } + if f.Num() != c.num { + t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) + } + } + } +} + +func TestFileStorage_InvalidFileName(t *testing.T) { + for _, name := range invalidCases { + f := new(file) + if f.parse(name) { + t.Errorf("filename '%s' should be invalid", name) + } + } +} + +func TestFileStorage_Locking(t *testing.T) { + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) + + _, err := os.Stat(path) + if err == nil { + err = os.RemoveAll(path) + if err != nil { + t.Fatal("RemoveAll: got error: ", err) + } + } + + p1, err := OpenFile(path) + if err != nil { + t.Fatal("OpenFile(1): got error: ", err) + } + + defer os.RemoveAll(path) + + p2, err := OpenFile(path) + if err != nil { + t.Logf("OpenFile(2): got error: %s (expected)", err) + } else { + p2.Close() + p1.Close() + t.Fatal("OpenFile(2): expect error") + } + + p1.Close() + + p3, err := OpenFile(path) + if err != nil { + t.Fatal("OpenFile(3): got error: ", err) + } + defer p3.Close() + + l, err := p3.Lock() + if err != nil { + t.Fatal("storage lock failed(1): ", err) + } + _, err = p3.Lock() + if err == nil { + t.Fatal("expect error for second storage lock attempt") + } else { + t.Logf("storage lock got error: %s (expected)", err) + } + l.Release() + _, err = p3.Lock() + if err != nil { + t.Fatal("storage lock failed(2): ", err) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go new file mode 100644 index 0000000..d0a604b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go @@ -0,0 +1,63 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
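+//
+// Editorial note (illustrative, not part of upstream goleveldb): the unix
+// implementation below takes a non-blocking exclusive flock(2) on the LOCK
+// file, so a second process opening the same database fails fast instead of
+// blocking. The core of the idea, standalone:
+//
+//	f, _ := os.OpenFile("LOCK", os.O_RDWR|os.O_CREATE, 0644)
+//	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+//		// err is EWOULDBLOCK while another process holds the lock
+//	}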
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string) (fl fileLock, err error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return + } + err = setFileLock(f, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, lock bool) error { + how := syscall.LOCK_UN + if lock { + how = syscall.LOCK_EX + } + return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go new file mode 100644 index 0000000..50c3c45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go @@ -0,0 +1,69 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procMoveFileExW = modkernel32.NewProc("MoveFileExW") +) + +const ( + _MOVEFILE_REPLACE_EXISTING = 1 +) + +type windowsFileLock struct { + fd syscall.Handle +} + +func (fl *windowsFileLock) release() error { + return syscall.Close(fl.fd) +} + +func newFileLock(path string) (fl fileLock, err error) { + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return + } + fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if err != nil { + return + } + fl = &windowsFileLock{fd: fd} + return +} + +func moveFileEx(from *uint16, to *uint16, flags uint32) error { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + return error(e1) + } else { + return syscall.EINVAL + } + } + return nil +} + +func rename(oldpath, newpath string) error { + from, err := syscall.UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := syscall.UTF16PtrFromString(newpath) + if err != nil { + return err + } + return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) +} + +func syncDir(name string) error { return nil } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go new file mode 100644 index 0000000..120b8eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go @@ -0,0 +1,203 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+
+package storage
+
+import (
+	"bytes"
+	"os"
+	"sync"
+
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+const typeShift = 4
+
+type memStorageLock struct {
+	ms *memStorage
+}
+
+func (lock *memStorageLock) Release() {
+	ms := lock.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.slock == lock {
+		ms.slock = nil
+	}
+	return
+}
+
+// memStorage is a memory-backed storage.
+type memStorage struct {
+	mu       sync.Mutex
+	slock    *memStorageLock
+	files    map[uint64]*memFile
+	manifest *memFilePtr
+}
+
+// NewMemStorage returns a new memory-backed storage implementation.
+func NewMemStorage() Storage {
+	return &memStorage{
+		files: make(map[uint64]*memFile),
+	}
+}
+
+func (ms *memStorage) Lock() (util.Releaser, error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.slock != nil {
+		return nil, ErrLocked
+	}
+	ms.slock = &memStorageLock{ms: ms}
+	return ms.slock, nil
+}
+
+func (*memStorage) Log(str string) {}
+
+func (ms *memStorage) GetFile(num uint64, t FileType) File {
+	return &memFilePtr{ms: ms, num: num, t: t}
+}
+
+func (ms *memStorage) GetFiles(t FileType) ([]File, error) {
+	ms.mu.Lock()
+	var ff []File
+	for x := range ms.files {
+		num, mt := x>>typeShift, FileType(x)&TypeAll
+		if mt&t == 0 {
+			continue
+		}
+		ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt})
+	}
+	ms.mu.Unlock()
+	return ff, nil
+}
+
+func (ms *memStorage) GetManifest() (File, error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.manifest == nil {
+		return nil, os.ErrNotExist
+	}
+	return ms.manifest, nil
+}
+
+func (ms *memStorage) SetManifest(f File) error {
+	fm, ok := f.(*memFilePtr)
+	if !ok || fm.t != TypeManifest {
+		return ErrInvalidFile
+	}
+	ms.mu.Lock()
+	ms.manifest = fm
+	ms.mu.Unlock()
+	return nil
+}
+
+func (*memStorage) Close() error { return nil }
+
+type memReader struct {
+	*bytes.Reader
+	m *memFile
+}
+
+func (mr *memReader) Close() error {
+	return mr.m.Close()
+}
+
+type memFile struct {
+	bytes.Buffer
+	ms   *memStorage
+	open bool
+}
+
+func (*memFile) Sync() error { return nil }
+func (m *memFile) Close() error {
+	m.ms.mu.Lock()
+	m.open = false
+	m.ms.mu.Unlock()
+	return nil
+}
+
+type memFilePtr struct {
+	ms  *memStorage
+	num uint64
+	t   FileType
+}
+
+func (p *memFilePtr) x() uint64 {
+	return p.Num()<<typeShift | uint64(p.t)
+}
+
+func (p *memFilePtr) Open() (Reader, error) {
+	ms := p.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if m, exist := ms.files[p.x()]; exist {
+		if m.open {
+			return nil, errFileOpen
+		}
+		m.open = true
+		return &memReader{Reader: bytes.NewReader(m.Bytes()), m: m}, nil
+	}
+	return nil, os.ErrNotExist
+}
+
+func (p *memFilePtr) Create() (Writer, error) {
+	ms := p.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	m, exist := ms.files[p.x()]
+	if exist {
+		if m.open {
+			return nil, errFileOpen
+		}
+		m.Reset()
+	} else {
+		m = &memFile{ms: ms}
+		ms.files[p.x()] = m
+	}
+	m.open = true
+	return m, nil
+}
+
+func (p *memFilePtr) Replace(newfile File) error {
+	p1, ok := newfile.(*memFilePtr)
+	if !ok {
+		return ErrInvalidFile
+	}
+	ms := p.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	m1, exist := ms.files[p1.x()]
+	if !exist {
+		return os.ErrNotExist
+	}
+	m0, exist := ms.files[p.x()]
+	if (exist && m0.open) || m1.open {
+		return errFileOpen
+	}
+	delete(ms.files, p1.x())
+	ms.files[p.x()] = m1
+	return nil
+}
+
+func (p *memFilePtr) Type() FileType {
+	return p.t
+}
+
+func (p *memFilePtr) Num() uint64 {
+	return p.num
+}
+
+func (p *memFilePtr) Remove() error {
+	ms := p.ms
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if _, exist := ms.files[p.x()]; exist {
+		delete(ms.files, p.x())
+		return nil
+	}
+	return os.ErrNotExist
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
new file mode 100644
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
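+//
+// Editorial note (illustrative, not part of upstream goleveldb): memStorage
+// (above) keys its file map by packing number and type into one uint64,
+// num<<typeShift | uint64(t), where the low bits hold the FileType bit
+// (TypeManifest=1, TypeJournal=2, TypeTable=4, TypeTemp=8). For example,
+// with a shift of 4:
+//
+//	x := uint64(7)<<4 | uint64(8) // file 7, TypeTemp
+//	num, t := x>>4, x&15          // 7, TypeTemp again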
+
+package storage
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestMemStorage(t *testing.T) {
+	m := NewMemStorage()
+
+	l, err := m.Lock()
+	if err != nil {
+		t.Fatal("storage lock failed(1): ", err)
+	}
+	_, err = m.Lock()
+	if err == nil {
+		t.Fatal("expect error for second storage lock attempt")
+	} else {
+		t.Logf("storage lock got error: %s (expected)", err)
+	}
+	l.Release()
+	_, err = m.Lock()
+	if err != nil {
+		t.Fatal("storage lock failed(2): ", err)
+	}
+
+	f := m.GetFile(1, TypeTable)
+	if f.Num() != 1 || f.Type() != TypeTable {
+		t.Fatal("invalid file number and type")
+	}
+	w, _ := f.Create()
+	w.Write([]byte("abc"))
+	w.Close()
+	if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 {
+		t.Fatal("invalid GetFiles len")
+	}
+	buf := new(bytes.Buffer)
+	r, err := f.Open()
+	if err != nil {
+		t.Fatal("Open: got error: ", err)
+	}
+	buf.ReadFrom(r)
+	r.Close()
+	if got := buf.String(); got != "abc" {
+		t.Fatalf("Read: invalid value, want=abc got=%s", got)
+	}
+	if _, err := f.Open(); err != nil {
+		t.Fatal("Open: got error: ", err)
+	}
+	if _, err := m.GetFile(1, TypeTable).Open(); err == nil {
+		t.Fatal("expecting error")
+	}
+	f.Remove()
+	if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 {
+		t.Fatal("invalid GetFiles len", len(ff))
+	}
+	if _, err := f.Open(); err == nil {
+		t.Fatal("expecting error")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
new file mode 100644
index 0000000..1ccf4a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -0,0 +1,127 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package storage provides storage abstraction for LevelDB.
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type FileType uint32
+
+const (
+	TypeManifest FileType = 1 << iota
+	TypeJournal
+	TypeTable
+	TypeTemp
+
+	TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
+)
+
+func (t FileType) String() string {
+	switch t {
+	case TypeManifest:
+		return "manifest"
+	case TypeJournal:
+		return "journal"
+	case TypeTable:
+		return "table"
+	case TypeTemp:
+		return "temp"
+	}
+	return fmt.Sprintf("<unknown:%d>", t)
+}
+
+var (
+	ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
+	ErrLocked      = errors.New("leveldb/storage: already locked")
+	ErrClosed      = errors.New("leveldb/storage: closed")
+)
+
+// Syncer is the interface that wraps basic Sync method.
+type Syncer interface {
+	// Sync commits the current contents of the file to stable storage.
+	Sync() error
+}
+
+// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
+// methods.
+type Reader interface {
+	io.ReadSeeker
+	io.ReaderAt
+	io.Closer
+}
+
+// Writer is the interface that groups the basic Write, Sync and Close
+// methods.
+type Writer interface {
+	io.WriteCloser
+	Syncer
+}
+
+// File is the file. A file instance must be goroutine-safe.
+type File interface {
+	// Open opens the file for read. Returns os.ErrNotExist error
+	// if the file does not exist.
+	// Returns ErrClosed if the underlying storage is closed.
+	Open() (r Reader, err error)
+
+	// Create creates the file for writing. Truncates the file if it
+	// already exists.
+	// Returns ErrClosed if the underlying storage is closed.
+	Create() (w Writer, err error)
+
+	// Replace replaces file with newfile.
+	// Returns ErrClosed if the underlying storage is closed.
+	Replace(newfile File) error
+
+	// Type returns the file type.
+	Type() FileType
+
+	// Num returns the file number.
+	Num() uint64
+
+	// Remove removes the file.
+	// Returns ErrClosed if the underlying storage is closed.
+	Remove() error
+}
+
+// Storage is the storage. A storage instance must be goroutine-safe.
+type Storage interface {
+	// Lock locks the storage. Any subsequent attempt to call Lock will fail
+	// until the last lock has been released.
+	// After use the caller should call the Release method.
+	Lock() (l util.Releaser, err error)
+
+	// Log logs a string. This is used for logging. An implementation
+	// may write to a file, stdout or simply do nothing.
+	Log(str string)
+
+	// GetFile returns a file for the given number and type. GetFile will never
+	// return nil, even if the underlying storage is closed.
+	GetFile(num uint64, t FileType) File
+
+	// GetFiles returns a slice of files that match the given file types.
+	// The file types may be OR'ed together.
+	GetFiles(t FileType) ([]File, error)
+
+	// GetManifest returns a manifest file. Returns os.ErrNotExist if manifest
+	// file does not exist.
+	GetManifest() (File, error)
+
+	// SetManifest sets the given file as manifest file. The given file should
+	// be a manifest file type or an error will be returned.
+	SetManifest(f File) error
+
+	// Close closes the storage. It is valid to call Close multiple times.
+	// Other methods should not be called after the storage has been closed.
+	Close() error
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
new file mode 100644
index 0000000..fe33682
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
@@ -0,0 +1,459 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
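+//
+// Editorial sketch (illustrative, not part of upstream goleveldb): typical
+// lifecycle of the storage.Storage interface defined above, against a
+// hypothetical path:
+//
+//	stor, _ := storage.OpenFile("/tmp/exampledb")
+//	lock, _ := stor.Lock()                    // exclusive database lock
+//	f := stor.GetFile(1, storage.TypeJournal) // 000001.log
+//	w, _ := f.Create()
+//	w.Write([]byte("journal bytes"))
+//	w.Sync()
+//	w.Close()
+//	lock.Release()
+//	stor.Close()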
+ +package leveldb + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +const typeShift = 4 + +var ( + tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument") + tsErrFileOpen = errors.New("leveldb.testStorage: file still open") +) + +var ( + tsFSEnv = os.Getenv("GOLEVELDB_USEFS") + tsKeepFS = tsFSEnv == "2" + tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1" + tsMU = &sync.Mutex{} + tsNum = 0 +) + +type tsLock struct { + ts *testStorage + r util.Releaser +} + +func (l tsLock) Release() { + l.r.Release() + l.ts.t.Log("I: storage lock released") +} + +type tsReader struct { + tf tsFile + storage.Reader +} + +func (tr tsReader) Read(b []byte) (n int, err error) { + ts := tr.tf.ts + ts.countRead(tr.tf.Type()) + n, err = tr.Reader.Read(b) + if err != nil && err != io.EOF { + ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err) + } + return +} + +func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) { + ts := tr.tf.ts + ts.countRead(tr.tf.Type()) + n, err = tr.Reader.ReadAt(b, off) + if err != nil && err != io.EOF { + ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err) + } + return +} + +func (tr tsReader) Close() (err error) { + err = tr.Reader.Close() + tr.tf.close("reader", err) + return +} + +type tsWriter struct { + tf tsFile + storage.Writer +} + +func (tw tsWriter) Write(b []byte) (n int, err error) { + ts := tw.tf.ts + ts.mu.Lock() + defer ts.mu.Unlock() + if ts.emuWriteErr&tw.tf.Type() != 0 { + return 0, errors.New("leveldb.testStorage: emulated write error") + } + n, err = tw.Writer.Write(b) + if err != nil { + ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err) + } + return +} + +func (tw tsWriter) Sync() (err error) { + ts := tw.tf.ts + ts.mu.Lock() + defer ts.mu.Unlock() + for ts.emuDelaySync&tw.tf.Type() != 0 { + ts.cond.Wait() + } + if ts.emuSyncErr&tw.tf.Type() != 0 { + return errors.New("leveldb.testStorage: emulated sync error") + } + err = tw.Writer.Sync() + if err != nil { + ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err) + } + return +} + +func (tw tsWriter) Close() (err error) { + err = tw.Writer.Close() + tw.tf.close("reader", err) + return +} + +type tsFile struct { + ts *testStorage + storage.File +} + +func (tf tsFile) x() uint64 { + return tf.Num()<>typeShift, storage.FileType(x)&storage.TypeAll + ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer) + } + } + ts.mu.Unlock() +} + +func newTestStorage(t *testing.T) *testStorage { + var stor storage.Storage + var closeFn func() error + if tsFS { + for { + tsMU.Lock() + num := tsNum + tsNum++ + tsMU.Unlock() + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) + if _, err := os.Stat(path); err != nil { + stor, err = storage.OpenFile(path) + if err != nil { + t.Fatalf("F: cannot create storage: %v", err) + } + t.Logf("I: storage created: %s", path) + closeFn = func() error { + for _, name := range []string{"LOG.old", "LOG"} { + f, err := os.Open(filepath.Join(path, name)) + if err != nil { + continue + } + if log, err := ioutil.ReadAll(f); err != nil { + t.Logf("---------------------- %s ----------------------", name) + 
t.Logf("cannot read log: %v", err) + t.Logf("---------------------- %s ----------------------", name) + } else if len(log) > 0 { + t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) + t.Logf("---------------------- %s ----------------------", name) + } + f.Close() + } + if tsKeepFS { + return nil + } + return os.RemoveAll(path) + } + + break + } + } + } else { + stor = storage.NewMemStorage() + } + ts := &testStorage{ + t: t, + Storage: stor, + closeFn: closeFn, + opens: make(map[uint64]bool), + } + ts.cond.L = &ts.mu + return ts +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go new file mode 100644 index 0000000..5a79162 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go @@ -0,0 +1,469 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sort" + "sync/atomic" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// tFile holds basic information about a table. +type tFile struct { + file storage.File + seekLeft int32 + size uint64 + imin, imax iKey +} + +// Returns true if given key is after largest key of this table. +func (t *tFile) after(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 +} + +// Returns true if given key is before smallest key of this table. +func (t *tFile) before(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 +} + +// Returns true if given key range overlaps with this table key range. +func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { + return !t.after(icmp, umin) && !t.before(icmp, umax) +} + +// Cosumes one seek and return current seeks left. +func (t *tFile) consumeSeek() int32 { + return atomic.AddInt32(&t.seekLeft, -1) +} + +// Creates new tFile. +func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { + f := &tFile{ + file: file, + size: size, + imin: imin, + imax: imax, + } + + // We arrange to automatically compact this file after + // a certain number of seeks. Let's assume: + // (1) One seek costs 10ms + // (2) Writing or reading 1MB costs 10ms (100MB/s) + // (3) A compaction of 1MB does 25MB of IO: + // 1MB read from this level + // 10-12MB read from next level (boundaries may be misaligned) + // 10-12MB written to next level + // This implies that 25 seeks cost the same as the compaction + // of 1MB of data. I.e., one seek costs approximately the + // same as the compaction of 40KB of data. We are a little + // conservative and allow approximately one seek for every 16KB + // of data before triggering a compaction. + f.seekLeft = int32(size / 16384) + if f.seekLeft < 100 { + f.seekLeft = 100 + } + + return f +} + +// tFiles hold multiple tFile. 
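+//
+// Editorial note (illustrative, not part of upstream goleveldb): the seek
+// budget computed in newTableFile above works out to one seek per 16KB of
+// table data, with a floor of 100, e.g.:
+//
+//	newTableFile(f, 1<<20, imin, imax).seekLeft  // 1MB/16KB = 64, clamped to 100
+//	newTableFile(f, 32<<20, imin, imax).seekLeft // 32MB/16KB = 2048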
+type tFiles []*tFile + +func (tf tFiles) Len() int { return len(tf) } +func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } + +// Returns true if i smallest key is less than j. +// This used for sort by key in ascending order. +func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { + a, b := tf[i], tf[j] + n := icmp.Compare(a.imin, b.imin) + if n == 0 { + return a.file.Num() < b.file.Num() + } + return n < 0 +} + +// Returns true if i file number is greater than j. +// This used for sort by file number in descending order. +func (tf tFiles) lessByNum(i, j int) bool { + return tf[i].file.Num() > tf[j].file.Num() +} + +// Sorts tables by key in ascending order. +func (tf tFiles) sortByKey(icmp *iComparer) { + sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) +} + +// Sorts tables by file number in descending order. +func (tf tFiles) sortByNum() { + sort.Sort(&tFilesSortByNum{tFiles: tf}) +} + +// Returns sum of all tables size. +func (tf tFiles) size() (sum uint64) { + for _, t := range tf { + sum += t.size + } + return sum +} + +// Searches smallest index of tables whose its smallest +// key is after or equal with given key. +func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imin, ikey) >= 0 + }) +} + +// Searches smallest index of tables whose its largest +// key is after or equal with given key. +func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imax, ikey) >= 0 + }) +} + +// Returns true if given key range overlaps with one or more +// tables key range. If unsorted is true then binary search will not be used. +func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { + if unsorted { + // Check against all files. + for _, t := range tf { + if t.overlaps(icmp, umin, umax) { + return true + } + } + return false + } + + i := 0 + if len(umin) > 0 { + // Find the earliest possible internal key for min. + i = tf.searchMax(icmp, newIKey(umin, kMaxSeq, tSeek)) + } + if i >= len(tf) { + // Beginning of range is after all files, so no overlap. + return false + } + return !tf[i].before(icmp, umax) +} + +// Returns tables whose its key range overlaps with given key range. +// If overlapped is true then the search will be expanded to tables that +// overlaps with each other. +func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { + x := len(dst) + for i := 0; i < len(tf); { + t := tf[i] + if t.overlaps(icmp, umin, umax) { + if overlapped { + // For overlapped files, check if the newly added file has + // expanded the range. If so, restart search. + if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { + umin = t.imin.ukey() + dst = dst[:x] + i = 0 + continue + } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { + umax = t.imax.ukey() + dst = dst[:x] + i = 0 + continue + } + } + + dst = append(dst, t) + } + i++ + } + + return dst +} + +// Returns tables key range. +func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) { + for i, t := range tf { + if i == 0 { + imin, imax = t.imin, t.imax + continue + } + if icmp.Compare(t.imin, imin) < 0 { + imin = t.imin + } + if icmp.Compare(t.imax, imax) > 0 { + imax = t.imax + } + } + + return +} + +// Creates iterator index from tables. 
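+//
+// Editorial example (illustrative, not part of upstream goleveldb): for
+// overlapped levels, getOverlaps above restarts its scan whenever a matched
+// table widens the user-key range. Given tables A["a".."e"], B["d".."h"] and
+// C["j".."m"]:
+//
+//	tf.getOverlaps(nil, icmp, []byte("b"), []byte("f"), true)
+//	// A widens the range to ["a".."f"], B widens it to ["a".."h"],
+//	// the scan restarts each time, and the result is [A, B]; C stays out.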
+func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { + if slice != nil { + var start, limit int + if slice.Start != nil { + start = tf.searchMax(icmp, iKey(slice.Start)) + } + if slice.Limit != nil { + limit = tf.searchMin(icmp, iKey(slice.Limit)) + } else { + limit = tf.Len() + } + tf = tf[start:limit] + } + return iterator.NewArrayIndexer(&tFilesArrayIndexer{ + tFiles: tf, + tops: tops, + icmp: icmp, + slice: slice, + ro: ro, + }) +} + +// Tables iterator index. +type tFilesArrayIndexer struct { + tFiles + tops *tOps + icmp *iComparer + slice *util.Range + ro *opt.ReadOptions +} + +func (a *tFilesArrayIndexer) Search(key []byte) int { + return a.searchMax(a.icmp, iKey(key)) +} + +func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { + if i == 0 || i == a.Len()-1 { + return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) + } + return a.tops.newIterator(a.tFiles[i], nil, a.ro) +} + +// Helper type for sortByKey. +type tFilesSortByKey struct { + tFiles + icmp *iComparer +} + +func (x *tFilesSortByKey) Less(i, j int) bool { + return x.lessByKey(x.icmp, i, j) +} + +// Helper type for sortByNum. +type tFilesSortByNum struct { + tFiles +} + +func (x *tFilesSortByNum) Less(i, j int) bool { + return x.lessByNum(i, j) +} + +// Table operations. +type tOps struct { + s *session + cache cache.Cache + cacheNS cache.Namespace + bpool *util.BufferPool +} + +// Creates an empty table and returns table writer. +func (t *tOps) create() (*tWriter, error) { + file := t.s.getTableFile(t.s.allocFileNum()) + fw, err := file.Create() + if err != nil { + return nil, err + } + return &tWriter{ + t: t, + file: file, + w: fw, + tw: table.NewWriter(fw, t.s.o), + }, nil +} + +// Builds table from src iterator. +func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { + w, err := t.create() + if err != nil { + return + } + + defer func() { + if err != nil { + w.drop() + } + }() + + for src.Next() { + err = w.append(src.Key(), src.Value()) + if err != nil { + return + } + } + err = src.Error() + if err != nil { + return + } + + n = w.tw.EntriesLen() + f, err = w.finish() + return +} + +// Opens table. It returns a cache handle, which should +// be released after use. +func (t *tOps) open(f *tFile) (ch cache.Handle, err error) { + num := f.file.Num() + ch = t.cacheNS.Get(num, func() (charge int, value interface{}) { + var r storage.Reader + r, err = f.file.Open() + if err != nil { + return 0, nil + } + + var bcacheNS cache.Namespace + if bc := t.s.o.GetBlockCache(); bc != nil { + bcacheNS = bc.GetNamespace(num) + } + return 1, table.NewReader(r, int64(f.size), bcacheNS, t.bpool, t.s.o) + }) + if ch == nil && err == nil { + err = ErrClosed + } + return +} + +// Finds key/value pair whose key is greater than or equal to the +// given key. +func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { + ch, err := t.open(f) + if err != nil { + return nil, nil, err + } + defer ch.Release() + return ch.Value().(*table.Reader).Find(key, ro) +} + +// Returns approximate offset of the given key. +func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) { + ch, err := t.open(f) + if err != nil { + return + } + defer ch.Release() + offset_, err := ch.Value().(*table.Reader).OffsetOf(key) + return uint64(offset_), err +} + +// Creates an iterator from the given table. 
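+//
+// Editorial note (illustrative, not part of upstream goleveldb): open above
+// returns a cache handle that pins the *table.Reader; the read helpers all
+// follow the same pattern:
+//
+//	ch, err := t.open(f)
+//	if err == nil {
+//		r := ch.Value().(*table.Reader)
+//		// ... use r ...
+//		ch.Release() // unpin; the reader may now be evicted
+//	}
+//
+// newIterator below instead hands ch to the iterator via SetReleaser, so the
+// table stays pinned for the iterator's lifetime.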
+func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + ch, err := t.open(f) + if err != nil { + return iterator.NewEmptyIterator(err) + } + iter := ch.Value().(*table.Reader).NewIterator(slice, ro) + iter.SetReleaser(ch) + return iter +} + +// Removes table from persistent storage. It waits until +// no one use the the table. +func (t *tOps) remove(f *tFile) { + num := f.file.Num() + t.cacheNS.Delete(num, func(exist, pending bool) { + if !pending { + if err := f.file.Remove(); err != nil { + t.s.logf("table@remove removing @%d %q", num, err) + } else { + t.s.logf("table@remove removed @%d", num) + } + if bc := t.s.o.GetBlockCache(); bc != nil { + bc.ZapNamespace(num) + } + } + }) +} + +// Closes the table ops instance. It will close all tables, +// regadless still used or not. +func (t *tOps) close() { + t.cache.Zap() + t.bpool.Close() +} + +// Creates new initialized table ops instance. +func newTableOps(s *session, cacheCap int) *tOps { + c := cache.NewLRUCache(cacheCap) + return &tOps{ + s: s, + cache: c, + cacheNS: c.GetNamespace(0), + bpool: util.NewBufferPool(s.o.GetBlockSize() + 5), + } +} + +// tWriter wraps the table writer. It keep track of file descriptor +// and added key range. +type tWriter struct { + t *tOps + + file storage.File + w storage.Writer + tw *table.Writer + + first, last []byte +} + +// Append key/value pair to the table. +func (w *tWriter) append(key, value []byte) error { + if w.first == nil { + w.first = append([]byte{}, key...) + } + w.last = append(w.last[:0], key...) + return w.tw.Append(key, value) +} + +// Returns true if the table is empty. +func (w *tWriter) empty() bool { + return w.first == nil +} + +// Finalizes the table and returns table file. +func (w *tWriter) finish() (f *tFile, err error) { + err = w.tw.Close() + if err != nil { + return + } + err = w.w.Sync() + if err != nil { + w.w.Close() + return + } + w.w.Close() + f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last)) + return +} + +// Drops the table. +func (w *tWriter) drop() { + w.w.Close() + w.file.Remove() + w.t.s.reuseFileNum(w.file.Num()) + w.w = nil + w.file = nil + w.tw = nil + w.first = nil + w.last = nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go new file mode 100644 index 0000000..a6cf83b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go @@ -0,0 +1,131 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "encoding/binary" + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func (b *block) TestNewIterator(slice *util.Range) iterator.Iterator { + return b.newIterator(slice, false, nil) +} + +var _ = testutil.Defer(func() { + Describe("Block", func() { + Build := func(kv *testutil.KeyValue, restartInterval int) *block { + // Building the block. 
+ bw := &blockWriter{ + restartInterval: restartInterval, + scratch: make([]byte, 30), + } + kv.Iterate(func(i int, key, value []byte) { + bw.append(key, value) + }) + bw.finish() + + // Opening the block. + data := bw.buf.Bytes() + restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) + return &block{ + tr: &Reader{cmp: comparer.DefaultComparer}, + data: data, + restartsLen: restartsLen, + restartsOffset: len(data) - (restartsLen+1)*4, + } + } + + Describe("read test", func() { + for restartInterval := 1; restartInterval <= 5; restartInterval++ { + Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { + kv := &testutil.KeyValue{} + Text := func() string { + return fmt.Sprintf("and %d keys", kv.Len()) + } + + Test := func() { + // Make block. + br := Build(kv, restartInterval) + // Do testing. + testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil) + } + + Describe(Text(), Test) + + kv.PutString("", "empty") + Describe(Text(), Test) + + kv.PutString("a1", "foo") + Describe(Text(), Test) + + kv.PutString("a2", "v") + Describe(Text(), Test) + + kv.PutString("a3qqwrkks", "hello") + Describe(Text(), Test) + + kv.PutString("a4", "bar") + Describe(Text(), Test) + + kv.PutString("a5111111", "v5") + kv.PutString("a6", "") + kv.PutString("a7", "v7") + kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") + kv.PutString("b", "v9") + kv.PutString("c9", "v9") + kv.PutString("c91", "v9") + kv.PutString("d0", "v9") + Describe(Text(), Test) + }) + } + }) + + Describe("out-of-bound slice test", func() { + kv := &testutil.KeyValue{} + kv.PutString("k1", "v1") + kv.PutString("k2", "v2") + kv.PutString("k3abcdefgg", "v3") + kv.PutString("k4", "v4") + kv.PutString("k5", "v5") + for restartInterval := 1; restartInterval <= 5; restartInterval++ { + Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { + // Make block. + br := Build(kv, restartInterval) + + Test := func(r *util.Range) func(done Done) { + return func(done Done) { + iter := br.newIterator(r, false, nil) + Expect(iter.Error()).ShouldNot(HaveOccurred()) + + t := testutil.IteratorTesting{ + KeyValue: kv.Clone(), + Iter: iter, + } + + testutil.DoIteratorTesting(&t) + done <- true + } + } + + It("Should do iterations and seeks correctly #0", + Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) + + It("Should do iterations and seeks correctly #1", + Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) + }) + } + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go new file mode 100644 index 0000000..e1f9fb9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go @@ -0,0 +1,934 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
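+//
+// Editorial note (illustrative, not part of upstream goleveldb): the block
+// parsing below assumes LevelDB's prefix-compressed block layout; each entry
+// is
+//
+//	varint shared    // key bytes shared with the previous entry
+//	varint nonShared // remaining key bytes
+//	varint valueLen
+//	nonShared key bytes, then value bytes
+//
+// followed, at the end of the block, by an array of uint32 restart offsets
+// (offsets of entries whose shared count is zero) and the uint32 restart
+// count.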
+ +package table + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "sort" + "strings" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrNotFound = util.ErrNotFound + ErrIterReleased = errors.New("leveldb/table: iterator released") +) + +func max(x, y int) int { + if x > y { + return x + } + return y +} + +type block struct { + tr *Reader + data []byte + restartsLen int + restartsOffset int + // Whether checksum is verified and valid. + checksum bool +} + +func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) { + index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) + offset += 1 // shared always zero, since this is a restart point + v1, n1 := binary.Uvarint(b.data[offset:]) // key length + _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length + m := offset + n1 + n2 + return b.tr.cmp.Compare(b.data[m:m+int(v1)], key) > 0 + }) + rstart - 1 + if index < rstart { + // The smallest key is greater-than key sought. + index = rstart + } + offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) + return +} + +func (b *block) restartIndex(rstart, rlimit, offset int) int { + return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset + }) + rstart - 1 +} + +func (b *block) restartOffset(index int) int { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) +} + +func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { + if offset >= b.restartsOffset { + if offset != b.restartsOffset { + err = errors.New("leveldb/table: Reader: BlockEntry: invalid block (block entries offset not aligned)") + } + return + } + v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length + v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length + v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length + m := n0 + n1 + n2 + n = m + int(v1) + int(v2) + if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { + err = errors.New("leveldb/table: Reader: invalid block (block entries corrupted)") + return + } + key = b.data[offset+m : offset+m+int(v1)] + value = b.data[offset+m+int(v1) : offset+n] + nShared = int(v0) + return +} + +func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releaser) *blockIter { + bi := &blockIter{ + block: b, + cache: cache, + // Valid key should never be nil. 
+ key: make([]byte, 0), + dir: dirSOI, + riStart: 0, + riLimit: b.restartsLen, + offsetStart: 0, + offsetRealStart: 0, + offsetLimit: b.restartsOffset, + } + if slice != nil { + if slice.Start != nil { + if bi.Seek(slice.Start) { + bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) + bi.offsetStart = b.restartOffset(bi.riStart) + bi.offsetRealStart = bi.prevOffset + } else { + bi.riStart = b.restartsLen + bi.offsetStart = b.restartsOffset + bi.offsetRealStart = b.restartsOffset + } + } + if slice.Limit != nil { + if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { + bi.offsetLimit = bi.prevOffset + bi.riLimit = bi.restartIndex + 1 + } + } + bi.reset() + if bi.offsetStart > bi.offsetLimit { + bi.sErr(errors.New("leveldb/table: Reader: invalid slice range")) + } + } + return bi +} + +func (b *block) Release() { + if b.tr.bpool != nil { + b.tr.bpool.Put(b.data) + } + b.tr = nil + b.data = nil +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type blockIter struct { + block *block + cache, releaser util.Releaser + key, value []byte + offset int + // Previous offset, only filled by Next. + prevOffset int + prevNode []int + prevKeys []byte + restartIndex int + // Iterator direction. + dir dir + // Restart index slice range. + riStart int + riLimit int + // Offset slice range. + offsetStart int + offsetRealStart int + offsetLimit int + // Error. + err error +} + +func (i *blockIter) sErr(err error) { + i.err = err + i.key = nil + i.value = nil + i.prevNode = nil + i.prevKeys = nil +} + +func (i *blockIter) reset() { + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.restartIndex = i.riStart + i.offset = i.offsetStart + i.dir = dirSOI + i.key = i.key[:0] + i.value = nil +} + +func (i *blockIter) isFirst() bool { + switch i.dir { + case dirForward: + return i.prevOffset == i.offsetRealStart + case dirBackward: + return len(i.prevNode) == 1 && i.restartIndex == i.riStart + } + return false +} + +func (i *blockIter) isLast() bool { + switch i.dir { + case dirForward, dirBackward: + return i.offset == i.offsetLimit + } + return false +} + +func (i *blockIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirSOI + return i.Next() +} + +func (i *blockIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirEOI + return i.Prev() +} + +func (i *blockIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ri, offset, err := i.block.seek(i.riStart, i.riLimit, key) + if err != nil { + i.sErr(err) + return false + } + i.restartIndex = ri + i.offset = max(i.offsetStart, offset) + if i.dir == dirSOI || i.dir == dirEOI { + i.dir = dirForward + } + for i.Next() { + if i.block.tr.cmp.Compare(i.key, key) >= 0 { + return true + } + } + return false +} + +func (i *blockIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirSOI { + i.restartIndex = i.riStart + i.offset = i.offsetStart + } else if i.dir == 
dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + for i.offset < i.offsetRealStart { + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(err) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.offset += n + } + if i.offset >= i.offsetLimit { + i.dir = dirEOI + if i.offset != i.offsetLimit { + i.sErr(errors.New("leveldb/table: Reader: Next: invalid block (block entries offset not aligned)")) + } + return false + } + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(err) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.prevOffset = i.offset + i.offset += n + i.dir = dirForward + return true +} + +func (i *blockIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + var ri int + if i.dir == dirForward { + // Change direction. + i.offset = i.prevOffset + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) + i.dir = dirBackward + } else if i.dir == dirEOI { + // At the end of iterator. + i.restartIndex = i.riLimit + i.offset = i.offsetLimit + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.riLimit - 1 + i.dir = dirBackward + } else if len(i.prevNode) == 1 { + // This is the end of a restart range. + i.offset = i.prevNode[0] + i.prevNode = i.prevNode[:0] + if i.restartIndex == i.riStart { + i.dir = dirSOI + return false + } + i.restartIndex-- + ri = i.restartIndex + } else { + // In the middle of restart range, get from cache. + n := len(i.prevNode) - 3 + node := i.prevNode[n:] + i.prevNode = i.prevNode[:n] + // Get the key. + ko := node[0] + i.key = append(i.key[:0], i.prevKeys[ko:]...) + i.prevKeys = i.prevKeys[:ko] + // Get the value. + vo := node[1] + vl := vo + node[2] + i.value = i.block.data[vo:vl] + i.offset = vl + return true + } + // Build entries cache. + i.key = i.key[:0] + i.value = nil + offset := i.block.restartOffset(ri) + if offset == i.offset { + ri -= 1 + if ri < 0 { + i.dir = dirSOI + return false + } + offset = i.block.restartOffset(ri) + } + i.prevNode = append(i.prevNode, offset) + for { + key, value, nShared, n, err := i.block.entry(offset) + if err != nil { + i.sErr(err) + return false + } + if offset >= i.offsetRealStart { + if i.value != nil { + // Appends 3 variables: + // 1. Previous keys offset + // 2. Value offset in the data block + // 3. Value length + i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) + i.prevKeys = append(i.prevKeys, i.key...) + } + i.value = value + } + i.key = append(i.key[:nShared], key...) + offset += n + // Stop if target offset reached. 
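+ // Overshooting the target offset would mean the entry boundaries do
+ // not line up with the restart point, i.e. the block is corrupted.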
+ if offset >= i.offset { + if offset != i.offset { + i.sErr(errors.New("leveldb/table: Reader: Prev: invalid block (block entries offset not aligned)")) + return false + } + + break + } + } + i.restartIndex = ri + i.offset = offset + return true +} + +func (i *blockIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *blockIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *blockIter) Release() { + if i.dir > dirReleased { + i.block = nil + i.prevNode = nil + i.prevKeys = nil + i.key = nil + i.value = nil + i.dir = dirReleased + if i.cache != nil { + i.cache.Release() + i.cache = nil + } + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *blockIter) SetReleaser(releaser util.Releaser) { + if i.dir > dirReleased { + i.releaser = releaser + } +} + +func (i *blockIter) Valid() bool { + return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) +} + +func (i *blockIter) Error() error { + return i.err +} + +type filterBlock struct { + tr *Reader + data []byte + oOffset int + baseLg uint + filtersNum int +} + +func (b *filterBlock) contains(offset uint64, key []byte) bool { + i := int(offset >> b.baseLg) + if i < b.filtersNum { + o := b.data[b.oOffset+i*4:] + n := int(binary.LittleEndian.Uint32(o)) + m := int(binary.LittleEndian.Uint32(o[4:])) + if n < m && m <= b.oOffset { + return b.tr.filter.Contains(b.data[n:m], key) + } else if n == m { + return false + } + } + return true +} + +func (b *filterBlock) Release() { + if b.tr.bpool != nil { + b.tr.bpool.Put(b.data) + } + b.tr = nil + b.data = nil +} + +type indexIter struct { + *blockIter + slice *util.Range + // Options + checksum bool + fillCache bool +} + +func (i *indexIter) Get() iterator.Iterator { + value := i.Value() + if value == nil { + return nil + } + dataBH, n := decodeBlockHandle(value) + if n == 0 { + return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid table (bad data block handle)")) + } + var slice *util.Range + if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { + slice = i.slice + } + return i.blockIter.block.tr.getDataIter(dataBH, slice, i.checksum, i.fillCache) +} + +// Reader is a table reader. 
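+//
+// A minimal usage sketch (not part of the original source; the file
+// handle f, its size and the options o are assumed to be provided by
+// the caller):
+//
+//	tr := NewReader(f, size, nil, nil, o)
+//	defer tr.Release()
+//	value, err := tr.Get([]byte("some-key"), nil)
+//	if err != nil {
+//		// err may be ErrNotFound.
+//	}
+//	_ = value
+//	it := tr.NewIterator(nil, nil)
+//	for it.Next() {
+//		// it.Key() and it.Value() are only valid until the next
+//		// call on the iterator.
+//	}
+//	it.Release()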
+type Reader struct { + reader io.ReaderAt + cache cache.Namespace + err error + bpool *util.BufferPool + // Options + cmp comparer.Comparer + filter filter.Filter + checksum bool + strictIter bool + + dataEnd int64 + indexBH, filterBH blockHandle +} + +func verifyChecksum(data []byte) bool { + n := len(data) - 4 + checksum0 := binary.LittleEndian.Uint32(data[n:]) + checksum1 := util.NewCRC(data[:n]).Value() + return checksum0 == checksum1 +} + +func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) { + data := r.bpool.Get(int(bh.length + blockTrailerLen)) + if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { + return nil, err + } + if checksum || r.checksum { + if !verifyChecksum(data) { + r.bpool.Put(data) + return nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)") + } + } + switch data[bh.length] { + case blockTypeNoCompression: + data = data[:bh.length] + case blockTypeSnappyCompression: + decLen, err := snappy.DecodedLen(data[:bh.length]) + if err != nil { + return nil, err + } + tmp := data + data, err = snappy.Decode(r.bpool.Get(decLen), tmp[:bh.length]) + r.bpool.Put(tmp) + if err != nil { + return nil, err + } + default: + r.bpool.Put(data) + return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length]) + } + return data, nil +} + +func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) { + data, err := r.readRawBlock(bh, checksum) + if err != nil { + return nil, err + } + restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) + b := &block{ + tr: r, + data: data, + restartsLen: restartsLen, + restartsOffset: len(data) - (restartsLen+1)*4, + checksum: checksum || r.checksum, + } + return b, nil +} + +func (r *Reader) readBlockCached(bh blockHandle, checksum, fillCache bool) (*block, util.Releaser, error) { + if r.cache != nil { + var err error + ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) { + if !fillCache { + return 0, nil + } + var b *block + b, err = r.readBlock(bh, checksum) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + if ch != nil { + b, ok := ch.Value().(*block) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: Reader: inconsistent block type") + } + if !b.checksum && (r.checksum || checksum) { + if !verifyChecksum(b.data) { + ch.Release() + return nil, nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)") + } + b.checksum = true + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readBlock(bh, checksum) + return b, b, err +} + +func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { + data, err := r.readRawBlock(bh, true) + if err != nil { + return nil, err + } + n := len(data) + if n < 5 { + return nil, errors.New("leveldb/table: Reader: invalid filter block (too short)") + } + m := n - 5 + oOffset := int(binary.LittleEndian.Uint32(data[m:])) + if oOffset > m { + return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)") + } + b := &filterBlock{ + tr: r, + data: data, + oOffset: oOffset, + baseLg: uint(data[n-1]), + filtersNum: (m - oOffset) / 4, + } + return b, nil +} + +func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { + if r.cache != nil { + var err error + ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) { + if !fillCache { + return 0, nil + } + var b *filterBlock + b, 
err = r.readFilterBlock(bh)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ if ch != nil {
+ b, ok := ch.Value().(*filterBlock)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: Reader: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readFilterBlock(bh)
+ return b, b, err
+}
+
+func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
+ b, rel, err := r.readBlockCached(dataBH, checksum, fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ return b.newIterator(slice, false, rel)
+}
+
+// NewIterator creates an iterator from the table.
+//
+// Slice allows slicing the iterator so that it contains only keys in
+// the given range. A nil Range.Start is treated as a key before all
+// keys in the table; a nil Range.Limit is treated as a key after all
+// keys in the table.
+//
+// The returned iterator is not goroutine-safe and should be released
+// when it is no longer used.
+//
+// Also read the Iterator documentation of the leveldb/iterator package.
+func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ fillCache := !ro.GetDontFillCache()
+ b, rel, err := r.readBlockCached(r.indexBH, true, fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ index := &indexIter{
+ blockIter: b.newIterator(slice, true, rel),
+ slice: slice,
+ checksum: ro.GetStrict(opt.StrictBlockChecksum),
+ fillCache: !ro.GetDontFillCache(),
+ }
+ return iterator.NewIndexedIterator(index, r.strictIter || ro.GetStrict(opt.StrictIterator), false)
+}
+
+// Find finds the key/value pair whose key is greater than or equal to
+// the given key. It returns ErrNotFound if the table doesn't contain
+// such a pair.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := indexBlock.newIterator(nil, true, nil)
+ defer index.Release()
+ if !index.Seek(key) {
+ err = index.Error()
+ if err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)")
+ return
+ }
+ if r.filter != nil {
+ filterBlock, rel, ferr := r.readFilterBlockCached(r.filterBH, true)
+ if ferr == nil {
+ if !filterBlock.contains(dataBH.offset, key) {
+ rel.Release()
+ return nil, nil, ErrNotFound
+ }
+ rel.Release()
+ }
+ }
+ data := r.getDataIter(dataBH, nil, ro.GetStrict(opt.StrictBlockChecksum), !ro.GetDontFillCache())
+ defer data.Release()
+ if !data.Seek(key) {
+ err = data.Error()
+ if err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+ // The key is held in the iterator's own buffer, so there is no need
+ // to copy it.
+ rkey = data.Key()
+ // The value points into the block buffer; since that buffer will be
+ // recycled, it needs to be copied.
+ value = append([]byte{}, data.Value()...)
+ return
+}
+
+// Get gets the value for the given key. It returns ErrNotFound
+// if the table does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ rkey, value, err := r.Find(key, ro)
+ if err == nil && r.cmp.Compare(rkey, key) != 0 {
+ value = nil
+ err = ErrNotFound
+ }
+ return
+}
+
+// OffsetOf returns the approximate offset for the given key.
+//
+// It is safe to modify the contents of the argument after OffsetOf returns.
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := indexBlock.newIterator(nil, true, nil)
+ defer index.Release()
+ if index.Seek(key) {
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)")
+ return
+ }
+ offset = int64(dataBH.offset)
+ return
+ }
+ err = index.Error()
+ if err == nil {
+ offset = r.dataEnd
+ }
+ return
+}
+
+// Release implements util.Releaser.
+// It also closes the file if it is an io.Closer.
+func (r *Reader) Release() {
+ if closer, ok := r.reader.(io.Closer); ok {
+ closer.Close()
+ }
+ r.reader = nil
+ r.cache = nil
+ r.bpool = nil
+}
+
+// NewReader creates a new initialized table reader for the file.
+// The cache and bpool are optional and can be nil.
+//
+// The returned table reader instance is goroutine-safe.
+func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) *Reader {
+ if bpool == nil {
+ bpool = util.NewBufferPool(o.GetBlockSize() + blockTrailerLen)
+ }
+ r := &Reader{
+ reader: f,
+ cache: cache,
+ bpool: bpool,
+ cmp: o.GetComparer(),
+ checksum: o.GetStrict(opt.StrictBlockChecksum),
+ strictIter: o.GetStrict(opt.StrictIterator),
+ }
+ if f == nil {
+ r.err = errors.New("leveldb/table: Reader: nil file")
+ return r
+ }
+ if size < footerLen {
+ r.err = errors.New("leveldb/table: Reader: invalid table (file size is too small)")
+ return r
+ }
+ var footer [footerLen]byte
+ if _, err := r.reader.ReadAt(footer[:], size-footerLen); err != nil && err != io.EOF {
+ r.err = fmt.Errorf("leveldb/table: Reader: invalid table (could not read footer): %v", err)
+ return r
+ }
+ if string(footer[footerLen-len(magic):footerLen]) != magic {
+ r.err = errors.New("leveldb/table: Reader: invalid table (bad magic number)")
+ return r
+ }
+ // Decode the metaindex block handle.
+ metaBH, n := decodeBlockHandle(footer[:])
+ if n == 0 {
+ r.err = errors.New("leveldb/table: Reader: invalid table (bad metaindex block handle)")
+ return r
+ }
+ // Decode the index block handle.
+ r.indexBH, n = decodeBlockHandle(footer[n:])
+ if n == 0 {
+ r.err = errors.New("leveldb/table: Reader: invalid table (bad index block handle)")
+ return r
+ }
+ // Read metaindex block.
+ metaBlock, err := r.readBlock(metaBH, true)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ // Set data end.
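+ // The data region ends where the metaindex block begins; if a filter
+ // block is found in the metaindex below, dataEnd is moved back to the
+ // filter block's offset instead.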
+ r.dataEnd = int64(metaBH.offset)
+ metaIter := metaBlock.newIterator(nil, false, nil)
+ for metaIter.Next() {
+ key := string(metaIter.Key())
+ if !strings.HasPrefix(key, "filter.") {
+ continue
+ }
+ fn := key[7:]
+ if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
+ r.filter = f0
+ } else {
+ for _, f0 := range o.GetAltFilters() {
+ if f0.Name() == fn {
+ r.filter = f0
+ break
+ }
+ }
+ }
+ if r.filter != nil {
+ filterBH, n := decodeBlockHandle(metaIter.Value())
+ if n == 0 {
+ continue
+ }
+ r.filterBH = filterBH
+ // Update data end.
+ r.dataEnd = int64(filterBH.offset)
+ break
+ }
+ }
+ metaIter.Release()
+ metaBlock.Release()
+ return r
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
new file mode 100644
index 0000000..c0ac70d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
@@ -0,0 +1,177 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package table allows reading and writing of sorted key/value pairs.
+package table
+
+import (
+ "encoding/binary"
+)
+
+/*
+Table:
+
+A table consists of one or more data blocks, an optional filter block,
+a metaindex block, an index block and a table footer. The metaindex
+block is a special block that keeps parameters of the table, such as
+the filter block name and its block handle. The index block is a
+special block that records the offset and length of each data block;
+the index block uses one as its restart interval. The keys used by the
+index block are the last key of the preceding block, a shortened
+separator between adjacent blocks, or a shortened successor of the
+last key of the last block. The filter block is an optional block that
+contains a sequence of filter data generated by a filter generator.
+
+Table data structure:
+
+                                                  optional
+                                                 /
+    +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
+    | data block 1 |      ...     | data block n | filter block | metaindex block | index block | footer |
+    +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
+
+    Each block is followed by a 5-byte trailer that contains the
+    compression type and a checksum.
+
+Table block trailer:
+
+    +---------------------------+-------------------+
+    | compression type (1-byte) | checksum (4-byte) |
+    +---------------------------+-------------------+
+
+    The checksum is a CRC-32 computed using Castagnoli's polynomial.
+    The compression type is also included in the checksum.
+
+Table footer:
+
+      +------------------- 40-bytes -------------------+
+     /                                                   \
+    +------------------------+--------------------+------+-----------------+
+    | metaindex block handle / index block handle / ---- | magic (8-bytes) |
+    +------------------------+--------------------+------+-----------------+
+
+    The magic is the first 64 bits of the SHA-1 sum of
+    "http://code.google.com/p/leveldb/".
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+/*
+Block:
+
+A block consists of one or more key/value entries and a block trailer.
+A block entry shares its key prefix with the preceding key until a
+restart point is reached. A block should contain at least one restart
+point, and the first restart point is always zero.
+
+Block data structure:
+
+      + restart point                 + restart point (depends on restart interval)
+     /                               /
+    +---------------+---------------+---------------+---------------+---------+
+    | block entry 1 | block entry 2 |      ...      | block entry n | trailer |
+    +---------------+---------------+---------------+---------------+---------+
+
+Key/value entry:
+
+              +---- key len ----+
+             /                   \
+    +-----------------+---------------------+--------------------+--------------+----------------+
+    | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
+    +-----------------+---------------------+--------------------+--------------+----------------+
+
+    A block entry shares its key prefix with the preceding key:
+    Conditions:
+        restart_interval=2
+        entry one  : key=deck,value=v1
+        entry two  : key=dock,value=v2
+        entry three: key=duck,value=v3
+    The entries will be encoded as follows:
+
+      + restart point (offset=0)                                               + restart point (offset=16)
+     /                                                                        /
+    +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+    |  0  |  4  |  2  |  "deck"  |  "v1"  |  1  |  3  |  2  |  "ock"  |  "v2"  |  0  |  4  |  2  |  "duck"  |  "v3"  |
+    +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+     \                                   / \                                  / \                                   /
+      +----------- entry one -----------+   +----------- entry two ----------+   +---------- entry three ----------+
+
+    The block trailer will contain two restart points:
+
+    +------------+-----------+--------+
+    |      0     |     16    |    2   |
+    +------------+-----------+---+----+
+     \                      /     \
+      +-- restart points --+       + restart points length
+
+Block trailer:
+
+      +-- 4-bytes --+
+     /               \
+    +-----------------+-----------------+-----------------+------------------------------+
+    | restart point 1 |       ....      | restart point n | restart points len (4-bytes) |
+    +-----------------+-----------------+-----------------+------------------------------+
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+/*
+Filter block:
+
+A filter block consists of one or more filter data and a filter block
+trailer. The trailer contains the filter data offsets, a trailer offset
+and a 1-byte base Lg.
+
+Filter block data structure:
+
+      + offset 1      + offset 2      + offset n      + trailer offset
+     /               /               /               /
+    +---------------+---------------+---------------+---------+
+    | filter data 1 |      ...      | filter data n | trailer |
+    +---------------+---------------+---------------+---------+
+
+Filter block trailer:
+
+      +- 4-bytes -+
+     /             \
+    +---------------+---------------+---------------+-------------------------+------------------+
+    |    offset 1   |      ....     |    offset n   | filter offset (4-bytes) | base Lg (1-byte) |
+    +---------------+---------------+---------------+-------------------------+------------------+
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+const (
+ blockTrailerLen = 5
+ footerLen = 48
+
+ magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"
+
+ // The block type gives the per-block compression format.
+ // These constants are part of the file format and should not be changed.
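+ // A value of 0 means the block payload is stored verbatim; 1 means it
+ // is Snappy-compressed (see Reader.readRawBlock and Writer.writeBlock).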
+ blockTypeNoCompression = 0 + blockTypeSnappyCompression = 1 + + // Generate new filter every 2KB of data + filterBaseLg = 11 + filterBase = 1 << filterBaseLg +) + +type blockHandle struct { + offset, length uint64 +} + +func decodeBlockHandle(src []byte) (blockHandle, int) { + offset, n := binary.Uvarint(src) + length, m := binary.Uvarint(src[n:]) + if n == 0 || m == 0 { + return blockHandle{}, 0 + } + return blockHandle{offset, length}, n + m +} + +func encodeBlockHandle(dst []byte, b blockHandle) int { + n := binary.PutUvarint(dst, b.offset) + m := binary.PutUvarint(dst[n:], b.length) + return n + m +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go new file mode 100644 index 0000000..de43cc0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go @@ -0,0 +1,17 @@ +package table + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestTable(t *testing.T) { + testutil.RunDefer() + + RegisterFailHandler(Fail) + RunSpecs(t, "Table Suite") +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go new file mode 100644 index 0000000..efe969a --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go @@ -0,0 +1,121 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "bytes" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type tableWrapper struct { + *Reader +} + +func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { + return t.Reader.Find(key, nil) +} + +func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { + return t.Reader.Get(key, nil) +} + +func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { + return t.Reader.NewIterator(slice, nil) +} + +var _ = testutil.Defer(func() { + Describe("Table", func() { + Describe("approximate offset test", func() { + var ( + buf = &bytes.Buffer{} + o = &opt.Options{ + BlockSize: 1024, + Compression: opt.NoCompression, + } + ) + + // Building the table. 
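+ // Keys must be appended in strictly increasing order, otherwise
+ // Writer.Append returns an error.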
+ tw := NewWriter(buf, o) + tw.Append([]byte("k01"), []byte("hello")) + tw.Append([]byte("k02"), []byte("hello2")) + tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) + tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) + tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) + tw.Append([]byte("k06"), []byte("hello3")) + tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) + err := tw.Close() + + It("Should be able to approximate offset of a key correctly", func() { + Expect(err).ShouldNot(HaveOccurred()) + + tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o) + CheckOffset := func(key string, expect, threshold int) { + offset, err := tr.OffsetOf([]byte(key)) + Expect(err).ShouldNot(HaveOccurred()) + Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) + } + + CheckOffset("k0", 0, 0) + CheckOffset("k01a", 0, 0) + CheckOffset("k02", 0, 0) + CheckOffset("k03", 0, 0) + CheckOffset("k04", 10000, 1000) + CheckOffset("k04a", 210000, 1000) + CheckOffset("k05", 210000, 1000) + CheckOffset("k06", 510000, 1000) + CheckOffset("k07", 510000, 1000) + CheckOffset("xyz", 610000, 2000) + }) + }) + + Describe("read test", func() { + Build := func(kv testutil.KeyValue) testutil.DB { + o := &opt.Options{ + BlockSize: 512, + BlockRestartInterval: 3, + } + buf := &bytes.Buffer{} + + // Building the table. + tw := NewWriter(buf, o) + kv.Iterate(func(i int, key, value []byte) { + tw.Append(key, value) + }) + tw.Close() + + // Opening the table. + tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o) + return tableWrapper{tr} + } + Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { + return func() { + db := Build(*kv) + if body != nil { + body(db.(tableWrapper).Reader) + } + testutil.KeyValueTesting(nil, *kv, db, nil, nil) + } + } + + testutil.AllKeyValueTesting(nil, Build, nil, nil) + Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { + It("should have correct blocks number", func() { + indexBlock, err := r.readBlock(r.indexBH, true) + Expect(err).To(BeNil()) + Expect(indexBlock.restartsLen).Should(Equal(9)) + }) + })) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go new file mode 100644 index 0000000..6e7d40b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go @@ -0,0 +1,379 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+
+package table
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/jbenet/datastore.go/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy"
+
+ "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func sharedPrefixLen(a, b []byte) int {
+ i, n := 0, len(a)
+ if n > len(b) {
+ n = len(b)
+ }
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+type blockWriter struct {
+ restartInterval int
+ buf util.Buffer
+ nEntries int
+ prevKey []byte
+ restarts []uint32
+ scratch []byte
+}
+
+func (w *blockWriter) append(key, value []byte) {
+ nShared := 0
+ if w.nEntries%w.restartInterval == 0 {
+ w.restarts = append(w.restarts, uint32(w.buf.Len()))
+ } else {
+ nShared = sharedPrefixLen(w.prevKey, key)
+ }
+ n := binary.PutUvarint(w.scratch[0:], uint64(nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(value)))
+ w.buf.Write(w.scratch[:n])
+ w.buf.Write(key[nShared:])
+ w.buf.Write(value)
+ w.prevKey = append(w.prevKey[:0], key...)
+ w.nEntries++
+}
+
+func (w *blockWriter) finish() {
+ // Write restarts entry.
+ if w.nEntries == 0 {
+ // Must have at least one restart entry.
+ w.restarts = append(w.restarts, 0)
+ }
+ w.restarts = append(w.restarts, uint32(len(w.restarts)))
+ for _, x := range w.restarts {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+}
+
+func (w *blockWriter) reset() {
+ w.buf.Reset()
+ w.nEntries = 0
+ w.restarts = w.restarts[:0]
+}
+
+func (w *blockWriter) bytesLen() int {
+ restartsLen := len(w.restarts)
+ if restartsLen == 0 {
+ restartsLen = 1
+ }
+ return w.buf.Len() + 4*restartsLen + 4
+}
+
+type filterWriter struct {
+ generator filter.FilterGenerator
+ buf util.Buffer
+ nKeys int
+ offsets []uint32
+}
+
+func (w *filterWriter) add(key []byte) {
+ if w.generator == nil {
+ return
+ }
+ w.generator.Add(key)
+ w.nKeys++
+}
+
+func (w *filterWriter) flush(offset uint64) {
+ if w.generator == nil {
+ return
+ }
+ for x := int(offset / filterBase); x > len(w.offsets); {
+ w.generate()
+ }
+}
+
+func (w *filterWriter) finish() {
+ if w.generator == nil {
+ return
+ }
+ // Generate filter data for the remaining keys.
+ if w.nKeys > 0 {
+ w.generate()
+ }
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ for _, x := range w.offsets {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+ w.buf.WriteByte(filterBaseLg)
+}
+
+func (w *filterWriter) generate() {
+ // Record offset.
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ // Generate filters.
+ if w.nKeys > 0 {
+ w.generator.Generate(&w.buf)
+ w.nKeys = 0
+ }
+}
+
+// Writer is a table writer.
+type Writer struct {
+ writer io.Writer
+ err error
+ // Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ compression opt.Compression
+ blockSize int
+
+ dataBlock blockWriter
+ indexBlock blockWriter
+ filterBlock filterWriter
+ pendingBH blockHandle
+ offset uint64
+ nEntries int
+ // Scratch is allocated large enough for 5 uvarints. The block writers
+ // must not use the first 20 bytes, which are reserved for encoding a
+ // block handle that is then passed to the block writer itself.
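+ // (A uvarint takes at most 10 bytes for a uint64, so 5 uvarints fit in
+ // 50 bytes, and a block handle, being two uvarints, fits in 20.)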
+ scratch [50]byte + comparerScratch []byte + compressionScratch []byte +} + +func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { + // Compress the buffer if necessary. + var b []byte + if compression == opt.SnappyCompression { + // Allocate scratch enough for compression and block trailer. + if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { + w.compressionScratch = make([]byte, n) + } + var compressed []byte + compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes()) + if err != nil { + return + } + n := len(compressed) + b = compressed[:n+blockTrailerLen] + b[n] = blockTypeSnappyCompression + } else { + tmp := buf.Alloc(blockTrailerLen) + tmp[0] = blockTypeNoCompression + b = buf.Bytes() + } + + // Calculate the checksum. + n := len(b) - 4 + checksum := util.NewCRC(b[:n]).Value() + binary.LittleEndian.PutUint32(b[n:], checksum) + + // Write the buffer to the file. + _, err = w.writer.Write(b) + if err != nil { + return + } + bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} + w.offset += uint64(len(b)) + return +} + +func (w *Writer) flushPendingBH(key []byte) { + if w.pendingBH.length == 0 { + return + } + var separator []byte + if len(key) == 0 { + separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) + } else { + separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) + } + if separator == nil { + separator = w.dataBlock.prevKey + } else { + w.comparerScratch = separator + } + n := encodeBlockHandle(w.scratch[:20], w.pendingBH) + // Append the block handle to the index block. + w.indexBlock.append(separator, w.scratch[:n]) + // Reset prev key of the data block. + w.dataBlock.prevKey = w.dataBlock.prevKey[:0] + // Clear pending block handle. + w.pendingBH = blockHandle{} +} + +func (w *Writer) finishBlock() error { + w.dataBlock.finish() + bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + return err + } + w.pendingBH = bh + // Reset the data block. + w.dataBlock.reset() + // Flush the filter block. + w.filterBlock.flush(w.offset) + return nil +} + +// Append appends key/value pair to the table. The keys passed must +// be in increasing order. +// +// It is safe to modify the contents of the arguments after Append returns. +func (w *Writer) Append(key, value []byte) error { + if w.err != nil { + return w.err + } + if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { + w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) + return w.err + } + + w.flushPendingBH(key) + // Append key/value pair to the data block. + w.dataBlock.append(key, value) + // Add key to the filter block. + w.filterBlock.add(key) + + // Finish the data block if block size target reached. + if w.dataBlock.bytesLen() >= w.blockSize { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.nEntries++ + return nil +} + +// BlocksLen returns number of blocks written so far. +func (w *Writer) BlocksLen() int { + n := w.indexBlock.nEntries + if w.pendingBH.length > 0 { + // Includes the pending block. + n++ + } + return n +} + +// EntriesLen returns number of entries added so far. +func (w *Writer) EntriesLen() int { + return w.nEntries +} + +// BytesLen returns number of bytes written so far. +func (w *Writer) BytesLen() int { + return int(w.offset) +} + +// Close will finalize the table. 
Calling Append is not possible +// after Close, but calling BlocksLen, EntriesLen and BytesLen +// is still possible. +func (w *Writer) Close() error { + if w.err != nil { + return w.err + } + + // Write the last data block. Or empty data block if there + // aren't any data blocks at all. + if w.dataBlock.nEntries > 0 || w.nEntries == 0 { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.flushPendingBH(nil) + + // Write the filter block. + var filterBH blockHandle + w.filterBlock.finish() + if buf := &w.filterBlock.buf; buf.Len() > 0 { + filterBH, w.err = w.writeBlock(buf, opt.NoCompression) + if w.err != nil { + return w.err + } + } + + // Write the metaindex block. + if filterBH.length > 0 { + key := []byte("filter." + w.filter.Name()) + n := encodeBlockHandle(w.scratch[:20], filterBH) + w.dataBlock.append(key, w.scratch[:n]) + } + w.dataBlock.finish() + metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the index block. + w.indexBlock.finish() + indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the table footer. + footer := w.scratch[:footerLen] + for i := range footer { + footer[i] = 0 + } + n := encodeBlockHandle(footer, metaindexBH) + encodeBlockHandle(footer[n:], indexBH) + copy(footer[footerLen-len(magic):], magic) + if _, err := w.writer.Write(footer); err != nil { + w.err = err + return w.err + } + w.offset += footerLen + + w.err = errors.New("leveldb/table: writer is closed") + return nil +} + +// NewWriter creates a new initialized table writer for the file. +// +// Table writer is not goroutine-safe. +func NewWriter(f io.Writer, o *opt.Options) *Writer { + w := &Writer{ + writer: f, + cmp: o.GetComparer(), + filter: o.GetFilter(), + compression: o.GetCompression(), + blockSize: o.GetBlockSize(), + comparerScratch: make([]byte, 0), + } + // data block + w.dataBlock.restartInterval = o.GetBlockRestartInterval() + // The first 20-bytes are used for encoding block handle. + w.dataBlock.scratch = w.scratch[20:] + // index block + w.indexBlock.restartInterval = 1 + w.indexBlock.scratch = w.scratch[20:] + // filter block + if w.filter != nil { + w.filterBlock.generator = w.filter.NewGenerator() + w.filterBlock.flush(0) + } + return w +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go new file mode 100644 index 0000000..13535f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go @@ -0,0 +1,216 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "fmt" + "math/rand" + + . 
"github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type DB interface{} + +type Put interface { + TestPut(key []byte, value []byte) error +} + +type Delete interface { + TestDelete(key []byte) error +} + +type Find interface { + TestFind(key []byte) (rkey, rvalue []byte, err error) +} + +type Get interface { + TestGet(key []byte) (value []byte, err error) +} + +type NewIterator interface { + TestNewIterator(slice *util.Range) iterator.Iterator +} + +type DBAct int + +func (a DBAct) String() string { + switch a { + case DBNone: + return "none" + case DBPut: + return "put" + case DBOverwrite: + return "overwrite" + case DBDelete: + return "delete" + case DBDeleteNA: + return "delete_na" + } + return "unknown" +} + +const ( + DBNone DBAct = iota + DBPut + DBOverwrite + DBDelete + DBDeleteNA +) + +type DBTesting struct { + Rand *rand.Rand + DB interface { + Get + Put + Delete + } + PostFn func(t *DBTesting) + Deleted, Present KeyValue + Act, LastAct DBAct + ActKey, LastActKey []byte +} + +func (t *DBTesting) post() { + if t.PostFn != nil { + t.PostFn(t) + } +} + +func (t *DBTesting) setAct(act DBAct, key []byte) { + t.LastAct, t.Act = t.Act, act + t.LastActKey, t.ActKey = t.ActKey, key +} + +func (t *DBTesting) text() string { + return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) +} + +func (t *DBTesting) Text() string { + return "DBTesting " + t.text() +} + +func (t *DBTesting) TestPresentKV(key, value []byte) { + rvalue, err := t.DB.TestGet(key) + Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) + Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) +} + +func (t *DBTesting) TestAllPresent() { + t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { + t.TestPresentKV(key, value) + }) +} + +func (t *DBTesting) TestDeletedKey(key []byte) { + _, err := t.DB.TestGet(key) + Expect(err).Should(Equal(util.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) +} + +func (t *DBTesting) TestAllDeleted() { + t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { + t.TestDeletedKey(key) + }) +} + +func (t *DBTesting) TestAll() { + dn := t.Deleted.Len() + pn := t.Present.Len() + ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { + if i >= dn { + key, value := t.Present.Index(i - dn) + t.TestPresentKV(key, value) + } else { + t.TestDeletedKey(t.Deleted.KeyAt(i)) + } + }) +} + +func (t *DBTesting) Put(key, value []byte) { + if new := t.Present.PutU(key, value); new { + t.setAct(DBPut, key) + } else { + t.setAct(DBOverwrite, key) + } + t.Deleted.Delete(key) + err := t.DB.TestPut(key, value) + Expect(err).ShouldNot(HaveOccurred(), t.Text()) + t.TestPresentKV(key, value) + t.post() +} + +func (t *DBTesting) PutRandom() bool { + if t.Deleted.Len() > 0 { + i := t.Rand.Intn(t.Deleted.Len()) + key, value := t.Deleted.Index(i) + t.Put(key, value) + return true + } + return false +} + +func (t *DBTesting) Delete(key []byte) { + if exist, value := t.Present.Delete(key); exist { + t.setAct(DBDelete, key) + t.Deleted.PutU(key, value) + } else { + t.setAct(DBDeleteNA, key) + } + err := t.DB.TestDelete(key) + Expect(err).ShouldNot(HaveOccurred(), t.Text()) + t.TestDeletedKey(key) + t.post() +} + +func (t *DBTesting) DeleteRandom() bool { + if t.Present.Len() > 0 { + i := t.Rand.Intn(t.Present.Len()) + t.Delete(t.Present.KeyAt(i)) 
+ return true + } + return false +} + +func (t *DBTesting) RandomAct(round int) { + for i := 0; i < round; i++ { + if t.Rand.Int()%2 == 0 { + t.PutRandom() + } else { + t.DeleteRandom() + } + } +} + +func DoDBTesting(t *DBTesting) { + if t.Rand == nil { + t.Rand = NewRand() + } + + t.DeleteRandom() + t.PutRandom() + t.DeleteRandom() + t.DeleteRandom() + for i := t.Deleted.Len() / 2; i >= 0; i-- { + t.PutRandom() + } + t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) + + // Additional iterator testing + if db, ok := t.DB.(NewIterator); ok { + iter := db.TestNewIterator(nil) + Expect(iter.Error()).NotTo(HaveOccurred()) + + it := IteratorTesting{ + KeyValue: t.Present, + Iter: iter, + } + + DoIteratorTesting(&it) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go new file mode 100644 index 0000000..8b49bfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go @@ -0,0 +1,327 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "fmt" + "math/rand" + + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" +) + +type IterAct int + +func (a IterAct) String() string { + switch a { + case IterNone: + return "none" + case IterFirst: + return "first" + case IterLast: + return "last" + case IterPrev: + return "prev" + case IterNext: + return "next" + case IterSeek: + return "seek" + case IterSOI: + return "soi" + case IterEOI: + return "eoi" + } + return "unknown" +} + +const ( + IterNone IterAct = iota + IterFirst + IterLast + IterPrev + IterNext + IterSeek + IterSOI + IterEOI +) + +type IteratorTesting struct { + KeyValue + Iter iterator.Iterator + Rand *rand.Rand + PostFn func(t *IteratorTesting) + Pos int + Act, LastAct IterAct + + once bool +} + +func (t *IteratorTesting) init() { + if !t.once { + t.Pos = -1 + t.once = true + } +} + +func (t *IteratorTesting) post() { + if t.PostFn != nil { + t.PostFn(t) + } +} + +func (t *IteratorTesting) setAct(act IterAct) { + t.LastAct, t.Act = t.Act, act +} + +func (t *IteratorTesting) text() string { + return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) +} + +func (t *IteratorTesting) Text() string { + return "IteratorTesting is " + t.text() +} + +func (t *IteratorTesting) IsFirst() bool { + t.init() + return t.Len() > 0 && t.Pos == 0 +} + +func (t *IteratorTesting) IsLast() bool { + t.init() + return t.Len() > 0 && t.Pos == t.Len()-1 +} + +func (t *IteratorTesting) TestKV() { + t.init() + key, value := t.Index(t.Pos) + Expect(t.Iter.Key()).NotTo(BeNil()) + Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) + Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) +} + +func (t *IteratorTesting) First() { + t.init() + t.setAct(IterFirst) + + ok := t.Iter.First() + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if t.Len() > 0 { + t.Pos = 0 + Expect(ok).Should(BeTrue(), t.Text()) + t.TestKV() + } else { + t.Pos = -1 + Expect(ok).ShouldNot(BeTrue(), t.Text()) + } + t.post() +} + +func (t *IteratorTesting) Last() { + t.init() + t.setAct(IterLast) + + ok := t.Iter.Last() + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if t.Len() > 0 { + t.Pos = t.Len() - 1 + 
Expect(ok).Should(BeTrue(), t.Text()) + t.TestKV() + } else { + t.Pos = 0 + Expect(ok).ShouldNot(BeTrue(), t.Text()) + } + t.post() +} + +func (t *IteratorTesting) Next() { + t.init() + t.setAct(IterNext) + + ok := t.Iter.Next() + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if t.Pos < t.Len()-1 { + t.Pos++ + Expect(ok).Should(BeTrue(), t.Text()) + t.TestKV() + } else { + t.Pos = t.Len() + Expect(ok).ShouldNot(BeTrue(), t.Text()) + } + t.post() +} + +func (t *IteratorTesting) Prev() { + t.init() + t.setAct(IterPrev) + + ok := t.Iter.Prev() + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if t.Pos > 0 { + t.Pos-- + Expect(ok).Should(BeTrue(), t.Text()) + t.TestKV() + } else { + t.Pos = -1 + Expect(ok).ShouldNot(BeTrue(), t.Text()) + } + t.post() +} + +func (t *IteratorTesting) Seek(i int) { + t.init() + t.setAct(IterSeek) + + key, _ := t.Index(i) + oldKey, _ := t.IndexOrNil(t.Pos) + + ok := t.Iter.Seek(key) + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) + + t.Pos = i + t.TestKV() + t.post() +} + +func (t *IteratorTesting) SeekInexact(i int) { + t.init() + t.setAct(IterSeek) + var key0 []byte + key1, _ := t.Index(i) + if i > 0 { + key0, _ = t.Index(i - 1) + } + key := BytesSeparator(key0, key1) + oldKey, _ := t.IndexOrNil(t.Pos) + + ok := t.Iter.Seek(key) + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) + + t.Pos = i + t.TestKV() + t.post() +} + +func (t *IteratorTesting) SeekKey(key []byte) { + t.init() + t.setAct(IterSeek) + oldKey, _ := t.IndexOrNil(t.Pos) + i := t.Search(key) + + ok := t.Iter.Seek(key) + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if i < t.Len() { + key_, _ := t.Index(i) + Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) + t.Pos = i + t.TestKV() + } else { + Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) + } + + t.Pos = i + t.post() +} + +func (t *IteratorTesting) SOI() { + t.init() + t.setAct(IterSOI) + Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) + for i := 0; i < 3; i++ { + t.Prev() + } + t.post() +} + +func (t *IteratorTesting) EOI() { + t.init() + t.setAct(IterEOI) + Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) + for i := 0; i < 3; i++ { + t.Next() + } + t.post() +} + +func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { + t.init() + for old := t.Pos; t.Pos > 0; old = t.Pos { + fn(t) + Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) + } +} + +func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { + t.init() + for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { + fn(t) + Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) + } +} + +func (t *IteratorTesting) PrevAll() { + t.WalkPrev(func(t *IteratorTesting) { + t.Prev() + }) +} + +func (t *IteratorTesting) NextAll() { + t.WalkNext(func(t *IteratorTesting) { + t.Next() + }) +} + +func DoIteratorTesting(t *IteratorTesting) { + if t.Rand == nil { + t.Rand = NewRand() + } + t.SOI() + t.NextAll() + t.First() + t.SOI() + t.NextAll() + t.EOI() + t.PrevAll() + t.Last() + t.EOI() + t.PrevAll() + t.SOI() + + t.NextAll() + t.PrevAll() + t.NextAll() + t.Last() + t.PrevAll() + t.First() + t.NextAll() + t.EOI() + + ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { + t.Seek(i) + }) + + 
ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { + t.SeekInexact(i) + }) + + ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { + t.Seek(i) + if i%2 != 0 { + t.PrevAll() + t.SOI() + } else { + t.NextAll() + t.EOI() + } + }) + + for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { + t.SeekKey([]byte(key)) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go new file mode 100644 index 0000000..752c980 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go @@ -0,0 +1,352 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "fmt" + "math/rand" + "sort" + "strings" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type KeyValueEntry struct { + key, value []byte +} + +type KeyValue struct { + entries []KeyValueEntry + nbytes int +} + +func (kv *KeyValue) Put(key, value []byte) { + if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { + panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) + } + kv.entries = append(kv.entries, KeyValueEntry{key, value}) + kv.nbytes += len(key) + len(value) +} + +func (kv *KeyValue) PutString(key, value string) { + kv.Put([]byte(key), []byte(value)) +} + +func (kv *KeyValue) PutU(key, value []byte) bool { + if i, exist := kv.Get(key); !exist { + if i < kv.Len() { + kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) + kv.entries[i] = KeyValueEntry{key, value} + } else { + kv.entries = append(kv.entries, KeyValueEntry{key, value}) + } + kv.nbytes += len(key) + len(value) + return true + } else { + kv.nbytes += len(value) - len(kv.ValueAt(i)) + kv.entries[i].value = value + } + return false +} + +func (kv *KeyValue) PutUString(key, value string) bool { + return kv.PutU([]byte(key), []byte(value)) +} + +func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { + i, exist := kv.Get(key) + if exist { + value = kv.entries[i].value + kv.DeleteIndex(i) + } + return +} + +func (kv *KeyValue) DeleteIndex(i int) bool { + if i < kv.Len() { + kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) + kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) 
+ return true + } + return false +} + +func (kv KeyValue) Len() int { + return len(kv.entries) +} + +func (kv *KeyValue) Size() int { + return kv.nbytes +} + +func (kv KeyValue) KeyAt(i int) []byte { + return kv.entries[i].key +} + +func (kv KeyValue) ValueAt(i int) []byte { + return kv.entries[i].value +} + +func (kv KeyValue) Index(i int) (key, value []byte) { + if i < 0 || i >= len(kv.entries) { + panic(fmt.Sprintf("Index #%d: out of range", i)) + } + return kv.entries[i].key, kv.entries[i].value +} + +func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { + key, value = kv.Index(i) + var key0 []byte + var key1 = kv.KeyAt(i) + if i > 0 { + key0 = kv.KeyAt(i - 1) + } + key_ = BytesSeparator(key0, key1) + return +} + +func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { + if i >= 0 && i < len(kv.entries) { + return kv.entries[i].key, kv.entries[i].value + } + return nil, nil +} + +func (kv KeyValue) IndexString(i int) (key, value string) { + key_, _value := kv.Index(i) + return string(key_), string(_value) +} + +func (kv KeyValue) Search(key []byte) int { + return sort.Search(kv.Len(), func(i int) bool { + return cmp.Compare(kv.KeyAt(i), key) >= 0 + }) +} + +func (kv KeyValue) SearchString(key string) int { + return kv.Search([]byte(key)) +} + +func (kv KeyValue) Get(key []byte) (i int, exist bool) { + i = kv.Search(key) + if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { + exist = true + } + return +} + +func (kv KeyValue) GetString(key string) (i int, exist bool) { + return kv.Get([]byte(key)) +} + +func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { + for i, x := range kv.entries { + fn(i, x.key, x.value) + } +} + +func (kv KeyValue) IterateString(fn func(i int, key, value string)) { + kv.Iterate(func(i int, key, value []byte) { + fn(i, string(key), string(value)) + }) +} + +func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) { + ShuffledIndex(rnd, kv.Len(), 1, func(i int) { + fn(i, kv.entries[i].key, kv.entries[i].value) + }) +} + +func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { + kv.IterateShuffled(rnd, func(i int, key, value []byte) { + fn(i, string(key), string(value)) + }) +} + +func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { + for i := range kv.entries { + key_, key, value := kv.IndexInexact(i) + fn(i, key_, key, value) + } +} + +func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { + kv.IterateInexact(func(i int, key_, key, value []byte) { + fn(i, string(key_), string(key), string(value)) + }) +} + +func (kv KeyValue) Clone() KeyValue { + return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} +} + +func (kv KeyValue) Slice(start, limit int) KeyValue { + if start < 0 || limit > kv.Len() { + panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) + } else if limit < start { + panic(fmt.Sprintf("Slice %d .. 
%d: invalid range", start, limit)) + } + return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} +} + +func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { + start_ := 0 + limit_ := kv.Len() + if start != nil { + start_ = kv.Search(start) + } + if limit != nil { + limit_ = kv.Search(limit) + } + return kv.Slice(start_, limit_) +} + +func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { + return kv.SliceKey([]byte(start), []byte(limit)) +} + +func (kv KeyValue) SliceRange(r *util.Range) KeyValue { + if r != nil { + return kv.SliceKey(r.Start, r.Limit) + } + return kv.Clone() +} + +func (kv KeyValue) Range(start, limit int) (r util.Range) { + if kv.Len() > 0 { + if start == kv.Len() { + r.Start = BytesAfter(kv.KeyAt(start - 1)) + } else { + r.Start = kv.KeyAt(start) + } + } + if limit < kv.Len() { + r.Limit = kv.KeyAt(limit) + } + return +} + +func KeyValue_EmptyKey() *KeyValue { + kv := &KeyValue{} + kv.PutString("", "v") + return kv +} + +func KeyValue_EmptyValue() *KeyValue { + kv := &KeyValue{} + kv.PutString("abc", "") + kv.PutString("abcd", "") + return kv +} + +func KeyValue_OneKeyValue() *KeyValue { + kv := &KeyValue{} + kv.PutString("abc", "v") + return kv +} + +func KeyValue_BigValue() *KeyValue { + kv := &KeyValue{} + kv.PutString("big1", strings.Repeat("1", 200000)) + return kv +} + +func KeyValue_SpecialKey() *KeyValue { + kv := &KeyValue{} + kv.PutString("\xff\xff", "v3") + return kv +} + +func KeyValue_MultipleKeyValue() *KeyValue { + kv := &KeyValue{} + kv.PutString("a", "v") + kv.PutString("aa", "v1") + kv.PutString("aaa", "v2") + kv.PutString("aaacccccccccc", "v2") + kv.PutString("aaaccccccccccd", "v3") + kv.PutString("aaaccccccccccf", "v4") + kv.PutString("aaaccccccccccfg", "v5") + kv.PutString("ab", "v6") + kv.PutString("abc", "v7") + kv.PutString("abcd", "v8") + kv.PutString("accccccccccccccc", "v9") + kv.PutString("b", "v10") + kv.PutString("bb", "v11") + kv.PutString("bc", "v12") + kv.PutString("c", "v13") + kv.PutString("c1", "v13") + kv.PutString("czzzzzzzzzzzzzz", "v14") + kv.PutString("fffffffffffffff", "v15") + kv.PutString("g11", "v15") + kv.PutString("g111", "v15") + kv.PutString("g111\xff", "v15") + kv.PutString("zz", "v16") + kv.PutString("zzzzzzz", "v16") + kv.PutString("zzzzzzzzzzzzzzzz", "v16") + return kv +} + +var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") + +func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { + if rnd == nil { + rnd = NewRand() + } + if maxlen < minlen { + panic("max len should >= min len") + } + + rrand := func(min, max int) int { + if min == max { + return max + } + return rnd.Intn(max-min) + min + } + + kv := &KeyValue{} + endC := byte(len(keymap) - 1) + gen := make([]byte, 0, maxlen) + for i := 0; i < n; i++ { + m := rrand(minlen, maxlen) + last := gen + retry: + gen = last[:m] + if k := len(last); m > k { + for j := k; j < m; j++ { + gen[j] = 0 + } + } else { + for j := m - 1; j >= 0; j-- { + c := last[j] + if c == endC { + continue + } + gen[j] = c + 1 + for j += 1; j < m; j++ { + gen[j] = 0 + } + goto ok + } + if m < maxlen { + m++ + goto retry + } + panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) + ok: + } + key := make([]byte, m) + for j := 0; j < m; j++ { + key[j] = keymap[gen[j]] + } + value := make([]byte, rrand(vminlen, vmaxlen)) + for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { + value[n] = 'x' + } + kv.Put(key, value) + } 
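+ // gen advances lexicographically on every iteration, so keys are
+ // emitted in strictly increasing order and kv.Put never panics.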
+ return kv +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go new file mode 100644 index 0000000..7b7fbd7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go @@ -0,0 +1,150 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "fmt" + "math/rand" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) { + if rnd == nil { + rnd = NewRand() + } + + if p == nil { + BeforeEach(func() { + p = setup(kv) + }) + AfterEach(func() { + teardown(p) + }) + } + + It("Should find all keys with Find", func() { + if db, ok := p.(Find); ok { + ShuffledIndex(nil, kv.Len(), 1, func(i int) { + key_, key, value := kv.IndexInexact(i) + + // Using exact key. + rkey, rvalue, err := db.TestFind(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) + Expect(rkey).Should(Equal(key), "Key") + Expect(rvalue).Should(Equal(value), "Value for key %q", key) + + // Using inexact key. + rkey, rvalue, err = db.TestFind(key_) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) + Expect(rkey).Should(Equal(key)) + Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) + }) + } + }) + + It("Should return error if the key is not present", func() { + if db, ok := p.(Find); ok { + var key []byte + if kv.Len() > 0 { + key_, _ := kv.Index(kv.Len() - 1) + key = BytesAfter(key_) + } + rkey, _, err := db.TestFind(key) + Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) + Expect(err).Should(Equal(util.ErrNotFound)) + } + }) + + It("Should only find exact key with Get", func() { + if db, ok := p.(Get); ok { + ShuffledIndex(nil, kv.Len(), 1, func(i int) { + key_, key, value := kv.IndexInexact(i) + + // Using exact key. + rvalue, err := db.TestGet(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) + Expect(rvalue).Should(Equal(value), "Value for key %q", key) + + // Using inexact key. + if len(key_) > 0 { + _, err = db.TestGet(key_) + Expect(err).Should(HaveOccurred(), "Error for key %q", key_) + Expect(err).Should(Equal(util.ErrNotFound)) + } + }) + } + }) + + TestIter := func(r *util.Range, _kv KeyValue) { + if db, ok := p.(NewIterator); ok { + iter := db.TestNewIterator(r) + Expect(iter.Error()).ShouldNot(HaveOccurred()) + + t := IteratorTesting{ + KeyValue: _kv, + Iter: iter, + } + + DoIteratorTesting(&t) + } + } + + It("Should iterates and seeks correctly", func(done Done) { + TestIter(nil, kv.Clone()) + done <- true + }, 3.0) + + RandomIndex(rnd, kv.Len(), kv.Len(), func(i int) { + type slice struct { + r *util.Range + start, limit int + } + + key_, _, _ := kv.IndexInexact(i) + for _, x := range []slice{ + {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, + {&util.Range{Start: nil, Limit: key_}, 0, i}, + } { + It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. 
%d", x.start, x.limit), func(done Done) { + TestIter(x.r, kv.Slice(x.start, x.limit)) + done <- true + }, 3.0) + } + }) + + RandomRange(rnd, kv.Len(), kv.Len(), func(start, limit int) { + It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) { + r := kv.Range(start, limit) + TestIter(&r, kv.Slice(start, limit)) + done <- true + }, 3.0) + }) +} + +func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) { + Test := func(kv *KeyValue) func() { + return func() { + var p DB + if body != nil { + p = body(*kv) + } + KeyValueTesting(rnd, *kv, p, setup, teardown) + } + } + + Describe("with no key/value (empty)", Test(&KeyValue{})) + Describe("with empty key", Test(KeyValue_EmptyKey())) + Describe("with empty value", Test(KeyValue_EmptyValue())) + Describe("with one key/value", Test(KeyValue_OneKeyValue())) + Describe("with big value", Test(KeyValue_BigValue())) + Describe("with special key", Test(KeyValue_SpecialKey())) + Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go new file mode 100644 index 0000000..e677e1d --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go @@ -0,0 +1,585 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + storageMu sync.Mutex + storageUseFS bool = true + storageKeepFS bool = false + storageNum int +) + +type StorageMode int + +const ( + ModeOpen StorageMode = 1 << iota + ModeCreate + ModeRemove + ModeRead + ModeWrite + ModeSync + ModeClose +) + +const ( + modeOpen = iota + modeCreate + modeRemove + modeRead + modeWrite + modeSync + modeClose + + modeCount +) + +const ( + typeManifest = iota + typeJournal + typeTable + typeTemp + + typeCount +) + +const flattenCount = modeCount * typeCount + +func flattenType(m StorageMode, t storage.FileType) int { + var x int + switch m { + case ModeOpen: + x = modeOpen + case ModeCreate: + x = modeCreate + case ModeRemove: + x = modeRemove + case ModeRead: + x = modeRead + case ModeWrite: + x = modeWrite + case ModeSync: + x = modeSync + case ModeClose: + x = modeClose + default: + panic("invalid storage mode") + } + x *= typeCount + switch t { + case storage.TypeManifest: + return x + typeManifest + case storage.TypeJournal: + return x + typeJournal + case storage.TypeTable: + return x + typeTable + case storage.TypeTemp: + return x + typeTemp + default: + panic("invalid file type") + } +} + +func listFlattenType(m StorageMode, t storage.FileType) []int { + ret := make([]int, 0, flattenCount) + add := func(x int) { + x *= typeCount + switch { + case t&storage.TypeManifest != 0: + ret = append(ret, x+typeManifest) + case t&storage.TypeJournal != 0: + ret = append(ret, x+typeJournal) + case t&storage.TypeTable != 0: + ret = append(ret, x+typeTable) + case t&storage.TypeTemp != 0: + ret = append(ret, x+typeTemp) + } + } + switch { + case 
m&ModeOpen != 0: + add(modeOpen) + case m&ModeCreate != 0: + add(modeCreate) + case m&ModeRemove != 0: + add(modeRemove) + case m&ModeRead != 0: + add(modeRead) + case m&ModeWrite != 0: + add(modeWrite) + case m&ModeSync != 0: + add(modeSync) + case m&ModeClose != 0: + add(modeClose) + } + return ret +} + +func packFile(num uint64, t storage.FileType) uint64 { + if num>>(64-typeCount) != 0 { + panic("overflow") + } + return num<> typeCount, storage.FileType(x) & storage.TypeAll +} + +type emulatedError struct { + err error +} + +func (err emulatedError) Error() string { + return fmt.Sprintf("emulated storage error: %v", err.err) +} + +type storageLock struct { + s *Storage + r util.Releaser +} + +func (l storageLock) Release() { + l.r.Release() + l.s.logI("storage lock released") +} + +type reader struct { + f *file + storage.Reader +} + +func (r *reader) Read(p []byte) (n int, err error) { + err = r.f.s.emulateError(ModeRead, r.f.Type()) + if err == nil { + r.f.s.stall(ModeRead, r.f.Type()) + n, err = r.Reader.Read(p) + } + r.f.s.count(ModeRead, r.f.Type(), n) + if err != nil && err != io.EOF { + r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) + } + return +} + +func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { + err = r.f.s.emulateError(ModeRead, r.f.Type()) + if err == nil { + r.f.s.stall(ModeRead, r.f.Type()) + n, err = r.Reader.ReadAt(p, off) + } + r.f.s.count(ModeRead, r.f.Type(), n) + if err != nil && err != io.EOF { + r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) + } + return +} + +func (r *reader) Close() (err error) { + return r.f.doClose(r.Reader) +} + +type writer struct { + f *file + storage.Writer +} + +func (w *writer) Write(p []byte) (n int, err error) { + err = w.f.s.emulateError(ModeWrite, w.f.Type()) + if err == nil { + w.f.s.stall(ModeWrite, w.f.Type()) + n, err = w.Writer.Write(p) + } + w.f.s.count(ModeWrite, w.f.Type(), n) + if err != nil && err != io.EOF { + w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) + } + return +} + +func (w *writer) Sync() (err error) { + err = w.f.s.emulateError(ModeSync, w.f.Type()) + if err == nil { + w.f.s.stall(ModeSync, w.f.Type()) + err = w.Writer.Sync() + } + w.f.s.count(ModeSync, w.f.Type(), 0) + if err != nil { + w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) + } + return +} + +func (w *writer) Close() (err error) { + return w.f.doClose(w.Writer) +} + +type file struct { + s *Storage + storage.File +} + +func (f *file) pack() uint64 { + return packFile(f.Num(), f.Type()) +} + +func (f *file) assertOpen() { + ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) +} + +func (f *file) doClose(closer io.Closer) (err error) { + err = f.s.emulateError(ModeClose, f.Type()) + if err == nil { + f.s.stall(ModeClose, f.Type()) + } + f.s.mu.Lock() + defer f.s.mu.Unlock() + if err == nil { + ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) + err = closer.Close() + } + f.s.countNB(ModeClose, f.Type(), 0) + writer := f.s.opens[f.pack()] + if err != nil { + f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) + } else { + f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) + delete(f.s.opens, f.pack()) + } + return +} + +func (f *file) Open() (r storage.Reader, err 
error) { + err = f.s.emulateError(ModeOpen, f.Type()) + if err == nil { + f.s.stall(ModeOpen, f.Type()) + } + f.s.mu.Lock() + defer f.s.mu.Unlock() + if err == nil { + f.assertOpen() + f.s.countNB(ModeOpen, f.Type(), 0) + r, err = f.File.Open() + } + if err != nil { + f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) + } else { + f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) + f.s.opens[f.pack()] = false + r = &reader{f, r} + } + return +} + +func (f *file) Create() (w storage.Writer, err error) { + err = f.s.emulateError(ModeCreate, f.Type()) + if err == nil { + f.s.stall(ModeCreate, f.Type()) + } + f.s.mu.Lock() + defer f.s.mu.Unlock() + if err == nil { + f.assertOpen() + f.s.countNB(ModeCreate, f.Type(), 0) + w, err = f.File.Create() + } + if err != nil { + f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) + } else { + f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) + f.s.opens[f.pack()] = true + w = &writer{f, w} + } + return +} + +func (f *file) Remove() (err error) { + err = f.s.emulateError(ModeRemove, f.Type()) + if err == nil { + f.s.stall(ModeRemove, f.Type()) + } + f.s.mu.Lock() + defer f.s.mu.Unlock() + if err == nil { + f.assertOpen() + f.s.countNB(ModeRemove, f.Type(), 0) + err = f.File.Remove() + } + if err != nil { + f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) + } else { + f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) + } + return +} + +type Storage struct { + storage.Storage + closeFn func() error + + lmu sync.Mutex + lb bytes.Buffer + + mu sync.Mutex + // Open files, true=writer, false=reader + opens map[uint64]bool + counters [flattenCount]int + bytesCounter [flattenCount]int64 + emulatedError [flattenCount]error + stallCond sync.Cond + stalled [flattenCount]bool +} + +func (s *Storage) log(skip int, str string) { + s.lmu.Lock() + defer s.lmu.Unlock() + _, file, line, ok := runtime.Caller(skip + 2) + if ok { + // Truncate file name at last file name separator. + if index := strings.LastIndex(file, "/"); index >= 0 { + file = file[index+1:] + } else if index = strings.LastIndex(file, "\\"); index >= 0 { + file = file[index+1:] + } + } else { + file = "???" + line = 1 + } + fmt.Fprintf(&s.lb, "%s:%d: ", file, line) + lines := strings.Split(str, "\n") + if l := len(lines); l > 1 && lines[l-1] == "" { + lines = lines[:l-1] + } + for i, line := range lines { + if i > 0 { + s.lb.WriteString("\n\t") + } + s.lb.WriteString(line) + } + s.lb.WriteByte('\n') +} + +func (s *Storage) logISkip(skip int, format string, args ...interface{}) { + pc, _, _, ok := runtime.Caller(skip + 1) + if ok { + if f := runtime.FuncForPC(pc); f != nil { + fname := f.Name() + if index := strings.LastIndex(fname, "."); index >= 0 { + fname = fname[index+1:] + } + format = fname + ": " + format + } + } + s.log(skip+1, fmt.Sprintf(format, args...)) +} + +func (s *Storage) logI(format string, args ...interface{}) { + s.logISkip(1, format, args...) 
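// Note: the packFile/unpackFile pair earlier in this file appears mangled in
// this hunk ("return num<> typeCount ..." fuses the tail of packFile with the
// header of unpackFile). Consistent with their callers (file.pack and
// Storage.openFiles), the intended shape is almost certainly:
//
//	func packFile(num uint64, t storage.FileType) uint64 {
//		if num>>(64-typeCount) != 0 {
//			panic("overflow")
//		}
//		return num<<typeCount | uint64(t)
//	}
//
//	func unpackFile(x uint64) (uint64, storage.FileType) {
//		return x >> typeCount, storage.FileType(x) & storage.TypeAll
//	}
//
// flattenType and listFlattenType then address the flat counter arrays
// row-major: index = modeIndex*typeCount + typeIndex, one slot per
// (mode, file type) pair.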
+} + +func (s *Storage) Log(str string) { + s.log(1, "Log: "+str) +} + +func (s *Storage) Lock() (r util.Releaser, err error) { + r, err = s.Storage.Lock() + if err != nil { + s.logI("storage locking failed, err=%v", err) + } else { + s.logI("storage locked") + r = storageLock{s, r} + } + return +} + +func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { + return &file{s, s.Storage.GetFile(num, t)} +} + +func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { + rfiles, err := s.Storage.GetFiles(t) + if err != nil { + s.logI("get files failed, err=%v", err) + return + } + files = make([]storage.File, len(rfiles)) + for i, f := range rfiles { + files[i] = &file{s, f} + } + s.logI("get files, type=0x%x count=%d", int(t), len(files)) + return +} + +func (s *Storage) GetManifest() (f storage.File, err error) { + manifest, err := s.Storage.GetManifest() + if err != nil { + if !os.IsNotExist(err) { + s.logI("get manifest failed, err=%v", err) + } + return + } + s.logI("get manifest, num=%d", manifest.Num()) + return &file{s, manifest}, nil +} + +func (s *Storage) SetManifest(f storage.File) error { + f_, ok := f.(*file) + ExpectWithOffset(1, ok).To(BeTrue()) + ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) + err := s.Storage.SetManifest(f_.File) + if err != nil { + s.logI("set manifest failed, err=%v", err) + } else { + s.logI("set manifest, num=%d", f_.Num()) + } + return err +} + +func (s *Storage) openFiles() string { + out := "Open files:" + for x, writer := range s.opens { + num, t := unpackFile(x) + out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) + } + return out +} + +func (s *Storage) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) + err := s.Storage.Close() + if err != nil { + s.logI("storage closing failed, err=%v", err) + } else { + s.logI("storage closed") + } + if s.closeFn != nil { + if err1 := s.closeFn(); err1 != nil { + s.logI("close func error, err=%v", err1) + } + } + return err +} + +func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { + s.counters[flattenType(m, t)]++ + s.bytesCounter[flattenType(m, t)] += int64(n) +} + +func (s *Storage) count(m StorageMode, t storage.FileType, n int) { + s.mu.Lock() + defer s.mu.Unlock() + s.countNB(m, t, n) +} + +func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { + for _, x := range listFlattenType(m, t) { + s.counters[x] = 0 + s.bytesCounter[x] = 0 + } +} + +func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { + for _, x := range listFlattenType(m, t) { + count += s.counters[x] + bytes += s.bytesCounter[x] + } + return +} + +func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { + s.mu.Lock() + defer s.mu.Unlock() + err := s.emulatedError[flattenType(m, t)] + if err != nil { + return emulatedError{err} + } + return nil +} + +func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { + s.mu.Lock() + defer s.mu.Unlock() + for _, x := range listFlattenType(m, t) { + s.emulatedError[x] = err + } +} + +func (s *Storage) stall(m StorageMode, t storage.FileType) { + x := flattenType(m, t) + s.mu.Lock() + defer s.mu.Unlock() + for s.stalled[x] { + s.stallCond.Wait() + } +} + +func (s *Storage) Stall(m StorageMode, t storage.FileType) { + s.mu.Lock() + defer s.mu.Unlock() + for _, x := range listFlattenType(m, t) { + s.stalled[x] = true + } +} + +func (s *Storage) Release(m StorageMode, t 
storage.FileType) { + s.mu.Lock() + defer s.mu.Unlock() + for _, x := range listFlattenType(m, t) { + s.stalled[x] = false + } + s.stallCond.Broadcast() +} + +func NewStorage() *Storage { + var stor storage.Storage + var closeFn func() error + if storageUseFS { + for { + storageMu.Lock() + num := storageNum + storageNum++ + storageMu.Unlock() + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) + if _, err := os.Stat(path); os.IsNotExist(err) { + stor, err = storage.OpenFile(path) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) + closeFn = func() error { + if storageKeepFS { + return nil + } + return os.RemoveAll(path) + } + break + } + } + } else { + stor = storage.NewMemStorage() + } + s := &Storage{ + Storage: stor, + closeFn: closeFn, + opens: make(map[uint64]bool), + } + s.stallCond.L = &s.mu + return s +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go new file mode 100644 index 0000000..150b7d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go @@ -0,0 +1,157 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "bytes" + "flag" + "math/rand" + "reflect" + "sync" + + "github.com/onsi/ginkgo/config" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" +) + +var ( + runfn = make(map[string][]func()) + runmu sync.Mutex +) + +func Defer(args ...interface{}) bool { + var ( + group string + fn func() + ) + for _, arg := range args { + v := reflect.ValueOf(arg) + switch v.Kind() { + case reflect.String: + group = v.String() + case reflect.Func: + r := reflect.ValueOf(&fn).Elem() + r.Set(v) + } + } + if fn != nil { + runmu.Lock() + runfn[group] = append(runfn[group], fn) + runmu.Unlock() + } + return true +} + +func RunDefer(groups ...string) bool { + if len(groups) == 0 { + groups = append(groups, "") + } + runmu.Lock() + var runfn_ []func() + for _, group := range groups { + runfn_ = append(runfn_, runfn[group]...) + delete(runfn, group) + } + runmu.Unlock() + for _, fn := range runfn_ { + fn() + } + return runfn_ != nil +} + +func RandomSeed() int64 { + if !flag.Parsed() { + panic("random seed not initialized") + } + return config.GinkgoConfig.RandomSeed +} + +func NewRand() *rand.Rand { + return rand.New(rand.NewSource(RandomSeed())) +} + +var cmp = comparer.DefaultComparer + +func BytesSeparator(a, b []byte) []byte { + if bytes.Equal(a, b) { + return b + } + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for ; i < n && (a[i] == b[i]); i++ { + } + x := append([]byte{}, a[:i]...) 
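// Defer and RunDefer above let tests queue cleanup work under an optional
// group name and flush it later; RunDefer with no arguments flushes the
// unnamed ("") group and reports whether anything ran. A hedged usage
// sketch (the closures are placeholders):
//
//	testutil.Defer("db", func() { fmt.Println("close db") })
//	testutil.Defer(func() { fmt.Println("ungrouped cleanup") })
//
//	testutil.RunDefer("db") // runs and clears only the "db" group
//	testutil.RunDefer()     // flushes the unnamed group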
+ if i < n { + if c := a[i] + 1; c < b[i] { + return append(x, c) + } + x = append(x, a[i]) + i++ + } + for ; i < len(a); i++ { + if c := a[i]; c < 0xff { + return append(x, c+1) + } else { + x = append(x, c) + } + } + if len(b) > i && b[i] > 0 { + return append(x, b[i]-1) + } + return append(x, 'x') +} + +func BytesAfter(b []byte) []byte { + var x []byte + for _, c := range b { + if c < 0xff { + return append(x, c+1) + } else { + x = append(x, c) + } + } + return append(x, 'x') +} + +func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { + if rnd == nil { + rnd = NewRand() + } + for x := 0; x < round; x++ { + fn(rnd.Intn(n)) + } + return +} + +func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { + if rnd == nil { + rnd = NewRand() + } + for x := 0; x < round; x++ { + for _, i := range rnd.Perm(n) { + fn(i) + } + } + return +} + +func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { + if rnd == nil { + rnd = NewRand() + } + for x := 0; x < round; x++ { + start := rnd.Intn(n) + length := 0 + if j := n - start; j > 0 { + length = rnd.Intn(j) + } + fn(start, start+length) + } + return +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go new file mode 100644 index 0000000..597b65f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go @@ -0,0 +1,59 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + . "github.com/onsi/gomega" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type testingDB struct { + *DB + ro *opt.ReadOptions + wo *opt.WriteOptions + stor *testutil.Storage +} + +func (t *testingDB) TestPut(key []byte, value []byte) error { + return t.Put(key, value, t.wo) +} + +func (t *testingDB) TestDelete(key []byte) error { + return t.Delete(key, t.wo) +} + +func (t *testingDB) TestGet(key []byte) (value []byte, err error) { + return t.Get(key, t.ro) +} + +func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { + return t.NewIterator(slice, t.ro) +} + +func (t *testingDB) TestClose() { + err := t.Close() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + err = t.stor.Close() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) +} + +func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { + stor := testutil.NewStorage() + db, err := Open(stor, o) + // FIXME: This may be called from outside It, which may cause panic. + Expect(err).NotTo(HaveOccurred()) + return &testingDB{ + DB: db, + ro: ro, + wo: wo, + stor: stor, + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go new file mode 100644 index 0000000..912d2f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go @@ -0,0 +1,91 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
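// BytesSeparator and BytesAfter above synthesize short probe keys around
// fixture data: for a < b, BytesSeparator(a, b) returns a key that sorts at
// or after a and before b, and BytesAfter(b) returns a key strictly greater
// than b (appending 'x' once every byte is already 0xff). Illustrative
// values, assuming the functions behave as written:
//
//	testutil.BytesSeparator([]byte("abc"), []byte("abe")) // -> "abd"
//	testutil.BytesAfter([]byte("ab"))                     // -> "b" (shortest key > "ab")
//	testutil.BytesAfter([]byte{0xff})                     // -> {0xff, 'x'}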
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sort" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" +) + +func shorten(str string) string { + if len(str) <= 4 { + return str + } + return str[:1] + ".." + str[len(str)-1:] +} + +var bunits = [...]string{"", "Ki", "Mi", "Gi"} + +func shortenb(bytes int) string { + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%d%sB", bytes, bunits[i]) +} + +func sshortenb(bytes int) string { + if bytes == 0 { + return "~" + } + sign := "+" + if bytes < 0 { + sign = "-" + bytes *= -1 + } + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) +} + +func sint(x int) string { + if x == 0 { + return "~" + } + sign := "+" + if x < 0 { + sign = "-" + x *= -1 + } + return fmt.Sprintf("%s%d", sign, x) +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} + +type files []storage.File + +func (p files) Len() int { + return len(p) +} + +func (p files) Less(i, j int) bool { + return p[i].Num() < p[j].Num() +} + +func (p files) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func (p files) sort() { + sort.Sort(p) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go new file mode 100644 index 0000000..21de242 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go @@ -0,0 +1,293 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package util + +// This a copy of Go std bytes.Buffer with some modification +// and some features stripped. + +import ( + "bytes" + "io" +) + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. +} + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". +func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + switch { + case n < 0 || n > b.Len(): + panic("leveldb/util.Buffer: truncation out of range") + case n == 0: + // Reuse buffer space. 
+ b.off = 0 + } + b.buf = b.buf[0 : b.off+n] +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) grow(n int) int { + m := b.Len() + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Truncate(0) + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if b.buf == nil && n <= len(b.bootstrap) { + buf = b.bootstrap[0:] + } else if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + } + b.buf = buf + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Alloc allocs n bytes of slice from the buffer, growing the buffer as +// needed. If n is negative, Alloc will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Alloc(n int) []byte { + if n < 0 { + panic("leveldb/util.Buffer.Alloc: negative count") + } + m := b.grow(n) + return b.buf[m:] +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("leveldb/util.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with bytes.ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + // If buffer is empty, reset to recover space. 
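// The growth policy in grow above has three tiers: reuse the 64-byte
// bootstrap array for a small first write, slide unread data to the front
// when m+n <= cap/2 (a cheap copy, no allocation), and otherwise allocate
// 2*cap+n and copy. A worked example of the slide case, assuming the code
// above:
//
//	// cap(buf)=64, len(buf)=60, off=52, so m=8 unread bytes; Write of
//	// n=12 more: len+n = 72 > 64, but m+n = 20 <= 64/2, so the 8 unread
//	// bytes are copied to buf[0:8] and the write lands at buf[8:20],
//	// with no new allocation.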
+ if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + newBuf := b.buf + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + MinRead) + } + copy(newBuf, b.buf[b.off:]) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with bytes.ErrTooLarge. +func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(bytes.ErrTooLarge) + } + }() + return make([]byte, n) +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("leveldb/util.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// bytes.ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + m := b.grow(1) + b.buf[m] = c + return nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + return c, nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. 
+// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + return line, err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go new file mode 100644 index 0000000..554e28e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go @@ -0,0 +1,205 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "fmt" + "sync/atomic" + "time" +) + +type buffer struct { + b []byte + miss int +} + +// BufferPool is a 'buffer pool'. +type BufferPool struct { + pool [6]chan []byte + size [5]uint32 + sizeMiss [5]uint32 + sizeHalf [5]uint32 + baseline [4]int + baselinex0 int + baselinex1 int + baseline0 int + baseline1 int + baseline2 int + close chan struct{} + + get uint32 + put uint32 + half uint32 + less uint32 + equal uint32 + greater uint32 + miss uint32 +} + +func (p *BufferPool) poolNum(n int) int { + if n <= p.baseline0 && n > p.baseline0/2 { + return 0 + } + for i, x := range p.baseline { + if n <= x { + return i + 1 + } + } + return len(p.baseline) + 1 +} + +// Get returns buffer with length of n. +func (p *BufferPool) Get(n int) []byte { + atomic.AddUint32(&p.get, 1) + + poolNum := p.poolNum(n) + pool := p.pool[poolNum] + if poolNum == 0 { + // Fast path. 
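// poolNum above maps a requested size onto one of six buckets derived from
// the baseline: sizes in (baseline0/2, baseline0] take the fast-path pool 0,
// smaller sizes fall into the baseline/4 and baseline/2 buckets, larger ones
// into baseline*2, baseline*4, and a final overflow bucket. With
// baseline0 = 1024 (an assumed value):
//
//	poolNum(900)  == 0 // in (512, 1024]: fast path
//	poolNum(200)  == 1 // <= 256  (baseline/4)
//	poolNum(300)  == 2 // <= 512  (baseline/2)
//	poolNum(1500) == 3 // <= 2048 (baseline*2)
//	poolNum(5000) == 5 // > 4096: overflow bucket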
+ select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + select { + case pool <- b: + default: + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + } + default: + atomic.AddUint32(&p.miss, 1) + } + + return make([]byte, n, p.baseline0) + } else { + sizePtr := &p.size[poolNum-1] + + select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + sizeHalfPtr := &p.sizeHalf[poolNum-1] + if atomic.AddUint32(sizeHalfPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) + atomic.StoreUint32(sizeHalfPtr, 0) + } else { + select { + case pool <- b: + default: + } + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { + select { + case pool <- b: + default: + } + } + } + default: + atomic.AddUint32(&p.miss, 1) + } + + if size := atomic.LoadUint32(sizePtr); uint32(n) > size { + if size == 0 { + atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) + } else { + sizeMissPtr := &p.sizeMiss[poolNum-1] + if atomic.AddUint32(sizeMissPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(n)) + atomic.StoreUint32(sizeMissPtr, 0) + } + } + return make([]byte, n) + } else { + return make([]byte, n, size) + } + } +} + +// Put adds given buffer to the pool. +func (p *BufferPool) Put(b []byte) { + atomic.AddUint32(&p.put, 1) + + pool := p.pool[p.poolNum(cap(b))] + select { + case pool <- b: + default: + } + +} + +func (p *BufferPool) Close() { + select { + case p.close <- struct{}{}: + default: + } +} + +func (p *BufferPool) String() string { + return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", + p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) +} + +func (p *BufferPool) drain() { + ticker := time.NewTicker(2 * time.Second) + for { + select { + case <-ticker.C: + for _, ch := range p.pool { + select { + case <-ch: + default: + } + } + case <-p.close: + for _, ch := range p.pool { + close(ch) + } + return + } + } +} + +// NewBufferPool creates a new initialized 'buffer pool'. +func NewBufferPool(baseline int) *BufferPool { + if baseline <= 0 { + panic("baseline can't be <= 0") + } + p := &BufferPool{ + baseline0: baseline, + baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, + close: make(chan struct{}, 1), + } + for i, cap := range []int{2, 2, 4, 4, 2, 1} { + p.pool[i] = make(chan []byte, cap) + } + go p.drain() + return p +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go new file mode 100644 index 0000000..87d9673 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go @@ -0,0 +1,369 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
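// A hedged usage sketch for the pool above: Get returns a buffer of exactly
// the requested length (recycled when a pooled slice fits, freshly allocated
// otherwise), Put recycles it, and Close stops the background drain
// goroutine; the baseline value here is an arbitrary example:
//
//	pool := util.NewBufferPool(32 * 1024)
//	b := pool.Get(4096) // len(b) == 4096
//	// ... use b ...
//	pool.Put(b)
//	defer pool.Close()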
+ +package util + +import ( + "bytes" + "io" + "math/rand" + "runtime" + "testing" +) + +const N = 10000 // make this bigger for a larger (and slower) test +var data string // test data for write tests +var testBytes []byte // test data; same as data but as a slice. + +func init() { + testBytes = make([]byte, N) + for i := 0; i < N; i++ { + testBytes[i] = 'a' + byte(i%26) + } + data = string(testBytes) +} + +// Verify that contents of buf match the string s. +func check(t *testing.T, testname string, buf *Buffer, s string) { + bytes := buf.Bytes() + str := buf.String() + if buf.Len() != len(bytes) { + t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) + } + + if buf.Len() != len(str) { + t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) + } + + if buf.Len() != len(s) { + t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) + } + + if string(bytes) != s { + t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) + } +} + +// Fill buf through n writes of byte slice fub. +// The initial contents of buf corresponds to the string s; +// the result is the final contents of buf returned as a string. +func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { + check(t, testname+" (fill 1)", buf, s) + for ; n > 0; n-- { + m, err := buf.Write(fub) + if m != len(fub) { + t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) + } + if err != nil { + t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) + } + s += string(fub) + check(t, testname+" (fill 4)", buf, s) + } + return s +} + +func TestNewBuffer(t *testing.T) { + buf := NewBuffer(testBytes) + check(t, "NewBuffer", buf, data) +} + +// Empty buf through repeated reads into fub. +// The initial contents of buf corresponds to the string s. 
+func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { + check(t, testname+" (empty 1)", buf, s) + + for { + n, err := buf.Read(fub) + if n == 0 { + break + } + if err != nil { + t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) + } + s = s[n:] + check(t, testname+" (empty 3)", buf, s) + } + + check(t, testname+" (empty 4)", buf, "") +} + +func TestBasicOperations(t *testing.T) { + var buf Buffer + + for i := 0; i < 5; i++ { + check(t, "TestBasicOperations (1)", &buf, "") + + buf.Reset() + check(t, "TestBasicOperations (2)", &buf, "") + + buf.Truncate(0) + check(t, "TestBasicOperations (3)", &buf, "") + + n, err := buf.Write([]byte(data[0:1])) + if n != 1 { + t.Errorf("wrote 1 byte, but n == %d", n) + } + if err != nil { + t.Errorf("err should always be nil, but err == %s", err) + } + check(t, "TestBasicOperations (4)", &buf, "a") + + buf.WriteByte(data[1]) + check(t, "TestBasicOperations (5)", &buf, "ab") + + n, err = buf.Write([]byte(data[2:26])) + if n != 24 { + t.Errorf("wrote 25 bytes, but n == %d", n) + } + check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) + + buf.Truncate(26) + check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) + + buf.Truncate(20) + check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) + + empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) + empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) + + buf.WriteByte(data[1]) + c, err := buf.ReadByte() + if err != nil { + t.Error("ReadByte unexpected eof") + } + if c != data[1] { + t.Errorf("ReadByte wrong value c=%v", c) + } + c, err = buf.ReadByte() + if err == nil { + t.Error("ReadByte unexpected not eof") + } + } +} + +func TestLargeByteWrites(t *testing.T) { + var buf Buffer + limit := 30 + if testing.Short() { + limit = 9 + } + for i := 3; i < limit; i += 3 { + s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) + empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) + } + check(t, "TestLargeByteWrites (3)", &buf, "") +} + +func TestLargeByteReads(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) + } + check(t, "TestLargeByteReads (3)", &buf, "") +} + +func TestMixedReadsAndWrites(t *testing.T) { + var buf Buffer + s := "" + for i := 0; i < 50; i++ { + wlen := rand.Intn(len(data)) + s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) + rlen := rand.Intn(len(data)) + fub := make([]byte, rlen) + n, _ := buf.Read(fub) + s = s[n:] + } + empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) +} + +func TestNil(t *testing.T) { + var b *Buffer + if b.String() != "" { + t.Errorf("expected ; got %q", b.String()) + } +} + +func TestReadFrom(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + var b Buffer + b.ReadFrom(&buf) + empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) + } +} + +func TestWriteTo(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + var b Buffer + buf.WriteTo(&b) + empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) + } +} + +func TestNext(t *testing.T) { + b := []byte{0, 1, 2, 3, 4} + tmp := make([]byte, 5) + for i := 0; i <= 5; 
i++ { + for j := i; j <= 5; j++ { + for k := 0; k <= 6; k++ { + // 0 <= i <= j <= 5; 0 <= k <= 6 + // Check that if we start with a buffer + // of length j at offset i and ask for + // Next(k), we get the right bytes. + buf := NewBuffer(b[0:j]) + n, _ := buf.Read(tmp[0:i]) + if n != i { + t.Fatalf("Read %d returned %d", i, n) + } + bb := buf.Next(k) + want := k + if want > j-i { + want = j - i + } + if len(bb) != want { + t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) + } + for l, v := range bb { + if v != byte(l+i) { + t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) + } + } + } + } + } +} + +var readBytesTests = []struct { + buffer string + delim byte + expected []string + err error +}{ + {"", 0, []string{""}, io.EOF}, + {"a\x00", 0, []string{"a\x00"}, nil}, + {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, + {"hello\x01world", 1, []string{"hello\x01"}, nil}, + {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, + {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, + {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, +} + +func TestReadBytes(t *testing.T) { + for _, test := range readBytesTests { + buf := NewBuffer([]byte(test.buffer)) + var err error + for _, expected := range test.expected { + var bytes []byte + bytes, err = buf.ReadBytes(test.delim) + if string(bytes) != expected { + t.Errorf("expected %q, got %q", expected, bytes) + } + if err != nil { + break + } + } + if err != test.err { + t.Errorf("expected error %v, got %v", test.err, err) + } + } +} + +func TestGrow(t *testing.T) { + x := []byte{'x'} + y := []byte{'y'} + tmp := make([]byte, 72) + for _, startLen := range []int{0, 100, 1000, 10000, 100000} { + xBytes := bytes.Repeat(x, startLen) + for _, growLen := range []int{0, 100, 1000, 10000, 100000} { + buf := NewBuffer(xBytes) + // If we read, this affects buf.off, which is good to test. + readBytes, _ := buf.Read(tmp) + buf.Grow(growLen) + yBytes := bytes.Repeat(y, growLen) + // Check no allocation occurs in write, as long as we're single-threaded. + var m1, m2 runtime.MemStats + runtime.ReadMemStats(&m1) + buf.Write(yBytes) + runtime.ReadMemStats(&m2) + if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { + t.Errorf("allocation occurred during write") + } + // Check that buffer has correct data. + if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { + t.Errorf("bad initial data at %d %d", startLen, growLen) + } + if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { + t.Errorf("bad written data at %d %d", startLen, growLen) + } + } + } +} + +// Was a bug: used to give EOF reading empty slice at EOF. +func TestReadEmptyAtEOF(t *testing.T) { + b := new(Buffer) + slice := make([]byte, 0) + n, err := b.Read(slice) + if err != nil { + t.Errorf("read error: %v", err) + } + if n != 0 { + t.Errorf("wrong count; got %d want 0", n) + } +} + +// Tests that we occasionally compact. Issue 5154. +func TestBufferGrowth(t *testing.T) { + var b Buffer + buf := make([]byte, 1024) + b.Write(buf[0:1]) + var cap0 int + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + if i == 0 { + cap0 = cap(b.buf) + } + } + cap1 := cap(b.buf) + // (*Buffer).grow allows for 2x capacity slop before sliding, + // so set our error threshold at 3x. + if cap1 > cap0*3 { + t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) + } +} + +// From Issue 5154. 
+func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf[0:1]) + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + } + } +} + +// Check that we don't compact too often. From Issue 5154. +func BenchmarkBufferFullSmallReads(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf) + for b.Len()+20 < cap(b.buf) { + b.Write(buf[:10]) + } + for i := 0; i < 5<<10; i++ { + b.Read(buf[:1]) + b.Write(buf[:1]) + } + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go new file mode 100644 index 0000000..631c9d6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go @@ -0,0 +1,30 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "hash/crc32" +) + +var table = crc32.MakeTable(crc32.Castagnoli) + +// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. +type CRC uint32 + +// NewCRC creates a new crc based on the given bytes. +func NewCRC(b []byte) CRC { + return CRC(0).Update(b) +} + +// Update updates the crc with the given bytes. +func (c CRC) Update(b []byte) CRC { + return CRC(crc32.Update(uint32(c), table, b)) +} + +// Value returns a masked crc. +func (c CRC) Value() uint32 { + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go new file mode 100644 index 0000000..5490366 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go @@ -0,0 +1,48 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "bytes" + "encoding/binary" +) + +// Hash return hash of the given data. +func Hash(data []byte, seed uint32) uint32 { + // Similar to murmur hash + var m uint32 = 0xc6a4a793 + var r uint32 = 24 + h := seed ^ (uint32(len(data)) * m) + + buf := bytes.NewBuffer(data) + for buf.Len() >= 4 { + var w uint32 + binary.Read(buf, binary.LittleEndian, &w) + h += w + h *= m + h ^= (h >> 16) + } + + rest := buf.Bytes() + switch len(rest) { + default: + panic("not reached") + case 3: + h += uint32(rest[2]) << 16 + fallthrough + case 2: + h += uint32(rest[1]) << 8 + fallthrough + case 1: + h += uint32(rest[0]) + h *= m + h ^= (h >> r) + case 0: + } + + return h +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go new file mode 100644 index 0000000..1f7fdd4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go @@ -0,0 +1,21 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
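// The CRC type above (crc32.go) applies LevelDB's checksum masking in
// Value: rotate the raw Castagnoli checksum right by 15 bits and add a
// constant, so that storing a CRC inside data that is itself checksummed
// does not produce degenerate values. Usage is chainable:
//
//	c := util.NewCRC(header).Update(payload)
//	stored := c.Value() // masked form, as written alongside the data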
+ +// +build go1.3 + +package util + +import ( + "sync" +) + +type Pool struct { + sync.Pool +} + +func NewPool(cap int) *Pool { + return &Pool{} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go new file mode 100644 index 0000000..27b8d03 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go @@ -0,0 +1,33 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build !go1.3 + +package util + +type Pool struct { + pool chan interface{} +} + +func (p *Pool) Get() interface{} { + select { + case x := <-p.pool: + return x + default: + return nil + } +} + +func (p *Pool) Put(x interface{}) { + select { + case p.pool <- x: + default: + } +} + +func NewPool(cap int) *Pool { + return &Pool{pool: make(chan interface{}, cap)} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go new file mode 100644 index 0000000..8515958 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go @@ -0,0 +1,32 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +// Range is a key range. +type Range struct { + // Start of the key range, include in the range. + Start []byte + + // Limit of the key range, not include in the range. + Limit []byte +} + +// BytesPrefix returns key range that satisfy the given prefix. +// This only applicable for the standard 'bytes comparer'. +func BytesPrefix(prefix []byte) *Range { + var limit []byte + for i := len(prefix) - 1; i >= 0; i-- { + c := prefix[i] + if c < 0xff { + limit = make([]byte, i+1) + copy(limit, prefix) + limit[i] = c + 1 + break + } + } + return &Range{prefix, limit} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go new file mode 100644 index 0000000..229c7d4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go @@ -0,0 +1,49 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package util provides utilities used throughout leveldb. +package util + +import ( + "errors" +) + +var ( + ErrNotFound = errors.New("leveldb: not found") +) + +// Releaser is the interface that wraps the basic Release method. +type Releaser interface { + // Release releases associated resources. Release should always success + // and can be called multipe times without causing error. + Release() +} + +// ReleaseSetter is the interface that wraps the basic SetReleaser method. +type ReleaseSetter interface { + // SetReleaser associates the given releaser to the resources. The + // releaser will be called once coresponding resources released. + // Calling SetReleaser with nil will clear the releaser. + SetReleaser(releaser Releaser) +} + +// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. +type BasicReleaser struct { + releaser Releaser +} + +// Release implements Releaser.Release. 
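// BytesPrefix above computes the smallest Range covering every key with the
// given prefix: Limit is the prefix with its last incrementable byte bumped
// (and nil when the prefix is all 0xff, meaning no upper bound). A typical
// use with an iterator, as a sketch (db stands in for a hypothetical
// *leveldb.DB):
//
//	r := util.BytesPrefix([]byte("user/"))
//	// r.Start == "user/", r.Limit == "user0" ('/'+1 == '0')
//	it := db.NewIterator(r, nil)
//	for it.Next() {
//		// every it.Key() begins with "user/"
//	}
//	it.Release()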
+func (r *BasicReleaser) Release() { + if r.releaser != nil { + r.releaser.Release() + r.releaser = nil + } +} + +// SetReleaser implements ReleaseSetter.SetReleaser. +func (r *BasicReleaser) SetReleaser(releaser Releaser) { + r.releaser = releaser +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go new file mode 100644 index 0000000..9907be2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go @@ -0,0 +1,436 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync/atomic" + "unsafe" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var levelMaxSize [kNumLevels]float64 + +func init() { + // Precompute max size of each level + for level := range levelMaxSize { + res := float64(10 * 1048576) + for n := level; n > 1; n-- { + res *= 10 + } + levelMaxSize[level] = res + } +} + +type tSet struct { + level int + table *tFile +} + +type version struct { + s *session + + tables [kNumLevels]tFiles + + // Level that should be compacted next and its compaction score. + // Score < 1 means compaction is not strictly needed. These fields + // are initialized by computeCompaction() + cLevel int + cScore float64 + + cSeek unsafe.Pointer + + ref int + next *version +} + +func (v *version) release_NB() { + v.ref-- + if v.ref > 0 { + return + } + if v.ref < 0 { + panic("negative version ref") + } + + tables := make(map[uint64]bool) + for _, tt := range v.next.tables { + for _, t := range tt { + num := t.file.Num() + tables[num] = true + } + } + + for _, tt := range v.tables { + for _, t := range tt { + num := t.file.Num() + if _, ok := tables[num]; !ok { + v.s.tops.remove(t) + } + } + } + + v.next.release_NB() + v.next = nil +} + +func (v *version) release() { + v.s.vmu.Lock() + v.release_NB() + v.s.vmu.Unlock() +} + +func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { + ukey := ikey.ukey() + + // Walk tables level-by-level. + for level, tables := range v.tables { + if len(tables) == 0 { + continue + } + + if level == 0 { + // Level-0 files may overlap each other. Find all files that + // overlap ukey. + for _, t := range tables { + if t.overlaps(v.s.icmp, ukey, ukey) { + if !f(level, t) { + return + } + } + } + } else { + if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { + t := tables[i] + if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + if !f(level, t) { + return + } + } + } + } + + if lf != nil && !lf(level) { + return + } + } +} + +func (v *version) get(ikey iKey, ro *opt.ReadOptions) (value []byte, tcomp bool, err error) { + ukey := ikey.ukey() + + var ( + tset *tSet + tseek bool + + l0found bool + l0seq uint64 + l0vt vType + l0val []byte + ) + + err = ErrNotFound + + // Since entries never hope across level, finding key/value + // in smaller level make later levels irrelevant. 
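// The init above gives each level a byte budget of 10 MiB * 10^(level-1)
// for level >= 1 (levels 0 and 1 both get the 10 MiB base; level 0 is in
// practice scored by file count instead, see computeCompaction further
// below):
//
//	levelMaxSize[1] == 10 * 1048576   // ~10 MiB
//	levelMaxSize[2] == 100 * 1048576  // ~100 MiB
//	levelMaxSize[3] == 1000 * 1048576 // ~1 GiB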
+ v.walkOverlapping(ikey, func(level int, t *tFile) bool { + if !tseek { + if tset == nil { + tset = &tSet{level, t} + } else if tset.table.consumeSeek() <= 0 { + tseek = true + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + } + + ikey__, val_, err_ := v.s.tops.find(t, ikey, ro) + switch err_ { + case nil: + case ErrNotFound: + return true + default: + err = err_ + return false + } + + ikey_ := iKey(ikey__) + if seq, vt, ok := ikey_.parseNum(); ok { + if v.s.icmp.uCompare(ukey, ikey_.ukey()) != 0 { + return true + } + + if level == 0 { + if seq >= l0seq { + l0found = true + l0seq = seq + l0vt = vt + l0val = val_ + } + } else { + switch vt { + case tVal: + value = val_ + err = nil + case tDel: + default: + panic("leveldb: invalid internal key type") + } + return false + } + } else { + err = errors.New("leveldb: internal key corrupted") + return false + } + + return true + }, func(level int) bool { + if l0found { + switch l0vt { + case tVal: + value = l0val + err = nil + case tDel: + default: + panic("leveldb: invalid internal key type") + } + return false + } + + return true + }) + + return +} + +func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { + // Merge all level zero files together since they may overlap + for _, t := range v.tables[0] { + it := v.s.tops.newIterator(t, slice, ro) + its = append(its, it) + } + + strict := v.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator) + for _, tables := range v.tables[1:] { + if len(tables) == 0 { + continue + } + + it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict, true) + its = append(its, it) + } + + return +} + +func (v *version) newStaging() *versionStaging { + return &versionStaging{base: v} +} + +// Spawn a new version based on this version. +func (v *version) spawn(r *sessionRecord) *version { + staging := v.newStaging() + staging.commit(r) + return staging.finish() +} + +func (v *version) fillRecord(r *sessionRecord) { + for level, ts := range v.tables { + for _, t := range ts { + r.addTableFile(level, t) + } + } +} + +func (v *version) tLen(level int) int { + return len(v.tables[level]) +} + +func (v *version) offsetOf(ikey iKey) (n uint64, err error) { + for level, tables := range v.tables { + for _, t := range tables { + if v.s.icmp.Compare(t.imax, ikey) <= 0 { + // Entire file is before "ikey", so just add the file size + n += t.size + } else if v.s.icmp.Compare(t.imin, ikey) > 0 { + // Entire file is after "ikey", so ignore + if level > 0 { + // Files other than level 0 are sorted by meta->min, so + // no further files in this level will contain data for + // "ikey". + break + } + } else { + // "ikey" falls in the range for this table. Add the + // approximate offset of "ikey" within the table. 
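+				// Worked example (editorial, illustrative keys): for a table
+				// spanning ["c".."m"], an ikey of "z" takes the first branch
+				// above (the whole file size is counted), an ikey of "a"
+				// takes the second (the file is skipped), and an ikey of "h"
+				// reaches this point, adding only the estimated offset of
+				// "h" within the table.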
+ var nn uint64 + nn, err = v.s.tops.offsetOf(t, ikey) + if err != nil { + return 0, err + } + n += nn + } + } + } + + return +} + +func (v *version) pickLevel(umin, umax []byte) (level int) { + if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { + var overlaps tFiles + for ; level < kMaxMemCompactLevel; level++ { + if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { + break + } + overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) + if overlaps.size() > kMaxGrandParentOverlapBytes { + break + } + } + } + + return +} + +func (v *version) computeCompaction() { + // Precomputed best level for next compaction + var bestLevel int = -1 + var bestScore float64 = -1 + + for level, tables := range v.tables { + var score float64 + if level == 0 { + // We treat level-0 specially by bounding the number of files + // instead of number of bytes for two reasons: + // + // (1) With larger write-buffer sizes, it is nice not to do too + // many level-0 compactions. + // + // (2) The files in level-0 are merged on every read and + // therefore we wish to avoid too many files when the individual + // file size is small (perhaps because of a small write-buffer + // setting, or very high compression ratios, or lots of + // overwrites/deletions). + score = float64(len(tables)) / kL0_CompactionTrigger + } else { + score = float64(tables.size()) / levelMaxSize[level] + } + + if score > bestScore { + bestLevel = level + bestScore = score + } + } + + v.cLevel = bestLevel + v.cScore = bestScore +} + +func (v *version) needCompaction() bool { + return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil +} + +type versionStaging struct { + base *version + tables [kNumLevels]struct { + added map[uint64]ntRecord + deleted map[uint64]struct{} + } +} + +func (p *versionStaging) commit(r *sessionRecord) { + // Deleted tables. + for _, r := range r.deletedTables { + tm := &(p.tables[r.level]) + + if len(p.base.tables[r.level]) > 0 { + if tm.deleted == nil { + tm.deleted = make(map[uint64]struct{}) + } + tm.deleted[r.num] = struct{}{} + } + + if tm.added != nil { + delete(tm.added, r.num) + } + } + + // New tables. + for _, r := range r.addedTables { + tm := &(p.tables[r.level]) + + if tm.added == nil { + tm.added = make(map[uint64]ntRecord) + } + tm.added[r.num] = r + + if tm.deleted != nil { + delete(tm.deleted, r.num) + } + } +} + +func (p *versionStaging) finish() *version { + // Build new version. + nv := &version{s: p.base.s} + for level, tm := range p.tables { + btables := p.base.tables[level] + + n := len(btables) + len(tm.added) - len(tm.deleted) + if n < 0 { + n = 0 + } + nt := make(tFiles, 0, n) + + // Base tables. + for _, t := range btables { + if _, ok := tm.deleted[t.file.Num()]; ok { + continue + } + if _, ok := tm.added[t.file.Num()]; ok { + continue + } + nt = append(nt, t) + } + + // New tables. + for _, r := range tm.added { + nt = append(nt, r.makeFile(p.base.s)) + } + + // Sort tables. + if level == 0 { + nt.sortByNum() + } else { + nt.sortByKey(p.base.s.icmp) + } + nv.tables[level] = nt + } + + // Compute compaction score for new version. 
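+	// Editorial note on the score computed below (illustrative numbers):
+	// level 0 divides its file count by kL0_CompactionTrigger (assumed to be
+	// 4 here), so 6 files score 1.5; other levels divide their byte size by
+	// levelMaxSize, so 150 MiB against a 100 MiB budget also scores 1.5.
+	// Any score >= 1 makes needCompaction() report true for the new version.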
+ nv.computeCompaction() + + return nv +} + +type versionReleaser struct { + v *version + once bool +} + +func (vr *versionReleaser) Release() { + v := vr.v + v.s.vmu.Lock() + if !vr.once { + v.release_NB() + vr.once = true + } + v.s.vmu.Unlock() +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore new file mode 100644 index 0000000..191a536 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore @@ -0,0 +1,4 @@ +_* +*.swp +*.[568] +[568].out diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE new file mode 100644 index 0000000..545cf2d --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE @@ -0,0 +1,25 @@ +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/README.md b/Godeps/_workspace/src/gopkg.in/check.v1/README.md new file mode 100644 index 0000000..0ca9e57 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/README.md @@ -0,0 +1,20 @@ +Instructions +============ + +Install the package with: + + go get gopkg.in/check.v1 + +Import it with: + + import "gopkg.in/check.v1" + +and use _check_ as the package name inside the code. + +For more details, visit the project page: + +* http://labix.org/gocheck + +and the API documentation: + +* https://gopkg.in/check.v1 diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/TODO b/Godeps/_workspace/src/gopkg.in/check.v1/TODO new file mode 100644 index 0000000..3349827 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/TODO @@ -0,0 +1,2 @@ +- Assert(slice, Contains, item) +- Parallel test support diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go b/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go new file mode 100644 index 0000000..48cb8c8 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go @@ -0,0 +1,163 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package check
+
+import (
+	"fmt"
+	"runtime"
+	"time"
+)
+
+var memStats runtime.MemStats
+
+// timer holds the timing state of a test or benchmark, including the
+// number of iterations (N) a benchmark function must run.
+type timer struct {
+	start     time.Time // Time test or benchmark started
+	duration  time.Duration
+	N         int
+	bytes     int64
+	timerOn   bool
+	benchTime time.Duration
+	// The initial states of memStats.Mallocs and memStats.TotalAlloc.
+	startAllocs uint64
+	startBytes  uint64
+	// The net total of this test after being run.
+	netAllocs uint64
+	netBytes  uint64
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also be used to resume timing after
+// a call to StopTimer.
+func (c *C) StartTimer() {
+	if !c.timerOn {
+		c.start = time.Now()
+		c.timerOn = true
+
+		runtime.ReadMemStats(&memStats)
+		c.startAllocs = memStats.Mallocs
+		c.startBytes = memStats.TotalAlloc
+	}
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (c *C) StopTimer() {
+	if c.timerOn {
+		c.duration += time.Now().Sub(c.start)
+		c.timerOn = false
+		runtime.ReadMemStats(&memStats)
+		c.netAllocs += memStats.Mallocs - c.startAllocs
+		c.netBytes += memStats.TotalAlloc - c.startBytes
+	}
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (c *C) ResetTimer() {
+	if c.timerOn {
+		c.start = time.Now()
+		runtime.ReadMemStats(&memStats)
+		c.startAllocs = memStats.Mallocs
+		c.startBytes = memStats.TotalAlloc
+	}
+	c.duration = 0
+	c.netAllocs = 0
+	c.netBytes = 0
+}
+
+// SetBytes sets the number of bytes that the benchmark processes on each
+// iteration. If this is called in a benchmark it will also report MB/s.
+func (c *C) SetBytes(n int64) {
+	c.bytes = n
+}
+
+func (c *C) nsPerOp() int64 {
+	if c.N <= 0 {
+		return 0
+	}
+	return c.duration.Nanoseconds() / int64(c.N)
+}
+
+func (c *C) mbPerSec() float64 {
+	if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
+		return 0
+	}
+	return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
+}
+
+func (c *C) timerString() string {
+	if c.N <= 0 {
+		return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
+	}
+	mbs := c.mbPerSec()
+	mb := ""
+	if mbs != 0 {
+		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+	}
+	nsop := c.nsPerOp()
+	ns := fmt.Sprintf("%10d ns/op", nsop)
+	if c.N > 0 && nsop < 100 {
+		// The format specifiers here make sure that
+		// the ones digits line up for all three possible formats.
+		if nsop < 10 {
+			ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+		} else {
+			ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+		}
+	}
+	memStats := ""
+	if c.benchMem {
+		allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
+		allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
+		memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
+	}
+	return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
+}
+
+func min(x, y int) int {
+	if x > y {
+		return y
+	}
+	return x
+}
+
+func max(x, y int) int {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
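+//
+// Worked example (editorial): roundDown10(137) == 100, and roundUp(137), in
+// turn, picks 2*100 == 200. Values already of the allowed form map to
+// themselves, e.g. roundUp(100) == 100 and roundUp(500) == 500, which keeps
+// benchmark iteration counts at round numbers such as 100, 200, 500, 1000.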
+func roundDown10(n int) int {
+	var tens = 0
+	// tens = floor(log_10(n))
+	for n >= 10 {
+		n = n / 10
+		tens++
+	}
+	// result = 10^tens
+	result := 1
+	for i := 0; i < tens; i++ {
+		result *= 10
+	}
+	return result
+}
+
+// roundUp rounds n up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+	base := roundDown10(n)
+	// Values already of the allowed form round to themselves.
+	if n <= base {
+		return base
+	}
+	if n <= (2 * base) {
+		return 2 * base
+	}
+	if n <= (5 * base) {
+		return 5 * base
+	}
+	return 10 * base
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go
new file mode 100644
index 0000000..1814cec
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go
@@ -0,0 +1,91 @@
+// These tests verify the test running logic.
+
+package check_test
+
+import (
+	"time"
+	. "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1"
+)
+
+var benchmarkS = Suite(&BenchmarkS{})
+
+type BenchmarkS struct{}
+
+func (s *BenchmarkS) TestCountSuite(c *C) {
+	suitesRun += 1
+}
+
+func (s *BenchmarkS) TestBasicTestTiming(c *C) {
+	helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond}
+	output := String{}
+	runConf := RunConf{Output: &output, Verbose: true}
+	Run(&helper, &runConf)
+
+	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" +
+		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n"
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestStreamTestTiming(c *C) {
+	helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond}
+	output := String{}
+	runConf := RunConf{Output: &output, Stream: true}
+	Run(&helper, &runConf)
+
+	expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*"
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmark(c *C) {
+	helper := FixtureHelper{sleep: 100000}
+	output := String{}
+	runConf := RunConf{
+		Output:        &output,
+		Benchmark:     true,
+		BenchmarkTime: 10000000,
+		Filter:        "Benchmark1",
+	}
+	Run(&helper, &runConf)
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Benchmark1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "SetUpTest")
+	c.Check(helper.calls[5], Equals, "Benchmark1")
+	c.Check(helper.calls[6], Equals, "TearDownTest")
+	// ... and more.
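+	// Editorial note: the sequence above repeats because the benchmark
+	// runner (see forkTest in check.go) reruns Benchmark1 with a growing
+	// c.N, wrapping every attempt in a fresh SetUpTest/TearDownTest pair,
+	// until the run lasts at least BenchmarkTime (10ms here).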
+ + expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n" + c.Assert(output.value, Matches, expected) +} + +func (s *BenchmarkS) TestBenchmarkBytes(c *C) { + helper := FixtureHelper{sleep: 100000} + output := String{} + runConf := RunConf{ + Output: &output, + Benchmark: true, + BenchmarkTime: 10000000, + Filter: "Benchmark2", + } + Run(&helper, &runConf) + + expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n" + c.Assert(output.value, Matches, expected) +} + +func (s *BenchmarkS) TestBenchmarkMem(c *C) { + helper := FixtureHelper{sleep: 100000} + output := String{} + runConf := RunConf{ + Output: &output, + Benchmark: true, + BenchmarkMem: true, + BenchmarkTime: 10000000, + Filter: "Benchmark3", + } + Run(&helper, &runConf) + + expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t *100\t *[12][0-9]{5} ns/op\t *4[48] B/op\t *1 allocs/op\n" + c.Assert(output.value, Matches, expected) +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go new file mode 100644 index 0000000..0db2c01 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go @@ -0,0 +1,82 @@ +// These initial tests are for bootstrapping. They verify that we can +// basically use the testing infrastructure itself to check if the test +// system is working. +// +// These tests use will break down the test runner badly in case of +// errors because if they simply fail, we can't be sure the developer +// will ever see anything (because failing means the failing system +// somehow isn't working! :-) +// +// Do not assume *any* internal functionality works as expected besides +// what's actually tested here. + +package check_test + +import ( + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" + "strings" +) + +type BootstrapS struct{} + +var boostrapS = check.Suite(&BootstrapS{}) + +func (s *BootstrapS) TestCountSuite(c *check.C) { + suitesRun += 1 +} + +func (s *BootstrapS) TestFailedAndFail(c *check.C) { + if c.Failed() { + critical("c.Failed() must be false first!") + } + c.Fail() + if !c.Failed() { + critical("c.Fail() didn't put the test in a failed state!") + } + c.Succeed() +} + +func (s *BootstrapS) TestFailedAndSucceed(c *check.C) { + c.Fail() + c.Succeed() + if c.Failed() { + critical("c.Succeed() didn't put the test back in a non-failed state") + } +} + +func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) { + c.Log("Hello there!") + log := c.GetTestLog() + if log != "Hello there!\n" { + critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log)) + } +} + +func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) { + c.Logf("Hello %v", "there!") + log := c.GetTestLog() + if log != "Hello there!\n" { + critical(fmt.Sprintf("Logf() or GetTestLog() is not working! Got: %#v", log)) + } +} + +func (s *BootstrapS) TestRunShowsErrors(c *check.C) { + output := String{} + check.Run(&FailHelper{}, &check.RunConf{Output: &output}) + if strings.Index(output.value, "Expected failure!") == -1 { + critical(fmt.Sprintf("RunWithWriter() output did not contain the "+ + "expected failure! 
Got: %#v", + output.value)) + } +} + +func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) { + output := String{} + check.Run(&SuccessHelper{}, &check.RunConf{Output: &output}) + if strings.Index(output.value, "Expected success!") != -1 { + critical(fmt.Sprintf("RunWithWriter() output contained a successful "+ + "test! Got: %#v", + output.value)) + } +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/check.go b/Godeps/_workspace/src/gopkg.in/check.v1/check.go new file mode 100644 index 0000000..8868a9f --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/check.go @@ -0,0 +1,943 @@ +// Package check is a rich testing extension for Go's testing package. +// +// For details about the project, see: +// +// http://labix.org/gocheck +// +package check + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +// ----------------------------------------------------------------------- +// Internal type which deals with suite method calling. + +const ( + fixtureKd = iota + testKd +) + +type funcKind int + +const ( + succeededSt = iota + failedSt + skippedSt + panickedSt + fixturePanickedSt + missedSt +) + +type funcStatus int + +// A method value can't reach its own Method structure. +type methodType struct { + reflect.Value + Info reflect.Method +} + +func newMethod(receiver reflect.Value, i int) *methodType { + return &methodType{receiver.Method(i), receiver.Type().Method(i)} +} + +func (method *methodType) PC() uintptr { + return method.Info.Func.Pointer() +} + +func (method *methodType) suiteName() string { + t := method.Info.Type.In(0) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t.Name() +} + +func (method *methodType) String() string { + return method.suiteName() + "." + method.Info.Name +} + +func (method *methodType) matches(re *regexp.Regexp) bool { + return (re.MatchString(method.Info.Name) || + re.MatchString(method.suiteName()) || + re.MatchString(method.String())) +} + +type C struct { + method *methodType + kind funcKind + testName string + status funcStatus + logb *logger + logw io.Writer + done chan *C + reason string + mustFail bool + tempDir *tempDir + benchMem bool + startTime time.Time + timer +} + +func (c *C) stopNow() { + runtime.Goexit() +} + +// logger is a concurrency safe byte.Buffer +type logger struct { + sync.Mutex + writer bytes.Buffer +} + +func (l *logger) Write(buf []byte) (int, error) { + l.Lock() + defer l.Unlock() + return l.writer.Write(buf) +} + +func (l *logger) WriteTo(w io.Writer) (int64, error) { + l.Lock() + defer l.Unlock() + return l.writer.WriteTo(w) +} + +func (l *logger) String() string { + l.Lock() + defer l.Unlock() + return l.writer.String() +} + +// ----------------------------------------------------------------------- +// Handling of temporary files and directories. 
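+//
+// Editorial sketch of what this section provides to suites: scratch
+// directories that need no manual cleanup, removed once the suite finishes
+// (MySuite and TestWrite below are hypothetical names for illustration):
+//
+//	func (s *MySuite) TestWrite(c *check.C) {
+//		dir := c.MkDir() // e.g. /tmp/check-123456789/0
+//		// ... create files under dir freely; no defer os.RemoveAll needed
+//	}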
+ +type tempDir struct { + sync.Mutex + path string + counter int +} + +func (td *tempDir) newPath() string { + td.Lock() + defer td.Unlock() + if td.path == "" { + var err error + for i := 0; i != 100; i++ { + path := fmt.Sprintf("%s/check-%d", os.TempDir(), rand.Int()) + if err = os.Mkdir(path, 0700); err == nil { + td.path = path + break + } + } + if td.path == "" { + panic("Couldn't create temporary directory: " + err.Error()) + } + } + result := filepath.Join(td.path, strconv.Itoa(td.counter)) + td.counter += 1 + return result +} + +func (td *tempDir) removeAll() { + td.Lock() + defer td.Unlock() + if td.path != "" { + err := os.RemoveAll(td.path) + if err != nil { + fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) + } + } +} + +// Create a new temporary directory which is automatically removed after +// the suite finishes running. +func (c *C) MkDir() string { + path := c.tempDir.newPath() + if err := os.Mkdir(path, 0700); err != nil { + panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) + } + return path +} + +// ----------------------------------------------------------------------- +// Low-level logging functions. + +func (c *C) log(args ...interface{}) { + c.writeLog([]byte(fmt.Sprint(args...) + "\n")) +} + +func (c *C) logf(format string, args ...interface{}) { + c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) +} + +func (c *C) logNewLine() { + c.writeLog([]byte{'\n'}) +} + +func (c *C) writeLog(buf []byte) { + c.logb.Write(buf) + if c.logw != nil { + c.logw.Write(buf) + } +} + +func hasStringOrError(x interface{}) (ok bool) { + _, ok = x.(fmt.Stringer) + if ok { + return + } + _, ok = x.(error) + return +} + +func (c *C) logValue(label string, value interface{}) { + if label == "" { + if hasStringOrError(value) { + c.logf("... %#v (%q)", value, value) + } else { + c.logf("... %#v", value) + } + } else if value == nil { + c.logf("... %s = nil", label) + } else { + if hasStringOrError(value) { + fv := fmt.Sprintf("%#v", value) + qv := fmt.Sprintf("%q", value) + if fv != qv { + c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) + return + } + } + if s, ok := value.(string); ok && isMultiLine(s) { + c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) + c.logMultiLine(s) + } else { + c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value) + } + } +} + +func (c *C) logMultiLine(s string) { + b := make([]byte, 0, len(s)*2) + i := 0 + n := len(s) + for i < n { + j := i + 1 + for j < n && s[j-1] != '\n' { + j++ + } + b = append(b, "... "...) + b = strconv.AppendQuote(b, s[i:j]) + if j < n { + b = append(b, " +"...) + } + b = append(b, '\n') + i = j + } + c.writeLog(b) +} + +func isMultiLine(s string) bool { + for i := 0; i+1 < len(s); i++ { + if s[i] == '\n' { + return true + } + } + return false +} + +func (c *C) logString(issue string) { + c.log("... ", issue) +} + +func (c *C) logCaller(skip int) { + // This is a bit heavier than it ought to be. + skip += 1 // Our own frame. + pc, callerFile, callerLine, ok := runtime.Caller(skip) + if !ok { + return + } + var testFile string + var testLine int + testFunc := runtime.FuncForPC(c.method.PC()) + if runtime.FuncForPC(pc) != testFunc { + for { + skip += 1 + if pc, file, line, ok := runtime.Caller(skip); ok { + // Note that the test line may be different on + // distinct calls for the same test. Showing + // the "internal" line is helpful when debugging. 
+ if runtime.FuncForPC(pc) == testFunc { + testFile, testLine = file, line + break + } + } else { + break + } + } + } + if testFile != "" && (testFile != callerFile || testLine != callerLine) { + c.logCode(testFile, testLine) + } + c.logCode(callerFile, callerLine) +} + +func (c *C) logCode(path string, line int) { + c.logf("%s:%d:", nicePath(path), line) + code, err := printLine(path, line) + if code == "" { + code = "..." // XXX Open the file and take the raw line. + if err != nil { + code += err.Error() + } + } + c.log(indent(code, " ")) +} + +var valueGo = filepath.Join("reflect", "value.go") +var asmGo = filepath.Join("runtime", "asm_") + +func (c *C) logPanic(skip int, value interface{}) { + skip += 1 // Our own frame. + initialSkip := skip + for { + if pc, file, line, ok := runtime.Caller(skip); ok { + if skip == initialSkip { + c.logf("... Panic: %s (PC=0x%X)\n", value, pc) + } + name := niceFuncName(pc) + path := nicePath(file) + if name == "Value.call" && strings.HasSuffix(path, valueGo) { + break + } + if name == "call16" && strings.Contains(path, asmGo) { + break + } + c.logf("%s:%d\n in %s", nicePath(file), line, name) + } else { + break + } + skip += 1 + } +} + +func (c *C) logSoftPanic(issue string) { + c.log("... Panic: ", issue) +} + +func (c *C) logArgPanic(method *methodType, expectedType string) { + c.logf("... Panic: %s argument should be %s", + niceFuncName(method.PC()), expectedType) +} + +// ----------------------------------------------------------------------- +// Some simple formatting helpers. + +var initWD, initWDErr = os.Getwd() + +func init() { + if initWDErr == nil { + initWD = strings.Replace(initWD, "\\", "/", -1) + "/" + } +} + +func nicePath(path string) string { + if initWDErr == nil { + if strings.HasPrefix(path, initWD) { + return path[len(initWD):] + } + } + return path +} + +func niceFuncPath(pc uintptr) string { + function := runtime.FuncForPC(pc) + if function != nil { + filename, line := function.FileLine(pc) + return fmt.Sprintf("%s:%d", nicePath(filename), line) + } + return "" +} + +func niceFuncName(pc uintptr) string { + function := runtime.FuncForPC(pc) + if function != nil { + name := path.Base(function.Name()) + if i := strings.Index(name, "."); i > 0 { + name = name[i+1:] + } + if strings.HasPrefix(name, "(*") { + if i := strings.Index(name, ")"); i > 0 { + name = name[2:i] + name[i+1:] + } + } + if i := strings.LastIndex(name, ".*"); i != -1 { + name = name[:i] + "." + name[i+2:] + } + if i := strings.LastIndex(name, "·"); i != -1 { + name = name[:i] + "." + name[i+2:] + } + return name + } + return "" +} + +// ----------------------------------------------------------------------- +// Result tracker to aggregate call results. + +type Result struct { + Succeeded int + Failed int + Skipped int + Panicked int + FixturePanicked int + ExpectedFailures int + Missed int // Not even tried to run, related to a panic in the fixture. + RunError error // Houston, we've got a problem. 
+ WorkDir string // If KeepWorkDir is true +} + +type resultTracker struct { + result Result + _lastWasProblem bool + _waiting int + _missed int + _expectChan chan *C + _doneChan chan *C + _stopChan chan bool +} + +func newResultTracker() *resultTracker { + return &resultTracker{_expectChan: make(chan *C), // Synchronous + _doneChan: make(chan *C, 32), // Asynchronous + _stopChan: make(chan bool)} // Synchronous +} + +func (tracker *resultTracker) start() { + go tracker._loopRoutine() +} + +func (tracker *resultTracker) waitAndStop() { + <-tracker._stopChan +} + +func (tracker *resultTracker) expectCall(c *C) { + tracker._expectChan <- c +} + +func (tracker *resultTracker) callDone(c *C) { + tracker._doneChan <- c +} + +func (tracker *resultTracker) _loopRoutine() { + for { + var c *C + if tracker._waiting > 0 { + // Calls still running. Can't stop. + select { + // XXX Reindent this (not now to make diff clear) + case c = <-tracker._expectChan: + tracker._waiting += 1 + case c = <-tracker._doneChan: + tracker._waiting -= 1 + switch c.status { + case succeededSt: + if c.kind == testKd { + if c.mustFail { + tracker.result.ExpectedFailures++ + } else { + tracker.result.Succeeded++ + } + } + case failedSt: + tracker.result.Failed++ + case panickedSt: + if c.kind == fixtureKd { + tracker.result.FixturePanicked++ + } else { + tracker.result.Panicked++ + } + case fixturePanickedSt: + // Track it as missed, since the panic + // was on the fixture, not on the test. + tracker.result.Missed++ + case missedSt: + tracker.result.Missed++ + case skippedSt: + if c.kind == testKd { + tracker.result.Skipped++ + } + } + } + } else { + // No calls. Can stop, but no done calls here. + select { + case tracker._stopChan <- true: + return + case c = <-tracker._expectChan: + tracker._waiting += 1 + case c = <-tracker._doneChan: + panic("Tracker got an unexpected done call.") + } + } + } +} + +// ----------------------------------------------------------------------- +// The underlying suite runner. + +type suiteRunner struct { + suite interface{} + setUpSuite, tearDownSuite *methodType + setUpTest, tearDownTest *methodType + tests []*methodType + tracker *resultTracker + tempDir *tempDir + keepDir bool + output *outputWriter + reportedProblemLast bool + benchTime time.Duration + benchMem bool +} + +type RunConf struct { + Output io.Writer + Stream bool + Verbose bool + Filter string + Benchmark bool + BenchmarkTime time.Duration // Defaults to 1 second + BenchmarkMem bool + KeepWorkDir bool +} + +// Create a new suiteRunner able to run all methods in the given suite. 
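+//
+// Editorial note: newSuiteRunner below wires the tracker above into the run.
+// The tracker goroutine balances expectCall against callDone and only hands
+// a value to waitAndStop once the two counts match:
+//
+//	tracker.expectCall(c) // _waiting: 0 -> 1
+//	tracker.callDone(c)   // _waiting: 1 -> 0, result counters updated
+//	tracker.waitAndStop() // returns, since nothing is pending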
+func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { + var conf RunConf + if runConf != nil { + conf = *runConf + } + if conf.Output == nil { + conf.Output = os.Stdout + } + if conf.Benchmark { + conf.Verbose = true + } + + suiteType := reflect.TypeOf(suite) + suiteNumMethods := suiteType.NumMethod() + suiteValue := reflect.ValueOf(suite) + + runner := &suiteRunner{ + suite: suite, + output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), + tracker: newResultTracker(), + benchTime: conf.BenchmarkTime, + benchMem: conf.BenchmarkMem, + tempDir: &tempDir{}, + keepDir: conf.KeepWorkDir, + tests: make([]*methodType, 0, suiteNumMethods), + } + if runner.benchTime == 0 { + runner.benchTime = 1 * time.Second + } + + var filterRegexp *regexp.Regexp + if conf.Filter != "" { + if regexp, err := regexp.Compile(conf.Filter); err != nil { + msg := "Bad filter expression: " + err.Error() + runner.tracker.result.RunError = errors.New(msg) + return runner + } else { + filterRegexp = regexp + } + } + + for i := 0; i != suiteNumMethods; i++ { + method := newMethod(suiteValue, i) + switch method.Info.Name { + case "SetUpSuite": + runner.setUpSuite = method + case "TearDownSuite": + runner.tearDownSuite = method + case "SetUpTest": + runner.setUpTest = method + case "TearDownTest": + runner.tearDownTest = method + default: + prefix := "Test" + if conf.Benchmark { + prefix = "Benchmark" + } + if !strings.HasPrefix(method.Info.Name, prefix) { + continue + } + if filterRegexp == nil || method.matches(filterRegexp) { + runner.tests = append(runner.tests, method) + } + } + } + return runner +} + +// Run all methods in the given suite. +func (runner *suiteRunner) run() *Result { + if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { + runner.tracker.start() + if runner.checkFixtureArgs() { + c := runner.runFixture(runner.setUpSuite, "", nil) + if c == nil || c.status == succeededSt { + for i := 0; i != len(runner.tests); i++ { + c := runner.runTest(runner.tests[i]) + if c.status == fixturePanickedSt { + runner.skipTests(missedSt, runner.tests[i+1:]) + break + } + } + } else if c != nil && c.status == skippedSt { + runner.skipTests(skippedSt, runner.tests) + } else { + runner.skipTests(missedSt, runner.tests) + } + runner.runFixture(runner.tearDownSuite, "", nil) + } else { + runner.skipTests(missedSt, runner.tests) + } + runner.tracker.waitAndStop() + if runner.keepDir { + runner.tracker.result.WorkDir = runner.tempDir.path + } else { + runner.tempDir.removeAll() + } + } + return &runner.tracker.result +} + +// Create a call object with the given suite method, and fork a +// goroutine with the provided dispatcher for running it. +func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { + var logw io.Writer + if runner.output.Stream { + logw = runner.output + } + if logb == nil { + logb = new(logger) + } + c := &C{ + method: method, + kind: kind, + testName: testName, + logb: logb, + logw: logw, + tempDir: runner.tempDir, + done: make(chan *C, 1), + timer: timer{benchTime: runner.benchTime}, + startTime: time.Now(), + benchMem: runner.benchMem, + } + runner.tracker.expectCall(c) + go (func() { + runner.reportCallStarted(c) + defer runner.callDone(c) + dispatcher(c) + })() + return c +} + +// Same as forkCall(), but wait for call to finish before returning. 
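+//
+// Editorial note: the fork/wait split above lets each test or fixture run
+// on its own goroutine (so a helper can abort it with runtime.Goexit via
+// stopNow) while the runner observes completion through the buffered done
+// channel:
+//
+//	c := runner.forkCall(method, testKd, name, nil, dispatcher)
+//	<-c.done // delivered by callDone() when the goroutine finishes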
+func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { + c := runner.forkCall(method, kind, testName, logb, dispatcher) + <-c.done + return c +} + +// Handle a finished call. If there were any panics, update the call status +// accordingly. Then, mark the call as done and report to the tracker. +func (runner *suiteRunner) callDone(c *C) { + value := recover() + if value != nil { + switch v := value.(type) { + case *fixturePanic: + if v.status == skippedSt { + c.status = skippedSt + } else { + c.logSoftPanic("Fixture has panicked (see related PANIC)") + c.status = fixturePanickedSt + } + default: + c.logPanic(1, value) + c.status = panickedSt + } + } + if c.mustFail { + switch c.status { + case failedSt: + c.status = succeededSt + case succeededSt: + c.status = failedSt + c.logString("Error: Test succeeded, but was expected to fail") + c.logString("Reason: " + c.reason) + } + } + + runner.reportCallDone(c) + c.done <- c +} + +// Runs a fixture call synchronously. The fixture will still be run in a +// goroutine like all suite methods, but this method will not return +// while the fixture goroutine is not done, because the fixture must be +// run in a desired order. +func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { + if method != nil { + c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { + c.ResetTimer() + c.StartTimer() + defer c.StopTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + }) + return c + } + return nil +} + +// Run the fixture method with runFixture(), but panic with a fixturePanic{} +// in case the fixture method panics. This makes it easier to track the +// fixture panic together with other call panics within forkTest(). +func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { + if skipped != nil && *skipped { + return nil + } + c := runner.runFixture(method, testName, logb) + if c != nil && c.status != succeededSt { + if skipped != nil { + *skipped = c.status == skippedSt + } + panic(&fixturePanic{c.status, method}) + } + return c +} + +type fixturePanic struct { + status funcStatus + method *methodType +} + +// Run the suite test method, together with the test-specific fixture, +// asynchronously. +func (runner *suiteRunner) forkTest(method *methodType) *C { + testName := method.String() + return runner.forkCall(method, testKd, testName, nil, func(c *C) { + var skipped bool + defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) + defer c.StopTimer() + benchN := 1 + for { + runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) + mt := c.method.Type() + if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { + // Rather than a plain panic, provide a more helpful message when + // the argument type is incorrect. 
+ c.status = panickedSt + c.logArgPanic(c.method, "*check.C") + return + } + if strings.HasPrefix(c.method.Info.Name, "Test") { + c.ResetTimer() + c.StartTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + return + } + if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { + panic("unexpected method prefix: " + c.method.Info.Name) + } + + runtime.GC() + c.N = benchN + c.ResetTimer() + c.StartTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + c.StopTimer() + if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { + return + } + perOpN := int(1e9) + if c.nsPerOp() != 0 { + perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) + } + + // Logic taken from the stock testing package: + // - Run more iterations than we think we'll need for a second (1.5x). + // - Don't grow too fast in case we had timing errors previously. + // - Be sure to run at least one more than last time. + benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) + benchN = roundUp(benchN) + + skipped = true // Don't run the deferred one if this panics. + runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) + skipped = false + } + }) +} + +// Same as forkTest(), but wait for the test to finish before returning. +func (runner *suiteRunner) runTest(method *methodType) *C { + c := runner.forkTest(method) + <-c.done + return c +} + +// Helper to mark tests as skipped or missed. A bit heavy for what +// it does, but it enables homogeneous handling of tracking, including +// nice verbose output. +func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { + for _, method := range methods { + runner.runFunc(method, testKd, "", nil, func(c *C) { + c.status = status + }) + } +} + +// Verify if the fixture arguments are *check.C. In case of errors, +// log the error as a panic in the fixture method call, and return false. +func (runner *suiteRunner) checkFixtureArgs() bool { + succeeded := true + argType := reflect.TypeOf(&C{}) + for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { + if method != nil { + mt := method.Type() + if mt.NumIn() != 1 || mt.In(0) != argType { + succeeded = false + runner.runFunc(method, fixtureKd, "", nil, func(c *C) { + c.logArgPanic(method, "*check.C") + c.status = panickedSt + }) + } + } + } + return succeeded +} + +func (runner *suiteRunner) reportCallStarted(c *C) { + runner.output.WriteCallStarted("START", c) +} + +func (runner *suiteRunner) reportCallDone(c *C) { + runner.tracker.callDone(c) + switch c.status { + case succeededSt: + if c.mustFail { + runner.output.WriteCallSuccess("FAIL EXPECTED", c) + } else { + runner.output.WriteCallSuccess("PASS", c) + } + case skippedSt: + runner.output.WriteCallSuccess("SKIP", c) + case failedSt: + runner.output.WriteCallProblem("FAIL", c) + case panickedSt: + runner.output.WriteCallProblem("PANIC", c) + case fixturePanickedSt: + // That's a testKd call reporting that its fixture + // has panicked. The fixture call which caused the + // panic itself was tracked above. We'll report to + // aid debugging. + runner.output.WriteCallProblem("PANIC", c) + case missedSt: + runner.output.WriteCallSuccess("MISS", c) + } +} + +// ----------------------------------------------------------------------- +// Output writer manages atomic output writing according to settings. 
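+//
+// Editorial note, looking back at the benchmark loop in forkTest above: c.N
+// grows until a run lasts at least benchTime. Assuming roughly 200µs per
+// iteration and the default benchTime of 1s, the progression would be:
+//
+//	N=1      takes 200µs -> perOpN = 5000, next N = roundUp(100)  = 100
+//	N=100    takes 20ms  -> perOpN = 5000, next N = roundUp(7500) = 10000
+//	N=10000  takes 2s    -> reaches benchTime, loop ends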
+ +type outputWriter struct { + m sync.Mutex + writer io.Writer + wroteCallProblemLast bool + Stream bool + Verbose bool +} + +func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { + return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} +} + +func (ow *outputWriter) Write(content []byte) (n int, err error) { + ow.m.Lock() + n, err = ow.writer.Write(content) + ow.m.Unlock() + return +} + +func (ow *outputWriter) WriteCallStarted(label string, c *C) { + if ow.Stream { + header := renderCallHeader(label, c, "", "\n") + ow.m.Lock() + ow.writer.Write([]byte(header)) + ow.m.Unlock() + } +} + +func (ow *outputWriter) WriteCallProblem(label string, c *C) { + var prefix string + if !ow.Stream { + prefix = "\n-----------------------------------" + + "-----------------------------------\n" + } + header := renderCallHeader(label, c, prefix, "\n\n") + ow.m.Lock() + ow.wroteCallProblemLast = true + ow.writer.Write([]byte(header)) + if !ow.Stream { + c.logb.WriteTo(ow.writer) + } + ow.m.Unlock() +} + +func (ow *outputWriter) WriteCallSuccess(label string, c *C) { + if ow.Stream || (ow.Verbose && c.kind == testKd) { + // TODO Use a buffer here. + var suffix string + if c.reason != "" { + suffix = " (" + c.reason + ")" + } + if c.status == succeededSt { + suffix += "\t" + c.timerString() + } + suffix += "\n" + if ow.Stream { + suffix += "\n" + } + header := renderCallHeader(label, c, "", suffix) + ow.m.Lock() + // Resist temptation of using line as prefix above due to race. + if !ow.Stream && ow.wroteCallProblemLast { + header = "\n-----------------------------------" + + "-----------------------------------\n" + + header + } + ow.wroteCallProblemLast = false + ow.writer.Write([]byte(header)) + ow.m.Unlock() + } +} + +func renderCallHeader(label string, c *C, prefix, suffix string) string { + pc := c.method.PC() + return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), + niceFuncName(pc), suffix) +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go new file mode 100644 index 0000000..9486116 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go @@ -0,0 +1,207 @@ +// This file contains just a few generic helpers which are used by the +// other test files. + +package check_test + +import ( + "flag" + "fmt" + "os" + "regexp" + "runtime" + "testing" + "time" + + "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" +) + +// We count the number of suites run at least to get a vague hint that the +// test suite is behaving as it should. Otherwise a bug introduced at the +// very core of the system could go unperceived. +const suitesRunExpected = 8 + +var suitesRun int = 0 + +func Test(t *testing.T) { + check.TestingT(t) + if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" { + critical(fmt.Sprintf("Expected %d suites to run rather than %d", + suitesRunExpected, suitesRun)) + } +} + +// ----------------------------------------------------------------------- +// Helper functions. + +// Break down badly. This is used in test cases which can't yet assume +// that the fundamental bits are working. +func critical(error string) { + fmt.Fprintln(os.Stderr, "CRITICAL: "+error) + os.Exit(1) +} + +// Return the file line where it's called. 
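+//
+// Editorial note: helpers such as FailHelper below use this to record the
+// exact line of a failure, so tests can match gocheck's file:line output,
+// e.g. (line number invented for illustration):
+//
+//	FAIL: check_test.go:101: FailHelper.TestLogAndFail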
+func getMyLine() int { + if _, _, line, ok := runtime.Caller(1); ok { + return line + } + return -1 +} + +// ----------------------------------------------------------------------- +// Helper type implementing a basic io.Writer for testing output. + +// Type implementing the io.Writer interface for analyzing output. +type String struct { + value string +} + +// The only function required by the io.Writer interface. Will append +// written data to the String.value string. +func (s *String) Write(p []byte) (n int, err error) { + s.value += string(p) + return len(p), nil +} + +// Trivial wrapper to test errors happening on a different file +// than the test itself. +func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) { + return c.Check(obtained, check.Equals, expected), getMyLine() +} + +// ----------------------------------------------------------------------- +// Helper suite for testing basic fail behavior. + +type FailHelper struct { + testLine int +} + +func (s *FailHelper) TestLogAndFail(c *check.C) { + s.testLine = getMyLine() - 1 + c.Log("Expected failure!") + c.Fail() +} + +// ----------------------------------------------------------------------- +// Helper suite for testing basic success behavior. + +type SuccessHelper struct{} + +func (s *SuccessHelper) TestLogAndSucceed(c *check.C) { + c.Log("Expected success!") +} + +// ----------------------------------------------------------------------- +// Helper suite for testing ordering and behavior of fixture. + +type FixtureHelper struct { + calls []string + panicOn string + skip bool + skipOnN int + sleepOn string + sleep time.Duration + bytes int64 +} + +func (s *FixtureHelper) trace(name string, c *check.C) { + s.calls = append(s.calls, name) + if name == s.panicOn { + panic(name) + } + if s.sleep > 0 && s.sleepOn == name { + time.Sleep(s.sleep) + } + if s.skip && s.skipOnN == len(s.calls)-1 { + c.Skip("skipOnN == n") + } +} + +func (s *FixtureHelper) SetUpSuite(c *check.C) { + s.trace("SetUpSuite", c) +} + +func (s *FixtureHelper) TearDownSuite(c *check.C) { + s.trace("TearDownSuite", c) +} + +func (s *FixtureHelper) SetUpTest(c *check.C) { + s.trace("SetUpTest", c) +} + +func (s *FixtureHelper) TearDownTest(c *check.C) { + s.trace("TearDownTest", c) +} + +func (s *FixtureHelper) Test1(c *check.C) { + s.trace("Test1", c) +} + +func (s *FixtureHelper) Test2(c *check.C) { + s.trace("Test2", c) +} + +func (s *FixtureHelper) Benchmark1(c *check.C) { + s.trace("Benchmark1", c) + for i := 0; i < c.N; i++ { + time.Sleep(s.sleep) + } +} + +func (s *FixtureHelper) Benchmark2(c *check.C) { + s.trace("Benchmark2", c) + c.SetBytes(1024) + for i := 0; i < c.N; i++ { + time.Sleep(s.sleep) + } +} + +func (s *FixtureHelper) Benchmark3(c *check.C) { + var x []int64 + s.trace("Benchmark3", c) + for i := 0; i < c.N; i++ { + time.Sleep(s.sleep) + x = make([]int64, 5) + _ = x + } +} + +// ----------------------------------------------------------------------- +// Helper which checks the state of the test and ensures that it matches +// the given expectations. Depends on c.Errorf() working, so shouldn't +// be used to test this one function. + +type expectedState struct { + name string + result interface{} + failed bool + log string +} + +// Verify the state of the test. Note that since this also verifies if +// the test is supposed to be in a failed state, no other checks should +// be done in addition to what is being tested. 
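+//
+// Editorial sketch of how this helper is typically driven (values are
+// illustrative, not taken from a real test):
+//
+//	result := c.Check(42, check.Equals, 42)
+//	checkState(c, result, &expectedState{
+//		name:   "Check(42, Equals, 42)",
+//		result: true,
+//		failed: false,
+//		log:    "",
+//	})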
+func checkState(c *check.C, result interface{}, expected *expectedState) {
+	failed := c.Failed()
+	c.Succeed()
+	log := c.GetTestLog()
+	matched, matchError := regexp.MatchString("^"+expected.log+"$", log)
+	if matchError != nil {
+		c.Errorf("Error in matching expression used in testing %s",
+			expected.name)
+	} else if !matched {
+		c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------",
+			expected.name, log, expected.log)
+	}
+	if result != expected.result {
+		c.Errorf("%s returned %#v rather than %#v",
+			expected.name, result, expected.result)
+	}
+	if failed != expected.failed {
+		if failed {
+			c.Errorf("%s has failed when it shouldn't", expected.name)
+		} else {
+			c.Errorf("%s has not failed when it should", expected.name)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go b/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go
new file mode 100644
index 0000000..bac3387
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go
@@ -0,0 +1,458 @@
+package check
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+)
+
+// -----------------------------------------------------------------------
+// CommentInterface and Commentf helper, to attach extra information to checks.
+
+type comment struct {
+	format string
+	args   []interface{}
+}
+
+// Commentf returns an informational value to use with Assert or Check calls.
+// If the checker test fails, the provided arguments will be passed to
+// fmt.Sprintf, and will be presented next to the logged failure.
+//
+// For example:
+//
+//     c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
+//
+// Note that if the comment is constant, a better option is to
+// simply use a normal comment right above or next to the line, as
+// it will also get printed with any errors:
+//
+//     c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
+//
+func Commentf(format string, args ...interface{}) CommentInterface {
+	return &comment{format, args}
+}
+
+// CommentInterface must be implemented by types that attach extra
+// information to failed checks. See the Commentf function for details.
+type CommentInterface interface {
+	CheckCommentString() string
+}
+
+func (c *comment) CheckCommentString() string {
+	return fmt.Sprintf(c.format, c.args...)
+}
+
+// -----------------------------------------------------------------------
+// The Checker interface.
+
+// The Checker interface must be provided by checkers used with
+// the Assert and Check verification methods.
+type Checker interface {
+	Info() *CheckerInfo
+	Check(params []interface{}, names []string) (result bool, error string)
+}
+
+// See the Checker interface.
+type CheckerInfo struct {
+	Name   string
+	Params []string
+}
+
+func (info *CheckerInfo) Info() *CheckerInfo {
+	return info
+}
+
+// -----------------------------------------------------------------------
+// Not checker logic inverter.
+
+// The Not checker inverts the logic of the provided checker. The
+// resulting checker will succeed where the original one failed, and
+// vice-versa.
+// +// For example: +// +// c.Assert(a, Not(Equals), b) +// +func Not(checker Checker) Checker { + return ¬Checker{checker} +} + +type notChecker struct { + sub Checker +} + +func (checker *notChecker) Info() *CheckerInfo { + info := *checker.sub.Info() + info.Name = "Not(" + info.Name + ")" + return &info +} + +func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { + result, error = checker.sub.Check(params, names) + result = !result + return +} + +// ----------------------------------------------------------------------- +// IsNil checker. + +type isNilChecker struct { + *CheckerInfo +} + +// The IsNil checker tests whether the obtained value is nil. +// +// For example: +// +// c.Assert(err, IsNil) +// +var IsNil Checker = &isNilChecker{ + &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, +} + +func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { + return isNil(params[0]), "" +} + +func isNil(obtained interface{}) (result bool) { + if obtained == nil { + result = true + } else { + switch v := reflect.ValueOf(obtained); v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + } + return +} + +// ----------------------------------------------------------------------- +// NotNil checker. Alias for Not(IsNil), since it's so common. + +type notNilChecker struct { + *CheckerInfo +} + +// The NotNil checker verifies that the obtained value is not nil. +// +// For example: +// +// c.Assert(iface, NotNil) +// +// This is an alias for Not(IsNil), made available since it's a +// fairly common check. +// +var NotNil Checker = ¬NilChecker{ + &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, +} + +func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { + return !isNil(params[0]), "" +} + +// ----------------------------------------------------------------------- +// Equals checker. + +type equalsChecker struct { + *CheckerInfo +} + +// The Equals checker verifies that the obtained value is equal to +// the expected value, according to usual Go semantics for ==. +// +// For example: +// +// c.Assert(value, Equals, 42) +// +var Equals Checker = &equalsChecker{ + &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, +} + +func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { + defer func() { + if v := recover(); v != nil { + result = false + error = fmt.Sprint(v) + } + }() + return params[0] == params[1], "" +} + +// ----------------------------------------------------------------------- +// DeepEquals checker. + +type deepEqualsChecker struct { + *CheckerInfo +} + +// The DeepEquals checker verifies that the obtained value is deep-equal to +// the expected value. The check will work correctly even when facing +// slices, interfaces, and values of different types (which always fail +// the test). +// +// For example: +// +// c.Assert(value, DeepEquals, 42) +// c.Assert(array, DeepEquals, []string{"hi", "there"}) +// +var DeepEquals Checker = &deepEqualsChecker{ + &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, +} + +func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { + return reflect.DeepEqual(params[0], params[1]), "" +} + +// ----------------------------------------------------------------------- +// HasLen checker. 
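+//
+// Editorial aside before the remaining built-in checkers: the Checker
+// interface above is open to user extension. A minimal custom checker
+// (hypothetical, not shipped with this package) could look like:
+//
+//	type isPositiveChecker struct{ *check.CheckerInfo }
+//
+//	var IsPositive check.Checker = &isPositiveChecker{
+//		&check.CheckerInfo{Name: "IsPositive", Params: []string{"obtained"}},
+//	}
+//
+//	func (c *isPositiveChecker) Check(params []interface{}, names []string) (bool, string) {
+//		n, ok := params[0].(int)
+//		if !ok {
+//			return false, "obtained value must be an int"
+//		}
+//		return n > 0, ""
+//	}
+//
+// which would then be used as c.Assert(x, IsPositive).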
+
+type hasLenChecker struct {
+	*CheckerInfo
+}
+
+// The HasLen checker verifies that the obtained value has the
+// provided length. In many cases this is superior to using Equals
+// in conjunction with the len function because in case the check
+// fails the value itself will be printed, instead of its length,
+// providing more details for figuring out the problem.
+//
+// For example:
+//
+//     c.Assert(list, HasLen, 5)
+//
+var HasLen Checker = &hasLenChecker{
+	&CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
+}
+
+func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	n, ok := params[1].(int)
+	if !ok {
+		return false, "n must be an int"
+	}
+	value := reflect.ValueOf(params[0])
+	switch value.Kind() {
+	case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
+	default:
+		return false, "obtained value type has no length"
+	}
+	return value.Len() == n, ""
+}
+
+// -----------------------------------------------------------------------
+// ErrorMatches checker.
+
+type errorMatchesChecker struct {
+	*CheckerInfo
+}
+
+// The ErrorMatches checker verifies that the error value
+// is non-nil and matches the regular expression provided.
+//
+// For example:
+//
+//     c.Assert(err, ErrorMatches, "perm.*denied")
+//
+var ErrorMatches Checker = errorMatchesChecker{
+	&CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
+}
+
+func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
+	if params[0] == nil {
+		return false, "Error value is nil"
+	}
+	err, ok := params[0].(error)
+	if !ok {
+		return false, "Value is not an error"
+	}
+	params[0] = err.Error()
+	names[0] = "error"
+	return matches(params[0], params[1])
+}
+
+// -----------------------------------------------------------------------
+// Matches checker.
+
+type matchesChecker struct {
+	*CheckerInfo
+}
+
+// The Matches checker verifies that the string provided as the obtained
+// value (or the string resulting from obtained.String()) matches the
+// regular expression provided.
+//
+// For example:
+//
+//     c.Assert(err, Matches, "perm.*denied")
+//
+var Matches Checker = &matchesChecker{
+	&CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
+}
+
+func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	return matches(params[0], params[1])
+}
+
+func matches(value, regex interface{}) (result bool, error string) {
+	reStr, ok := regex.(string)
+	if !ok {
+		return false, "Regex must be a string"
+	}
+	valueStr, valueIsStr := value.(string)
+	if !valueIsStr {
+		if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
+			valueStr, valueIsStr = valueWithStr.String(), true
+		}
+	}
+	if valueIsStr {
+		matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
+		if err != nil {
+			return false, "Can't compile regex: " + err.Error()
+		}
+		return matches, ""
+	}
+	return false, "Obtained value is not a string and has no .String()"
+}
+
+// -----------------------------------------------------------------------
+// Panics checker.
+
+type panicsChecker struct {
+	*CheckerInfo
+}
+
+// The Panics checker verifies that calling the provided zero-argument
+// function will cause a panic which is deep-equal to the provided value.
+//
+// For example:
+//
+//     c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
+// +// +var Panics Checker = &panicsChecker{ + &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, +} + +func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { + f := reflect.ValueOf(params[0]) + if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { + return false, "Function must take zero arguments" + } + defer func() { + // If the function has not panicked, then don't do the check. + if error != "" { + return + } + params[0] = recover() + names[0] = "panic" + result = reflect.DeepEqual(params[0], params[1]) + }() + f.Call(nil) + return false, "Function has not panicked" +} + +type panicMatchesChecker struct { + *CheckerInfo +} + +// The PanicMatches checker verifies that calling the provided zero-argument +// function will cause a panic with an error value matching +// the regular expression provided. +// +// For example: +// +// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). +// +// +var PanicMatches Checker = &panicMatchesChecker{ + &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, +} + +func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { + f := reflect.ValueOf(params[0]) + if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { + return false, "Function must take zero arguments" + } + defer func() { + // If the function has not panicked, then don't do the check. + if errmsg != "" { + return + } + obtained := recover() + names[0] = "panic" + if e, ok := obtained.(error); ok { + params[0] = e.Error() + } else if _, ok := obtained.(string); ok { + params[0] = obtained + } else { + errmsg = "Panic value is not a string or an error" + return + } + result, errmsg = matches(params[0], params[1]) + }() + f.Call(nil) + return false, "Function has not panicked" +} + +// ----------------------------------------------------------------------- +// FitsTypeOf checker. + +type fitsTypeChecker struct { + *CheckerInfo +} + +// The FitsTypeOf checker verifies that the obtained value is +// assignable to a variable with the same type as the provided +// sample value. +// +// For example: +// +// c.Assert(value, FitsTypeOf, int64(0)) +// c.Assert(value, FitsTypeOf, os.Error(nil)) +// +var FitsTypeOf Checker = &fitsTypeChecker{ + &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, +} + +func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { + obtained := reflect.ValueOf(params[0]) + sample := reflect.ValueOf(params[1]) + if !obtained.IsValid() { + return false, "" + } + if !sample.IsValid() { + return false, "Invalid sample value" + } + return obtained.Type().AssignableTo(sample.Type()), "" +} + +// ----------------------------------------------------------------------- +// Implements checker. + +type implementsChecker struct { + *CheckerInfo +} + +// The Implements checker verifies that the obtained value +// implements the interface specified via a pointer to an interface +// variable. 
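+// A pointer is needed because an interface type cannot be passed around as a +// value by itself; the checker recovers the interface type by reflecting on +// the pointer's element type (see the Check method below).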
+// +// For example: +// +// var e os.Error +// c.Assert(err, Implements, &e) +// +var Implements Checker = &implementsChecker{ + &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, +} + +func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { + obtained := reflect.ValueOf(params[0]) + ifaceptr := reflect.ValueOf(params[1]) + if !obtained.IsValid() { + return false, "" + } + if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { + return false, "ifaceptr should be a pointer to an interface variable" + } + return obtained.Type().Implements(ifaceptr.Elem().Type()), "" +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go new file mode 100644 index 0000000..d71a88f --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go @@ -0,0 +1,272 @@ +package check_test + +import ( + "errors" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" + "reflect" + "runtime" +) + +type CheckersS struct{} + +var _ = check.Suite(&CheckersS{}) + +func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) { + info := checker.Info() + if info.Name != name { + c.Fatalf("Got name %s, expected %s", info.Name, name) + } + if !reflect.DeepEqual(info.Params, paramNames) { + c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames) + } +} + +func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) { + info := checker.Info() + if len(params) != len(info.Params) { + c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params)) + } + names := append([]string{}, info.Params...) 
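+ // A fresh copy of the parameter names is passed because Check is allowed + // to mutate both params and names (ErrorMatches and Panics do exactly that).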
+ result_, error_ := checker.Check(params, names) + if result_ != result || error_ != error { + c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)", + info.Name, params, result_, error_, result, error) + } + return params, names +} + +func (s *CheckersS) TestComment(c *check.C) { + bug := check.Commentf("a %d bc", 42) + comment := bug.CheckCommentString() + if comment != "a 42 bc" { + c.Fatalf("Commentf returned %#v", comment) + } +} + +func (s *CheckersS) TestIsNil(c *check.C) { + testInfo(c, check.IsNil, "IsNil", []string{"value"}) + + testCheck(c, check.IsNil, true, "", nil) + testCheck(c, check.IsNil, false, "", "a") + + testCheck(c, check.IsNil, true, "", (chan int)(nil)) + testCheck(c, check.IsNil, false, "", make(chan int)) + testCheck(c, check.IsNil, true, "", (error)(nil)) + testCheck(c, check.IsNil, false, "", errors.New("")) + testCheck(c, check.IsNil, true, "", ([]int)(nil)) + testCheck(c, check.IsNil, false, "", make([]int, 1)) + testCheck(c, check.IsNil, false, "", int(0)) +} + +func (s *CheckersS) TestNotNil(c *check.C) { + testInfo(c, check.NotNil, "NotNil", []string{"value"}) + + testCheck(c, check.NotNil, false, "", nil) + testCheck(c, check.NotNil, true, "", "a") + + testCheck(c, check.NotNil, false, "", (chan int)(nil)) + testCheck(c, check.NotNil, true, "", make(chan int)) + testCheck(c, check.NotNil, false, "", (error)(nil)) + testCheck(c, check.NotNil, true, "", errors.New("")) + testCheck(c, check.NotNil, false, "", ([]int)(nil)) + testCheck(c, check.NotNil, true, "", make([]int, 1)) +} + +func (s *CheckersS) TestNot(c *check.C) { + testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"}) + + testCheck(c, check.Not(check.IsNil), false, "", nil) + testCheck(c, check.Not(check.IsNil), true, "", "a") +} + +type simpleStruct struct { + i int +} + +func (s *CheckersS) TestEquals(c *check.C) { + testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"}) + + // The simplest. + testCheck(c, check.Equals, true, "", 42, 42) + testCheck(c, check.Equals, false, "", 42, 43) + + // Different native types. + testCheck(c, check.Equals, false, "", int32(42), int64(42)) + + // With nil. + testCheck(c, check.Equals, false, "", 42, nil) + + // Slices + testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2}) + + // Struct values + testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1}) + testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2}) + + // Struct pointers + testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1}) + testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2}) +} + +func (s *CheckersS) TestDeepEquals(c *check.C) { + testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"}) + + // The simplest. + testCheck(c, check.DeepEquals, true, "", 42, 42) + testCheck(c, check.DeepEquals, false, "", 42, 43) + + // Different native types. + testCheck(c, check.DeepEquals, false, "", int32(42), int64(42)) + + // With nil. 
+ testCheck(c, check.DeepEquals, false, "", 42, nil) + + // Slices + testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2}) + testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3}) + + // Struct values + testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1}) + testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2}) + + // Struct pointers + testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1}) + testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2}) +} + +func (s *CheckersS) TestHasLen(c *check.C) { + testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"}) + + testCheck(c, check.HasLen, true, "", "abcd", 4) + testCheck(c, check.HasLen, true, "", []int{1, 2}, 2) + testCheck(c, check.HasLen, false, "", []int{1, 2}, 3) + + testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2") + testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2) +} + +func (s *CheckersS) TestErrorMatches(c *check.C) { + testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", "regex"}) + + testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error") + testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error") + testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error") + testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or") + + // Verify params mutation + params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error") + c.Assert(params[0], check.Equals, "some error") + c.Assert(names[0], check.Equals, "error") +} + +func (s *CheckersS) TestMatches(c *check.C) { + testInfo(c, check.Matches, "Matches", []string{"value", "regex"}) + + // Simple matching + testCheck(c, check.Matches, true, "", "abc", "abc") + testCheck(c, check.Matches, true, "", "abc", "a.c") + + // Must match fully + testCheck(c, check.Matches, false, "", "abc", "ab") + testCheck(c, check.Matches, false, "", "abc", "bc") + + // String()-enabled values accepted + testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c") + testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d") + + // Some error conditions. + testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c") + testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c") +} + +func (s *CheckersS) TestPanics(c *check.C) { + testInfo(c, check.Panics, "Panics", []string{"function", "expected"}) + + // Some errors. + testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM") + testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM") + + // Plain strings. + testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM") + testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM") + testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM") + + // Error values. 
+ testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM")) + testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) + + type deep struct{ i int } + // Deep value + testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99}) + + // Verify params/names mutation + params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) + c.Assert(params[0], check.ErrorMatches, "KABOOM") + c.Assert(names[0], check.Equals, "panic") + + // Verify a nil panic + testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil) + testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE") +} + +func (s *CheckersS) TestPanicMatches(c *check.C) { + testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"}) + + // Error matching. + testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M") + testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M") + + // Some errors. + testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM") + testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM") + + // Plain strings. + testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M") + testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM") + testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M") + + // Verify params/names mutation + params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM") + c.Assert(params[0], check.Equals, "KABOOM") + c.Assert(names[0], check.Equals, "panic") + + // Verify a nil panic + testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "") +} + +func (s *CheckersS) TestFitsTypeOf(c *check.C) { + testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"}) + + // Basic types + testCheck(c, check.FitsTypeOf, true, "", 1, 0) + testCheck(c, check.FitsTypeOf, false, "", 1, int64(0)) + + // Aliases + testCheck(c, check.FitsTypeOf, false, "", 1, errors.New("")) + testCheck(c, check.FitsTypeOf, false, "", "error", errors.New("")) + testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New("")) + + // Structures + testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{}) + testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{}) + testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{}) + testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{}) + + // Some bad values + testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil)) + testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0) +} + +func (s *CheckersS) TestImplements(c *check.C) { + testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"}) + + var e error + var re runtime.Error + testCheck(c, check.Implements, true, "", errors.New(""), &e) + testCheck(c, check.Implements, false, "", errors.New(""), &re) + + // Some bad values + testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, errors.New("")) + testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, interface{}(nil)) + testCheck(c, check.Implements, false, "", interface{}(nil), &e) +} diff 
--git a/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go new file mode 100644 index 0000000..0e6cfe0 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go @@ -0,0 +1,9 @@ +package check + +func PrintLine(filename string, line int) (string, error) { + return printLine(filename, line) +} + +func Indent(s, with string) string { + return indent(s, with) +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go new file mode 100644 index 0000000..0b2e7b5 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go @@ -0,0 +1,479 @@ +// Tests for the behavior of the test fixture system. + +package check_test + +import ( + . "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" +) + +// ----------------------------------------------------------------------- +// Fixture test suite. + +type FixtureS struct{} + +var fixtureS = Suite(&FixtureS{}) + +func (s *FixtureS) TestCountSuite(c *C) { + suitesRun += 1 +} + +// ----------------------------------------------------------------------- +// Basic fixture ordering verification. + +func (s *FixtureS) TestOrder(c *C) { + helper := FixtureHelper{} + Run(&helper, nil) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) +} + +// ----------------------------------------------------------------------- +// Check the behavior when panics occur within tests and fixtures. + +func (s *FixtureS) TestPanicOnTest(c *C) { + helper := FixtureHelper{panicOn: "Test1"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" + + "\\.\\.\\. Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.Test1\n$" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnSetUpTest(c *C) { + helper := FixtureHelper{panicOn: "SetUpTest"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "TearDownTest") + c.Check(helper.calls[3], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 4) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper\\.SetUpTest\n\n" + + "\\.\\.\\. 
Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.SetUpTest\n" + + "\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper\\.Test1\n\n" + + "\\.\\.\\. Panic: Fixture has panicked " + + "\\(see related PANIC\\)\n$" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnTearDownTest(c *C) { + helper := FixtureHelper{panicOn: "TearDownTest"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 5) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper.TearDownTest\n\n" + + "\\.\\.\\. Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.TearDownTest\n" + + "\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper\\.Test1\n\n" + + "\\.\\.\\. Panic: Fixture has panicked " + + "\\(see related PANIC\\)\n$" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnSetUpSuite(c *C) { + helper := FixtureHelper{panicOn: "SetUpSuite"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 2) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper.SetUpSuite\n\n" + + "\\.\\.\\. Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.SetUpSuite\n$" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnTearDownSuite(c *C) { + helper := FixtureHelper{panicOn: "TearDownSuite"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper.TearDownSuite\n\n" + + "\\.\\.\\. Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.TearDownSuite\n$" + + c.Check(output.value, Matches, expected) +} + +// ----------------------------------------------------------------------- +// A wrong argument on a test or fixture will produce a nice error. 
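+// (These tests drive suites whose fixture or test methods take something +// other than *C; the Wrong*Helper types they use are defined after the +// tests below.)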
+ +func (s *FixtureS) TestPanicOnWrongTestArg(c *C) { + helper := WrongTestArgHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "TearDownTest") + c.Check(helper.calls[3], Equals, "SetUpTest") + c.Check(helper.calls[4], Equals, "Test2") + c.Check(helper.calls[5], Equals, "TearDownTest") + c.Check(helper.calls[6], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 7) + + expected := "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongTestArgHelper\\.Test1\n\n" + + "\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) { + helper := WrongSetUpTestArgHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(len(helper.calls), Equals, 0) + + expected := + "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongSetUpTestArgHelper\\.SetUpTest\n\n" + + "\\.\\.\\. Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) { + helper := WrongSetUpSuiteArgHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(len(helper.calls), Equals, 0) + + expected := + "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" + + "\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +// ----------------------------------------------------------------------- +// Nice errors also when tests or fixture have wrong arg count. + +func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) { + helper := WrongTestArgCountHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "TearDownTest") + c.Check(helper.calls[3], Equals, "SetUpTest") + c.Check(helper.calls[4], Equals, "Test2") + c.Check(helper.calls[5], Equals, "TearDownTest") + c.Check(helper.calls[6], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 7) + + expected := "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongTestArgCountHelper\\.Test1\n\n" + + "\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) { + helper := WrongSetUpTestArgCountHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(len(helper.calls), Equals, 0) + + expected := + "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" + + "\\.\\.\\. Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) { + helper := WrongSetUpSuiteArgCountHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(len(helper.calls), Equals, 0) + + expected := + "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" + + "\\.\\.\\. 
Panic: WrongSetUpSuiteArgCountHelper" + + "\\.SetUpSuite argument should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +// ----------------------------------------------------------------------- +// Helper test suites with wrong function arguments. + +type WrongTestArgHelper struct { + FixtureHelper +} + +func (s *WrongTestArgHelper) Test1(t int) { +} + +type WrongSetUpTestArgHelper struct { + FixtureHelper +} + +func (s *WrongSetUpTestArgHelper) SetUpTest(t int) { +} + +type WrongSetUpSuiteArgHelper struct { + FixtureHelper +} + +func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) { +} + +type WrongTestArgCountHelper struct { + FixtureHelper +} + +func (s *WrongTestArgCountHelper) Test1(c *C, i int) { +} + +type WrongSetUpTestArgCountHelper struct { + FixtureHelper +} + +func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) { +} + +type WrongSetUpSuiteArgCountHelper struct { + FixtureHelper +} + +func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) { +} + +// ----------------------------------------------------------------------- +// Ensure fixture doesn't run without tests. + +type NoTestsHelper struct { + hasRun bool +} + +func (s *NoTestsHelper) SetUpSuite(c *C) { + s.hasRun = true +} + +func (s *NoTestsHelper) TearDownSuite(c *C) { + s.hasRun = true +} + +func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) { + helper := NoTestsHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.hasRun, Equals, false) +} + +// ----------------------------------------------------------------------- +// Verify that checks and assertions work correctly inside the fixture. + +type FixtureCheckHelper struct { + fail string + completed bool +} + +func (s *FixtureCheckHelper) SetUpSuite(c *C) { + switch s.fail { + case "SetUpSuiteAssert": + c.Assert(false, Equals, true) + case "SetUpSuiteCheck": + c.Check(false, Equals, true) + } + s.completed = true +} + +func (s *FixtureCheckHelper) SetUpTest(c *C) { + switch s.fail { + case "SetUpTestAssert": + c.Assert(false, Equals, true) + case "SetUpTestCheck": + c.Check(false, Equals, true) + } + s.completed = true +} + +func (s *FixtureCheckHelper) Test(c *C) { + // Do nothing. +} + +func (s *FixtureS) TestSetUpSuiteCheck(c *C) { + helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Assert(output.value, Matches, + "\n---+\n"+ + "FAIL: fixture_test\\.go:[0-9]+: "+ + "FixtureCheckHelper\\.SetUpSuite\n\n"+ + "fixture_test\\.go:[0-9]+:\n"+ + " c\\.Check\\(false, Equals, true\\)\n"+ + "\\.+ obtained bool = false\n"+ + "\\.+ expected bool = true\n\n") + c.Assert(helper.completed, Equals, true) +} + +func (s *FixtureS) TestSetUpSuiteAssert(c *C) { + helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Assert(output.value, Matches, + "\n---+\n"+ + "FAIL: fixture_test\\.go:[0-9]+: "+ + "FixtureCheckHelper\\.SetUpSuite\n\n"+ + "fixture_test\\.go:[0-9]+:\n"+ + " c\\.Assert\\(false, Equals, true\\)\n"+ + "\\.+ obtained bool = false\n"+ + "\\.+ expected bool = true\n\n") + c.Assert(helper.completed, Equals, false) +} + +// ----------------------------------------------------------------------- +// Verify that logging within SetUpTest() persists within the test log itself. 
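+// (The helper logs "1" in SetUpTest, "2" through "4" in Test, and "5" in +// TearDownTest, so the failing test should produce the log 1 through 5 in +// order.)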
+ +type FixtureLogHelper struct { + c *C +} + +func (s *FixtureLogHelper) SetUpTest(c *C) { + s.c = c + c.Log("1") +} + +func (s *FixtureLogHelper) Test(c *C) { + c.Log("2") + s.c.Log("3") + c.Log("4") + c.Fail() +} + +func (s *FixtureLogHelper) TearDownTest(c *C) { + s.c.Log("5") +} + +func (s *FixtureS) TestFixtureLogging(c *C) { + helper := FixtureLogHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Assert(output.value, Matches, + "\n---+\n"+ + "FAIL: fixture_test\\.go:[0-9]+: "+ + "FixtureLogHelper\\.Test\n\n"+ + "1\n2\n3\n4\n5\n") +} + +// ----------------------------------------------------------------------- +// Skip() within fixture methods. + +func (s *FixtureS) TestSkipSuite(c *C) { + helper := FixtureHelper{skip: true, skipOnN: 0} + output := String{} + result := Run(&helper, &RunConf{Output: &output}) + c.Assert(output.value, Equals, "") + c.Assert(helper.calls[0], Equals, "SetUpSuite") + c.Assert(helper.calls[1], Equals, "TearDownSuite") + c.Assert(len(helper.calls), Equals, 2) + c.Assert(result.Skipped, Equals, 2) +} + +func (s *FixtureS) TestSkipTest(c *C) { + helper := FixtureHelper{skip: true, skipOnN: 1} + output := String{} + result := Run(&helper, &RunConf{Output: &output}) + c.Assert(helper.calls[0], Equals, "SetUpSuite") + c.Assert(helper.calls[1], Equals, "SetUpTest") + c.Assert(helper.calls[2], Equals, "SetUpTest") + c.Assert(helper.calls[3], Equals, "Test2") + c.Assert(helper.calls[4], Equals, "TearDownTest") + c.Assert(helper.calls[5], Equals, "TearDownSuite") + c.Assert(len(helper.calls), Equals, 6) + c.Assert(result.Skipped, Equals, 1) +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go new file mode 100644 index 0000000..351a4e8 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go @@ -0,0 +1,335 @@ +// These tests check that the foundations of gocheck are working properly. +// They already assume that fundamental failing is working already, though, +// since this was tested in bootstrap_test.go. Even then, some care may +// still have to be taken when using external functions, since they should +// of course not rely on functionality tested here. + +package check_test + +import ( + "fmt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" + "log" + "os" + "regexp" + "strings" +) + +// ----------------------------------------------------------------------- +// Foundation test suite. + +type FoundationS struct{} + +var foundationS = check.Suite(&FoundationS{}) + +func (s *FoundationS) TestCountSuite(c *check.C) { + suitesRun += 1 +} + +func (s *FoundationS) TestErrorf(c *check.C) { + // Do not use checkState() here. It depends on Errorf() working. + expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ + " c.Errorf(\"Error %%v!\", \"message\")\n"+ + "... Error: Error message!\n\n", + getMyLine()+1) + c.Errorf("Error %v!", "message") + failed := c.Failed() + c.Succeed() + if log := c.GetTestLog(); log != expectedLog { + c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog) + c.Fail() + } + if !failed { + c.Logf("Errorf() didn't put the test in a failed state") + c.Fail() + } +} + +func (s *FoundationS) TestError(c *check.C) { + expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ + " c\\.Error\\(\"Error \", \"message!\"\\)\n"+ + "\\.\\.\\. 
Error: Error message!\n\n", + getMyLine()+1) + c.Error("Error ", "message!") + checkState(c, nil, + &expectedState{ + name: "Error(`Error `, `message!`)", + failed: true, + log: expectedLog, + }) +} + +func (s *FoundationS) TestFailNow(c *check.C) { + defer (func() { + if !c.Failed() { + c.Error("FailNow() didn't fail the test") + } else { + c.Succeed() + if c.GetTestLog() != "" { + c.Error("Something got logged:\n" + c.GetTestLog()) + } + } + })() + + c.FailNow() + c.Log("FailNow() didn't stop the test") +} + +func (s *FoundationS) TestSucceedNow(c *check.C) { + defer (func() { + if c.Failed() { + c.Error("SucceedNow() didn't succeed the test") + } + if c.GetTestLog() != "" { + c.Error("Something got logged:\n" + c.GetTestLog()) + } + })() + + c.Fail() + c.SucceedNow() + c.Log("SucceedNow() didn't stop the test") +} + +func (s *FoundationS) TestFailureHeader(c *check.C) { + output := String{} + failHelper := FailHelper{} + check.Run(&failHelper, &check.RunConf{Output: &output}) + header := fmt.Sprintf(""+ + "\n-----------------------------------"+ + "-----------------------------------\n"+ + "FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n", + failHelper.testLine) + if strings.Index(output.value, header) == -1 { + c.Errorf(""+ + "Failure didn't print a proper header.\n"+ + "... Got:\n%s... Expected something with:\n%s", + output.value, header) + } +} + +func (s *FoundationS) TestFatal(c *check.C) { + var line int + defer (func() { + if !c.Failed() { + c.Error("Fatal() didn't fail the test") + } else { + c.Succeed() + expected := fmt.Sprintf("foundation_test.go:%d:\n"+ + " c.Fatal(\"Die \", \"now!\")\n"+ + "... Error: Die now!\n\n", + line) + if c.GetTestLog() != expected { + c.Error("Incorrect log:", c.GetTestLog()) + } + } + })() + + line = getMyLine() + 1 + c.Fatal("Die ", "now!") + c.Log("Fatal() didn't stop the test") +} + +func (s *FoundationS) TestFatalf(c *check.C) { + var line int + defer (func() { + if !c.Failed() { + c.Error("Fatalf() didn't fail the test") + } else { + c.Succeed() + expected := fmt.Sprintf("foundation_test.go:%d:\n"+ + " c.Fatalf(\"Die %%s!\", \"now\")\n"+ + "... Error: Die now!\n\n", + line) + if c.GetTestLog() != expected { + c.Error("Incorrect log:", c.GetTestLog()) + } + } + })() + + line = getMyLine() + 1 + c.Fatalf("Die %s!", "now") + c.Log("Fatalf() didn't stop the test") +} + +func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) { + log := fmt.Sprintf(""+ + "foundation_test.go:%d:\n"+ + " result := c.Check\\(10, check.Equals, 20\\)\n"+ + "\\.\\.\\. obtained int = 10\n"+ + "\\.\\.\\. expected int = 20\n\n", + getMyLine()+1) + result := c.Check(10, check.Equals, 20) + checkState(c, result, + &expectedState{ + name: "Check(10, Equals, 20)", + result: false, + failed: true, + log: log, + }) +} + +func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) { + result, line := checkEqualWrapper(c, 10, 20) + testLine := getMyLine() - 1 + log := fmt.Sprintf(""+ + "foundation_test.go:%d:\n"+ + " result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+ + "check_test.go:%d:\n"+ + " return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+ + "\\.\\.\\. obtained int = 10\n"+ + "\\.\\.\\. expected int = 20\n\n", + testLine, line) + checkState(c, result, + &expectedState{ + name: "Check(10, Equals, 20)", + result: false, + failed: true, + log: log, + }) +} + +// ----------------------------------------------------------------------- +// ExpectFailure() inverts the logic of failure. 
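+// (A test that fails after calling ExpectFailure counts as an expected +// failure and produces no output, while one that unexpectedly succeeds is +// reported as an error, as the helpers below verify.)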
+ +type ExpectFailureSucceedHelper struct{} + +func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) { + c.ExpectFailure("It booms!") + c.Error("Boom!") +} + +type ExpectFailureFailHelper struct{} + +func (s *ExpectFailureFailHelper) TestFail(c *check.C) { + c.ExpectFailure("Bug #XYZ") +} + +func (s *FoundationS) TestExpectFailureFail(c *check.C) { + helper := ExpectFailureFailHelper{} + output := String{} + result := check.Run(&helper, &check.RunConf{Output: &output}) + + expected := "" + + "^\n-+\n" + + "FAIL: foundation_test\\.go:[0-9]+:" + + " ExpectFailureFailHelper\\.TestFail\n\n" + + "\\.\\.\\. Error: Test succeeded, but was expected to fail\n" + + "\\.\\.\\. Reason: Bug #XYZ\n$" + + matched, err := regexp.MatchString(expected, output.value) + if err != nil { + c.Error("Bad expression: ", expected) + } else if !matched { + c.Error("ExpectFailure() didn't log properly:\n", output.value) + } + + c.Assert(result.ExpectedFailures, check.Equals, 0) +} + +func (s *FoundationS) TestExpectFailureSucceed(c *check.C) { + helper := ExpectFailureSucceedHelper{} + output := String{} + result := check.Run(&helper, &check.RunConf{Output: &output}) + + c.Assert(output.value, check.Equals, "") + c.Assert(result.ExpectedFailures, check.Equals, 1) +} + +func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) { + helper := ExpectFailureSucceedHelper{} + output := String{} + result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) + + expected := "" + + "FAIL EXPECTED: foundation_test\\.go:[0-9]+:" + + " ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n" + + matched, err := regexp.MatchString(expected, output.value) + if err != nil { + c.Error("Bad expression: ", expected) + } else if !matched { + c.Error("ExpectFailure() didn't log properly:\n", output.value) + } + + c.Assert(result.ExpectedFailures, check.Equals, 1) +} + +// ----------------------------------------------------------------------- +// Skip() allows stopping a test without positive/negative results. + +type SkipTestHelper struct{} + +func (s *SkipTestHelper) TestFail(c *check.C) { + c.Skip("Wrong platform or whatever") + c.Error("Boom!") +} + +func (s *FoundationS) TestSkip(c *check.C) { + helper := SkipTestHelper{} + output := String{} + check.Run(&helper, &check.RunConf{Output: &output}) + + if output.value != "" { + c.Error("Skip() logged something:\n", output.value) + } +} + +func (s *FoundationS) TestSkipVerbose(c *check.C) { + helper := SkipTestHelper{} + output := String{} + check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) + + expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" + + " \\(Wrong platform or whatever\\)" + matched, err := regexp.MatchString(expected, output.value) + if err != nil { + c.Error("Bad expression: ", expected) + } else if !matched { + c.Error("Skip() didn't log properly:\n", output.value) + } +} + +// ----------------------------------------------------------------------- +// Check minimum *log.Logger interface provided by *check.C. 
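+// (*check.C implements Output(calldepth int, s string), the single method +// needed here, so it can stand in for a *log.Logger wherever only that +// minimal interface is required.)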
+ +type minLogger interface { + Output(calldepth int, s string) error +} + +func (s *BootstrapS) TestMinLogger(c *check.C) { + var logger minLogger + logger = log.New(os.Stderr, "", 0) + logger = c + logger.Output(0, "Hello there") + expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n` + output := c.GetTestLog() + c.Assert(output, check.Matches, expected) +} + +// ----------------------------------------------------------------------- +// Ensure that suites with embedded types are working fine, including +// the workaround for issue 906. + +type EmbeddedInternalS struct { + called bool +} + +type EmbeddedS struct { + EmbeddedInternalS +} + +var embeddedS = check.Suite(&EmbeddedS{}) + +func (s *EmbeddedS) TestCountSuite(c *check.C) { + suitesRun += 1 +} + +func (s *EmbeddedInternalS) TestMethod(c *check.C) { + c.Error("TestMethod() of the embedded type was called!?") +} + +func (s *EmbeddedS) TestMethod(c *check.C) { + // http://code.google.com/p/go/issues/detail?id=906 + c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner? + s.called = true +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go b/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go new file mode 100644 index 0000000..4b6c26d --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go @@ -0,0 +1,231 @@ +package check + +import ( + "fmt" + "strings" + "time" +) + +// TestName returns the current test name in the form "SuiteName.TestName" +func (c *C) TestName() string { + return c.testName +} + +// ----------------------------------------------------------------------- +// Basic succeeding/failing logic. + +// Failed returns whether the currently running test has already failed. +func (c *C) Failed() bool { + return c.status == failedSt +} + +// Fail marks the currently running test as failed. +// +// Something ought to have been previously logged so the developer can tell +// what went wrong. The higher level helper functions will fail the test +// and do the logging properly. +func (c *C) Fail() { + c.status = failedSt +} + +// FailNow marks the currently running test as failed and stops running it. +// Something ought to have been previously logged so the developer can tell +// what went wrong. The higher level helper functions will fail the test +// and do the logging properly. +func (c *C) FailNow() { + c.Fail() + c.stopNow() +} + +// Succeed marks the currently running test as succeeded, undoing any +// previous failures. +func (c *C) Succeed() { + c.status = succeededSt +} + +// SucceedNow marks the currently running test as succeeded, undoing any +// previous failures, and stops running the test. +func (c *C) SucceedNow() { + c.Succeed() + c.stopNow() +} + +// ExpectFailure informs that the running test is knowingly broken for +// the provided reason. If the test does not fail, an error will be reported +// to raise attention to this fact. This method is useful to temporarily +// disable tests which cover well-known problems until a better time to +// fix the problem is found, without forgetting about the fact that a +// failure still exists. +func (c *C) ExpectFailure(reason string) { + if reason == "" { + panic("Missing reason why the test is expected to fail") + } + c.mustFail = true + c.reason = reason +} + +// Skip skips the running test for the provided reason. If run from within +// SetUpTest, the individual test being set up will be skipped, and if run +// from within SetUpSuite, the whole suite is skipped.
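+// +// An illustrative sketch (not part of the upstream doc; the suite name and +// GOOS guard are hypothetical): +// +// func (s *MySuite) TestLinuxOnly(c *C) { +// if runtime.GOOS != "linux" { +// c.Skip("linux-specific test") +// } +// ... +// }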
+func (c *C) Skip(reason string) { + if reason == "" { + panic("Missing reason why the test is being skipped") + } + c.reason = reason + c.status = skippedSt + c.stopNow() +} + +// ----------------------------------------------------------------------- +// Basic logging. + +// GetTestLog returns the current test error output. +func (c *C) GetTestLog() string { + return c.logb.String() +} + +// Log logs some information into the test error output. +// The provided arguments are assembled together into a string with fmt.Sprint. +func (c *C) Log(args ...interface{}) { + c.log(args...) +} + +// Logf logs some information into the test error output. +// The provided arguments are assembled together into a string with fmt.Sprintf. +func (c *C) Logf(format string, args ...interface{}) { + c.logf(format, args...) +} + +// Output enables *C to be used as a logger in functions that require only +// the minimum interface of *log.Logger. +func (c *C) Output(calldepth int, s string) error { + d := time.Now().Sub(c.startTime) + msec := d / time.Millisecond + sec := d / time.Second + min := d / time.Minute + + c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s) + return nil +} + +// Error logs an error into the test error output and marks the test as failed. +// The provided arguments are assembled together into a string with fmt.Sprint. +func (c *C) Error(args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) + c.logNewLine() + c.Fail() +} + +// Errorf logs an error into the test error output and marks the test as failed. +// The provided arguments are assembled together into a string with fmt.Sprintf. +func (c *C) Errorf(format string, args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprintf("Error: "+format, args...)) + c.logNewLine() + c.Fail() +} + +// Fatal logs an error into the test error output, marks the test as failed, and +// stops the test execution. The provided arguments are assembled together into +// a string with fmt.Sprint. +func (c *C) Fatal(args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) + c.logNewLine() + c.FailNow() +} + +// Fatalf logs an error into the test error output, marks the test as failed, and +// stops the test execution. The provided arguments are assembled together into +// a string with fmt.Sprintf. +func (c *C) Fatalf(format string, args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...))) + c.logNewLine() + c.FailNow() +} + +// ----------------------------------------------------------------------- +// Generic checks and assertions based on checkers. + +// Check verifies if the first value matches the expected value according +// to the provided checker. If they do not match, an error is logged, the +// test is marked as failed, and the test execution continues. +// +// Some checkers may not need the expected argument (e.g. IsNil). +// +// Extra arguments provided to the function are logged next to the reported +// problem when the matching fails. +func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool { + return c.internalCheck("Check", obtained, checker, args...) +} + +// Assert ensures that the first value matches the expected value according +// to the provided checker. If they do not match, an error is logged, the +// test is marked as failed, and the test execution stops. +// +// Some checkers may not need the expected argument (e.g. IsNil).
+// +// Extra arguments provided to the function are logged next to the reported +// problem when the matching fails. +func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { + if !c.internalCheck("Assert", obtained, checker, args...) { + c.stopNow() + } +} + +func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { + if checker == nil { + c.logCaller(2) + c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) + c.logString("Oops.. you've provided a nil checker!") + c.logNewLine() + c.Fail() + return false + } + + // If the last argument is a bug info, extract it out. + var comment CommentInterface + if len(args) > 0 { + if c, ok := args[len(args)-1].(CommentInterface); ok { + comment = c + args = args[:len(args)-1] + } + } + + params := append([]interface{}{obtained}, args...) + info := checker.Info() + + if len(params) != len(info.Params) { + names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) + c.logCaller(2) + c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) + c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) + c.logNewLine() + c.Fail() + return false + } + + // Copy since it may be mutated by Check. + names := append([]string{}, info.Params...) + + // Do the actual check. + result, error := checker.Check(params, names) + if !result || error != "" { + c.logCaller(2) + for i := 0; i != len(params); i++ { + c.logValue(names[i], params[i]) + } + if comment != nil { + c.logString(comment.CheckCommentString()) + } + if error != "" { + c.logString(error) + } + c.logNewLine() + c.Fail() + return false + } + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go new file mode 100644 index 0000000..e999268 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go @@ -0,0 +1,519 @@ +// These tests verify the inner workings of the helper methods associated +// with check.T. + +package check_test + +import ( + "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" + "os" + "reflect" + "runtime" + "sync" +) + +var helpersS = check.Suite(&HelpersS{}) + +type HelpersS struct{} + +func (s *HelpersS) TestCountSuite(c *check.C) { + suitesRun += 1 +} + +// ----------------------------------------------------------------------- +// Fake checker and bug info to verify the behavior of Assert() and Check(). + +type MyChecker struct { + info *check.CheckerInfo + params []interface{} + names []string + result bool + error string +} + +func (checker *MyChecker) Info() *check.CheckerInfo { + if checker.info == nil { + return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}} + } + return checker.info +} + +func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) { + rparams := checker.params + rnames := checker.names + checker.params = append([]interface{}{}, params...) + checker.names = append([]string{}, names...) 
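+ // After recording the inputs above, copy any preset params/names back + // over the caller's slices to emulate checkers that mutate their + // arguments (exercised by TestCheckWithParamsAndNamesMutation below).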
+ if rparams != nil { + copy(params, rparams) + } + if rnames != nil { + copy(names, rnames) + } + return checker.result, checker.error +} + +type myCommentType string + +func (c myCommentType) CheckCommentString() string { + return string(c) +} + +func myComment(s string) myCommentType { + return myCommentType(s) +} + +// ----------------------------------------------------------------------- +// Ensure a real checker actually works fine. + +func (s *HelpersS) TestCheckerInterface(c *check.C) { + testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} { + return c.Check(1, check.Equals, 1) + }) +} + +// ----------------------------------------------------------------------- +// Tests for Check(), mostly the same as for Assert() following these. + +func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) { + checker := &MyChecker{result: true} + testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} { + return c.Check(1, checker, 2) + }) + if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { + c.Fatalf("Bad params for check: %#v", checker.params) + } +} + +func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) { + checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + testHelperSuccess(c, "Check(1, checker)", true, func() interface{} { + return c.Check(1, checker) + }) + if !reflect.DeepEqual(checker.params, []interface{}{1}) { + c.Fatalf("Bad params for check: %#v", checker.params) + } +} + +func (s *HelpersS) TestCheckFailWithExpected(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n\n" + testHelperFailure(c, "Check(1, checker, 2)", false, false, log, + func() interface{} { + return c.Check(1, checker, 2) + }) +} + +func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n" + + "\\.+ Hello world!\n\n" + testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, + func() interface{} { + return c.Check(1, checker, 2, myComment("Hello world!")) + }) +} + +func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " // Nice leading comment\\.\n" + + " return c\\.Check\\(1, checker, 2\\) // Hello there\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n\n" + testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, + func() interface{} { + // Nice leading comment. 
+ return c.Check(1, checker, 2) // Hello there + }) +} + +func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) { + checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker\\)\n" + + "\\.+ myvalue int = 1\n\n" + testHelperFailure(c, "Check(1, checker)", false, false, log, + func() interface{} { + return c.Check(1, checker) + }) +} + +func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) { + checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + + "\\.+ myvalue int = 1\n" + + "\\.+ Hello world!\n\n" + testHelperFailure(c, "Check(1, checker, msg)", false, false, log, + func() interface{} { + return c.Check(1, checker, myComment("Hello world!")) + }) +} + +func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) { + checker := &MyChecker{result: true} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker\\)\n" + + "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + + "\\.+ Wrong number of parameters for MyChecker: " + + "want 3, got 2\n\n" + testHelperFailure(c, "Check(1, checker, !?)", false, false, log, + func() interface{} { + return c.Check(1, checker) + }) +} + +func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) { + checker := &MyChecker{result: true} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2, 3\\)\n" + + "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + + "\\.+ Wrong number of parameters for MyChecker: " + + "want 3, got 4\n\n" + testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log, + func() interface{} { + return c.Check(1, checker, 2, 3) + }) +} + +func (s *HelpersS) TestCheckWithError(c *check.C) { + checker := &MyChecker{result: false, error: "Some not so cool data provided!"} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n" + + "\\.+ Some not so cool data provided!\n\n" + testHelperFailure(c, "Check(1, checker, 2)", false, false, log, + func() interface{} { + return c.Check(1, checker, 2) + }) +} + +func (s *HelpersS) TestCheckWithNilChecker(c *check.C) { + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, nil\\)\n" + + "\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + + "\\.+ Oops\\.\\. you've provided a nil checker!\n\n" + testHelperFailure(c, "Check(obtained, nil)", false, false, log, + func() interface{} { + return c.Check(1, nil) + }) +} + +func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) { + checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2\\)\n" + + "\\.+ newobtained int = 3\n" + + "\\.+ newexpected int = 4\n\n" + testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log, + func() interface{} { + return c.Check(1, checker, 2) + }) +} + +// ----------------------------------------------------------------------- +// Tests for Assert(), mostly the same as for Check() above. 
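+// (The behavioral difference under test: Check logs a failure and lets the +// test continue, while Assert stops the test right after a failed check; +// compare Check and Assert in helpers.go.)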
+ +func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) { + checker := &MyChecker{result: true} + testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} { + c.Assert(1, checker, 2) + return nil + }) + if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { + c.Fatalf("Bad params for check: %#v", checker.params) + } +} + +func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) { + checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} { + c.Assert(1, checker) + return nil + }) + if !reflect.DeepEqual(checker.params, []interface{}{1}) { + c.Fatalf("Bad params for check: %#v", checker.params) + } +} + +func (s *HelpersS) TestAssertFailWithExpected(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker, 2\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n\n" + testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, + func() interface{} { + c.Assert(1, checker, 2) + return nil + }) +} + +func (s *HelpersS) TestAssertFailWithExpectedAndMessage(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n" + + "\\.+ Hello world!\n\n" + testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log, + func() interface{} { + c.Assert(1, checker, 2, myComment("Hello world!")) + return nil + }) +} + +func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) { + checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker\\)\n" + + "\\.+ myvalue int = 1\n\n" + testHelperFailure(c, "Assert(1, checker)", nil, true, log, + func() interface{} { + c.Assert(1, checker) + return nil + }) +} + +func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) { + checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + + "\\.+ myvalue int = 1\n" + + "\\.+ Hello world!\n\n" + testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log, + func() interface{} { + c.Assert(1, checker, myComment("Hello world!")) + return nil + }) +} + +func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) { + checker := &MyChecker{result: true} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker\\)\n" + + "\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" + + "\\.+ Wrong number of parameters for MyChecker: " + + "want 3, got 2\n\n" + testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log, + func() interface{} { + c.Assert(1, checker) + return nil + }) +} + +func (s *HelpersS) TestAssertWithError(c *check.C) { + checker := &MyChecker{result: false, error: "Some not so cool data provided!"} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker, 2\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n" + + "\\.+ Some not so cool data provided!\n\n" + testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, + func() interface{} { 
+ c.Assert(1, checker, 2) + return nil + }) +} + +func (s *HelpersS) TestAssertWithNilChecker(c *check.C) { + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, nil\\)\n" + + "\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + + "\\.+ Oops\\.\\. you've provided a nil checker!\n\n" + testHelperFailure(c, "Assert(obtained, nil)", nil, true, log, + func() interface{} { + c.Assert(1, nil) + return nil + }) +} + +// ----------------------------------------------------------------------- +// Ensure that values logged work properly in some interesting cases. + +func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + + " return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" + + "\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" + + "\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n" + testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log, + func() interface{} { + return c.Check([]byte{1, 2}, checker, []byte{1, 3}) + }) +} + +func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + + " return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" + + "\\.+ myobtained string = \"\" \\+\n" + + "\\.+ \"a\\\\n\" \\+\n" + + "\\.+ \"b\\\\n\"\n" + + "\\.+ myexpected string = \"\" \\+\n" + + "\\.+ \"a\\\\n\" \\+\n" + + "\\.+ \"b\\\\n\" \\+\n" + + "\\.+ \"c\"\n\n" + testHelperFailure(c, `Check("a\nb\n", chk, "a\nb\nc")`, false, false, log, + func() interface{} { + return c.Check("a\nb\n", checker, "a\nb\nc") + }) +} + +func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) { + // If the newline is at the end of the string, don't log as multi-line. + checker := &MyChecker{result: false} + log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + + " return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" + + "\\.+ myobtained string = \"a b\\\\n\"\n" + + "\\.+ myexpected string = \"\" \\+\n" + + "\\.+ \"a\\\\n\" \\+\n" + + "\\.+ \"b\"\n\n" + testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log, + func() interface{} { + return c.Check("a b\n", checker, "a\nb") + }) +} + +// ----------------------------------------------------------------------- +// MakeDir() tests. 
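
Annotation: the MkDir tests that follow pin down c.MkDir's contract — every call yields a distinct temporary directory that exists while the suite runs and is removed once the run completes. A minimal usage sketch, assuming the canonical gopkg.in import path; the package and suite names here are hypothetical:

    package mysuite_test

    import (
        "io/ioutil"
        "path/filepath"
        "testing"

        . "gopkg.in/check.v1"
    )

    func Test(t *testing.T) { TestingT(t) }

    type ScratchSuite struct{}

    var _ = Suite(&ScratchSuite{})

    func (s *ScratchSuite) TestScratchSpace(c *C) {
        dir := c.MkDir() // fresh directory, cleaned up after the run
        err := ioutil.WriteFile(filepath.Join(dir, "scratch"), []byte("data"), 0644)
        c.Assert(err, IsNil)
    }
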
+
+type MkDirHelper struct {
+	path1  string
+	path2  string
+	isDir1 bool
+	isDir2 bool
+	isDir3 bool
+	isDir4 bool
+}
+
+func (s *MkDirHelper) SetUpSuite(c *check.C) {
+	s.path1 = c.MkDir()
+	s.isDir1 = isDir(s.path1)
+}
+
+func (s *MkDirHelper) Test(c *check.C) {
+	s.path2 = c.MkDir()
+	s.isDir2 = isDir(s.path2)
+}
+
+func (s *MkDirHelper) TearDownSuite(c *check.C) {
+	s.isDir3 = isDir(s.path1)
+	s.isDir4 = isDir(s.path2)
+}
+
+func (s *HelpersS) TestMkDir(c *check.C) {
+	helper := MkDirHelper{}
+	output := String{}
+	check.Run(&helper, &check.RunConf{Output: &output})
+	c.Assert(output.value, check.Equals, "")
+	c.Check(helper.isDir1, check.Equals, true)
+	c.Check(helper.isDir2, check.Equals, true)
+	c.Check(helper.isDir3, check.Equals, true)
+	c.Check(helper.isDir4, check.Equals, true)
+	c.Check(helper.path1, check.Not(check.Equals),
+		helper.path2)
+	c.Check(isDir(helper.path1), check.Equals, false)
+	c.Check(isDir(helper.path2), check.Equals, false)
+}
+
+func isDir(path string) bool {
+	if stat, err := os.Stat(path); err == nil {
+		return stat.IsDir()
+	}
+	return false
+}
+
+// Concurrent logging should not corrupt the underlying buffer.
+// Use go test -race to detect the race in this test.
+func (s *HelpersS) TestConcurrentLogging(c *check.C) {
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))
+	var start, stop sync.WaitGroup
+	start.Add(1)
+	for i, n := 0, runtime.NumCPU()*2; i < n; i++ {
+		stop.Add(1)
+		go func(i int) {
+			start.Wait()
+			for j := 0; j < 30; j++ {
+				c.Logf("Worker %d: line %d", i, j)
+			}
+			stop.Done()
+		}(i)
+	}
+	start.Done()
+	stop.Wait()
+}
+
+// -----------------------------------------------------------------------
+// Test the TestName function
+
+type TestNameHelper struct {
+	name1 string
+	name2 string
+	name3 string
+	name4 string
+	name5 string
+}
+
+func (s *TestNameHelper) SetUpSuite(c *check.C)    { s.name1 = c.TestName() }
+func (s *TestNameHelper) SetUpTest(c *check.C)     { s.name2 = c.TestName() }
+func (s *TestNameHelper) Test(c *check.C)          { s.name3 = c.TestName() }
+func (s *TestNameHelper) TearDownTest(c *check.C)  { s.name4 = c.TestName() }
+func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() }
+
+func (s *HelpersS) TestTestName(c *check.C) {
+	helper := TestNameHelper{}
+	output := String{}
+	check.Run(&helper, &check.RunConf{Output: &output})
+	c.Check(helper.name1, check.Equals, "")
+	c.Check(helper.name2, check.Equals, "TestNameHelper.Test")
+	c.Check(helper.name3, check.Equals, "TestNameHelper.Test")
+	c.Check(helper.name4, check.Equals, "TestNameHelper.Test")
+	c.Check(helper.name5, check.Equals, "")
+}
+
+// -----------------------------------------------------------------------
+// A couple of helper functions to test helper functions.
:-) + +func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) { + var result interface{} + defer (func() { + if err := recover(); err != nil { + panic(err) + } + checkState(c, result, + &expectedState{ + name: name, + result: expectedResult, + failed: false, + log: "", + }) + })() + result = closure() +} + +func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) { + var result interface{} + defer (func() { + if err := recover(); err != nil { + panic(err) + } + checkState(c, result, + &expectedState{ + name: name, + result: expectedResult, + failed: true, + log: log, + }) + })() + result = closure() + if shouldStop { + c.Logf("%s didn't stop when it should", name) + } +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/printer.go b/Godeps/_workspace/src/gopkg.in/check.v1/printer.go new file mode 100644 index 0000000..e0f7557 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/printer.go @@ -0,0 +1,168 @@ +package check + +import ( + "bytes" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "os" +) + +func indent(s, with string) (r string) { + eol := true + for i := 0; i != len(s); i++ { + c := s[i] + switch { + case eol && c == '\n' || c == '\r': + case c == '\n' || c == '\r': + eol = true + case eol: + eol = false + s = s[:i] + with + s[i:] + i += len(with) + } + } + return s +} + +func printLine(filename string, line int) (string, error) { + fset := token.NewFileSet() + file, err := os.Open(filename) + if err != nil { + return "", err + } + fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) + if err != nil { + return "", err + } + config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} + lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} + ast.Walk(lp, fnode) + result := lp.output.Bytes() + // Comments leave \n at the end. + n := len(result) + for n > 0 && result[n-1] == '\n' { + n-- + } + return string(result[:n]), nil +} + +type linePrinter struct { + config *printer.Config + fset *token.FileSet + fnode *ast.File + line int + output bytes.Buffer + stmt ast.Stmt +} + +func (lp *linePrinter) emit() bool { + if lp.stmt != nil { + lp.trim(lp.stmt) + lp.printWithComments(lp.stmt) + lp.stmt = nil + return true + } + return false +} + +func (lp *linePrinter) printWithComments(n ast.Node) { + nfirst := lp.fset.Position(n.Pos()).Line + nlast := lp.fset.Position(n.End()).Line + for _, g := range lp.fnode.Comments { + cfirst := lp.fset.Position(g.Pos()).Line + clast := lp.fset.Position(g.End()).Line + if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { + for _, c := range g.List { + lp.output.WriteString(c.Text) + lp.output.WriteByte('\n') + } + } + if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { + // The printer will not include the comment if it starts past + // the node itself. Trick it into printing by overlapping the + // slash with the end of the statement. + g.List[0].Slash = n.End() - 1 + } + } + node := &printer.CommentedNode{n, lp.fnode.Comments} + lp.config.Fprint(&lp.output, lp.fset, node) +} + +func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { + if n == nil { + if lp.output.Len() == 0 { + lp.emit() + } + return nil + } + first := lp.fset.Position(n.Pos()).Line + last := lp.fset.Position(n.End()).Line + if first <= lp.line && last >= lp.line { + // Print the innermost statement containing the line. 
+ if stmt, ok := n.(ast.Stmt); ok { + if _, ok := n.(*ast.BlockStmt); !ok { + lp.stmt = stmt + } + } + if first == lp.line && lp.emit() { + return nil + } + return lp + } + return nil +} + +func (lp *linePrinter) trim(n ast.Node) bool { + stmt, ok := n.(ast.Stmt) + if !ok { + return true + } + line := lp.fset.Position(n.Pos()).Line + if line != lp.line { + return false + } + switch stmt := stmt.(type) { + case *ast.IfStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.SwitchStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.TypeSwitchStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.CaseClause: + stmt.Body = lp.trimList(stmt.Body) + case *ast.CommClause: + stmt.Body = lp.trimList(stmt.Body) + case *ast.BlockStmt: + stmt.List = lp.trimList(stmt.List) + } + return true +} + +func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { + if !lp.trim(stmt) { + return lp.emptyBlock(stmt) + } + stmt.Rbrace = stmt.Lbrace + return stmt +} + +func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { + for i := 0; i != len(stmts); i++ { + if !lp.trim(stmts[i]) { + stmts[i] = lp.emptyStmt(stmts[i]) + break + } + } + return stmts +} + +func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { + return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} +} + +func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { + p := n.Pos() + return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go new file mode 100644 index 0000000..9dd1707 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go @@ -0,0 +1,109 @@ +package check_test + +import ( + . "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" +) + +var _ = Suite(&PrinterS{}) + +type PrinterS struct{} + +func (s *PrinterS) TestCountSuite(c *C) { + suitesRun += 1 +} + +var printTestFuncLine int + +func init() { + printTestFuncLine = getMyLine() + 3 +} + +func printTestFunc() { + println(1) // Comment1 + if 2 == 2 { // Comment2 + println(3) // Comment3 + } + switch 5 { + case 6: + println(6) // Comment6 + println(7) + } + switch interface{}(9).(type) { // Comment9 + case int: + println(10) + println(11) + } + select { + case <-(chan bool)(nil): + println(14) + println(15) + default: + println(16) + println(17) + } + println(19, + 20) + _ = func() { + println(21) + println(22) + } + println(24, func() { + println(25) + }) + // Leading comment + // with multiple lines. 
+ println(29) // Comment29 +} + +var printLineTests = []struct { + line int + output string +}{ + {1, "println(1) // Comment1"}, + {2, "if 2 == 2 { // Comment2\n ...\n}"}, + {3, "println(3) // Comment3"}, + {5, "switch 5 {\n...\n}"}, + {6, "case 6:\n println(6) // Comment6\n ..."}, + {7, "println(7)"}, + {9, "switch interface{}(9).(type) { // Comment9\n...\n}"}, + {10, "case int:\n println(10)\n ..."}, + {14, "case <-(chan bool)(nil):\n println(14)\n ..."}, + {15, "println(15)"}, + {16, "default:\n println(16)\n ..."}, + {17, "println(17)"}, + {19, "println(19,\n 20)"}, + {20, "println(19,\n 20)"}, + {21, "_ = func() {\n println(21)\n println(22)\n}"}, + {22, "println(22)"}, + {24, "println(24, func() {\n println(25)\n})"}, + {25, "println(25)"}, + {26, "println(24, func() {\n println(25)\n})"}, + {29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"}, +} + +func (s *PrinterS) TestPrintLine(c *C) { + for _, test := range printLineTests { + output, err := PrintLine("printer_test.go", printTestFuncLine+test.line) + c.Assert(err, IsNil) + c.Assert(output, Equals, test.output) + } +} + +var indentTests = []struct { + in, out string +}{ + {"", ""}, + {"\n", "\n"}, + {"a", ">>>a"}, + {"a\n", ">>>a\n"}, + {"a\nb", ">>>a\n>>>b"}, + {" ", ">>> "}, +} + +func (s *PrinterS) TestIndent(c *C) { + for _, test := range indentTests { + out := Indent(test.in, ">>>") + c.Assert(out, Equals, test.out) + } + +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/run.go b/Godeps/_workspace/src/gopkg.in/check.v1/run.go new file mode 100644 index 0000000..da8fd79 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/run.go @@ -0,0 +1,175 @@ +package check + +import ( + "bufio" + "flag" + "fmt" + "os" + "testing" + "time" +) + +// ----------------------------------------------------------------------- +// Test suite registry. + +var allSuites []interface{} + +// Suite registers the given value as a test suite to be run. Any methods +// starting with the Test prefix in the given value will be considered as +// a test method. +func Suite(suite interface{}) interface{} { + allSuites = append(allSuites, suite) + return suite +} + +// ----------------------------------------------------------------------- +// Public running interface. 
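
Annotation: the flag block below registers both the legacy gocheck.* names and the newer check.* spellings; TestingT merges the two sets into one RunConf. The same configuration can also be driven programmatically, as in this sketch (canonical import path assumed; "MySuite" is a placeholder for a suite registered via check.Suite):

    package main

    import (
        "fmt"

        "gopkg.in/check.v1"
    )

    func main() {
        // Roughly equivalent to: go test -check.v -check.f 'MySuite'
        conf := &check.RunConf{Verbose: true, Filter: "MySuite"}
        result := check.RunAll(conf)
        fmt.Println(result.String()) // "OK: N passed" or "OOPS: ..."
    }
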
+ +var ( + oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") + oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") + oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") + oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") + oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") + oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") + oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") + + newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") + newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") + newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") + newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") + newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") + newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") + newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") + newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") +) + +// TestingT runs all test suites registered with the Suite function, +// printing results to stdout, and reporting any failures back to +// the "testing" package. +func TestingT(testingT *testing.T) { + benchTime := *newBenchTime + if benchTime == 1*time.Second { + benchTime = *oldBenchTime + } + conf := &RunConf{ + Filter: *oldFilterFlag + *newFilterFlag, + Verbose: *oldVerboseFlag || *newVerboseFlag, + Stream: *oldStreamFlag || *newStreamFlag, + Benchmark: *oldBenchFlag || *newBenchFlag, + BenchmarkTime: benchTime, + BenchmarkMem: *newBenchMem, + KeepWorkDir: *oldWorkFlag || *newWorkFlag, + } + if *oldListFlag || *newListFlag { + w := bufio.NewWriter(os.Stdout) + for _, name := range ListAll(conf) { + fmt.Fprintln(w, name) + } + w.Flush() + return + } + result := RunAll(conf) + println(result.String()) + if !result.Passed() { + testingT.Fail() + } +} + +// RunAll runs all test suites registered with the Suite function, using the +// provided run configuration. +func RunAll(runConf *RunConf) *Result { + result := Result{} + for _, suite := range allSuites { + result.Add(Run(suite, runConf)) + } + return &result +} + +// Run runs the provided test suite using the provided run configuration. +func Run(suite interface{}, runConf *RunConf) *Result { + runner := newSuiteRunner(suite, runConf) + return runner.run() +} + +// ListAll returns the names of all the test functions registered with the +// Suite function that will be run with the provided run configuration. +func ListAll(runConf *RunConf) []string { + var names []string + for _, suite := range allSuites { + names = append(names, List(suite, runConf)...) + } + return names +} + +// List returns the names of the test functions in the given +// suite that will be run with the provided run configuration. +func List(suite interface{}, runConf *RunConf) []string { + var names []string + runner := newSuiteRunner(suite, runConf) + for _, t := range runner.tests { + names = append(names, t.String()) + } + return names +} + +// ----------------------------------------------------------------------- +// Result methods. 
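
Annotation: Result.Add, just below, merges the counters field by field and joins non-empty WorkDir paths with a colon; Passed and String then summarize the aggregate. For instance (values arbitrary, canonical import path assumed):

    package main

    import (
        "fmt"

        "gopkg.in/check.v1"
    )

    func main() {
        total := check.Result{Succeeded: 2}
        total.Add(&check.Result{Failed: 1, WorkDir: "/tmp/check-123"})
        fmt.Println(total.Passed()) // false: Failed > 0
        fmt.Println(total.String()) // OOPS: 2 passed, 1 FAILED
        //                          // WORK=/tmp/check-123
    }
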
+ +func (r *Result) Add(other *Result) { + r.Succeeded += other.Succeeded + r.Skipped += other.Skipped + r.Failed += other.Failed + r.Panicked += other.Panicked + r.FixturePanicked += other.FixturePanicked + r.ExpectedFailures += other.ExpectedFailures + r.Missed += other.Missed + if r.WorkDir != "" && other.WorkDir != "" { + r.WorkDir += ":" + other.WorkDir + } else if other.WorkDir != "" { + r.WorkDir = other.WorkDir + } +} + +func (r *Result) Passed() bool { + return (r.Failed == 0 && r.Panicked == 0 && + r.FixturePanicked == 0 && r.Missed == 0 && + r.RunError == nil) +} + +func (r *Result) String() string { + if r.RunError != nil { + return "ERROR: " + r.RunError.Error() + } + + var value string + if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && + r.Missed == 0 { + value = "OK: " + } else { + value = "OOPS: " + } + value += fmt.Sprintf("%d passed", r.Succeeded) + if r.Skipped != 0 { + value += fmt.Sprintf(", %d skipped", r.Skipped) + } + if r.ExpectedFailures != 0 { + value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) + } + if r.Failed != 0 { + value += fmt.Sprintf(", %d FAILED", r.Failed) + } + if r.Panicked != 0 { + value += fmt.Sprintf(", %d PANICKED", r.Panicked) + } + if r.FixturePanicked != 0 { + value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) + } + if r.Missed != 0 { + value += fmt.Sprintf(", %d MISSED", r.Missed) + } + if r.WorkDir != "" { + value += "\nWORK=" + r.WorkDir + } + return value +} diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go new file mode 100644 index 0000000..62beb0b --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go @@ -0,0 +1,419 @@ +// These tests verify the test running logic. + +package check_test + +import ( + "errors" + . "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" + "os" + "sync" +) + +var runnerS = Suite(&RunS{}) + +type RunS struct{} + +func (s *RunS) TestCountSuite(c *C) { + suitesRun += 1 +} + +// ----------------------------------------------------------------------- +// Tests ensuring result counting works properly. 
+ +func (s *RunS) TestSuccess(c *C) { + output := String{} + result := Run(&SuccessHelper{}, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 1) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 0) + c.Check(result.Missed, Equals, 0) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestFailure(c *C) { + output := String{} + result := Run(&FailHelper{}, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 0) + c.Check(result.Failed, Equals, 1) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 0) + c.Check(result.Missed, Equals, 0) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestFixture(c *C) { + output := String{} + result := Run(&FixtureHelper{}, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 2) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 0) + c.Check(result.Missed, Equals, 0) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestPanicOnTest(c *C) { + output := String{} + helper := &FixtureHelper{panicOn: "Test1"} + result := Run(helper, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 1) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 1) + c.Check(result.FixturePanicked, Equals, 0) + c.Check(result.Missed, Equals, 0) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestPanicOnSetUpTest(c *C) { + output := String{} + helper := &FixtureHelper{panicOn: "SetUpTest"} + result := Run(helper, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 0) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 1) + c.Check(result.Missed, Equals, 2) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestPanicOnSetUpSuite(c *C) { + output := String{} + helper := &FixtureHelper{panicOn: "SetUpSuite"} + result := Run(helper, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 0) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 1) + c.Check(result.Missed, Equals, 2) + c.Check(result.RunError, IsNil) +} + +// ----------------------------------------------------------------------- +// Check result aggregation. + +func (s *RunS) TestAdd(c *C) { + result := &Result{ + Succeeded: 1, + Skipped: 2, + Failed: 3, + Panicked: 4, + FixturePanicked: 5, + Missed: 6, + ExpectedFailures: 7, + } + result.Add(&Result{ + Succeeded: 10, + Skipped: 20, + Failed: 30, + Panicked: 40, + FixturePanicked: 50, + Missed: 60, + ExpectedFailures: 70, + }) + c.Check(result.Succeeded, Equals, 11) + c.Check(result.Skipped, Equals, 22) + c.Check(result.Failed, Equals, 33) + c.Check(result.Panicked, Equals, 44) + c.Check(result.FixturePanicked, Equals, 55) + c.Check(result.Missed, Equals, 66) + c.Check(result.ExpectedFailures, Equals, 77) + c.Check(result.RunError, IsNil) +} + +// ----------------------------------------------------------------------- +// Check the Passed() method. 
+ +func (s *RunS) TestPassed(c *C) { + c.Assert((&Result{}).Passed(), Equals, true) + c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true) + c.Assert((&Result{Skipped: 1}).Passed(), Equals, true) + c.Assert((&Result{Failed: 1}).Passed(), Equals, false) + c.Assert((&Result{Panicked: 1}).Passed(), Equals, false) + c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false) + c.Assert((&Result{Missed: 1}).Passed(), Equals, false) + c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false) +} + +// ----------------------------------------------------------------------- +// Check that result printing is working correctly. + +func (s *RunS) TestPrintSuccess(c *C) { + result := &Result{Succeeded: 5} + c.Check(result.String(), Equals, "OK: 5 passed") +} + +func (s *RunS) TestPrintFailure(c *C) { + result := &Result{Failed: 5} + c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED") +} + +func (s *RunS) TestPrintSkipped(c *C) { + result := &Result{Skipped: 5} + c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped") +} + +func (s *RunS) TestPrintExpectedFailures(c *C) { + result := &Result{ExpectedFailures: 5} + c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures") +} + +func (s *RunS) TestPrintPanicked(c *C) { + result := &Result{Panicked: 5} + c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED") +} + +func (s *RunS) TestPrintFixturePanicked(c *C) { + result := &Result{FixturePanicked: 5} + c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED") +} + +func (s *RunS) TestPrintMissed(c *C) { + result := &Result{Missed: 5} + c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED") +} + +func (s *RunS) TestPrintAll(c *C) { + result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3, + Panicked: 4, FixturePanicked: 5, Missed: 6} + c.Check(result.String(), Equals, + "OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+ + "5 FIXTURE-PANICKED, 6 MISSED") +} + +func (s *RunS) TestPrintRunError(c *C) { + result := &Result{Succeeded: 1, Failed: 1, + RunError: errors.New("Kaboom!")} + c.Check(result.String(), Equals, "ERROR: Kaboom!") +} + +// ----------------------------------------------------------------------- +// Verify that the method pattern flag works correctly. 
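
Annotation: the tests below exercise RunConf.Filter, the same regular expression the -check.f / -gocheck.f flags feed in. It may match the suite name, the test name, or the combined "Suite.Test" form, and partial matches count (see TestRequirePartialMatch). A sketch, with the canonical import path assumed:

    package main

    import (
        "fmt"

        "gopkg.in/check.v1"
    )

    func main() {
        // Equivalent to: go test -check.f 'FixtureHelper\.Test2'
        // An invalid regexp surfaces as a RunError
        // ("ERROR: Bad filter expression: ...").
        fmt.Println(check.RunAll(&check.RunConf{Filter: `FixtureHelper\.Test2`}))
    }
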
+ +func (s *RunS) TestFilterTestName(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "Test[91]"} + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 5) +} + +func (s *RunS) TestFilterTestNameWithAll(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: ".*"} + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) +} + +func (s *RunS) TestFilterSuiteName(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "FixtureHelper"} + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) +} + +func (s *RunS) TestFilterSuiteNameAndTestName(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"} + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test2") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 5) +} + +func (s *RunS) TestFilterAllOut(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "NotFound"} + Run(&helper, &runConf) + c.Check(len(helper.calls), Equals, 0) +} + +func (s *RunS) TestRequirePartialMatch(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "est"} + Run(&helper, &runConf) + c.Check(len(helper.calls), Equals, 8) +} + +func (s *RunS) TestFilterError(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "]["} + result := Run(&helper, &runConf) + c.Check(result.String(), Equals, + "ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`") + c.Check(len(helper.calls), Equals, 0) +} + +// ----------------------------------------------------------------------- +// Verify that List works correctly. 
+
+func (s *RunS) TestListFiltered(c *C) {
+	names := List(&FixtureHelper{}, &RunConf{Filter: "1"})
+	c.Assert(names, DeepEquals, []string{
+		"FixtureHelper.Test1",
+	})
+}
+
+func (s *RunS) TestList(c *C) {
+	names := List(&FixtureHelper{}, &RunConf{})
+	c.Assert(names, DeepEquals, []string{
+		"FixtureHelper.Test1",
+		"FixtureHelper.Test2",
+	})
+}
+
+// -----------------------------------------------------------------------
+// Verify that verbose mode prints tests which pass as well.
+
+func (s *RunS) TestVerboseMode(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Verbose: true}
+	Run(&helper, &runConf)
+
+	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" +
+		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
+
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) {
+	helper := FixtureHelper{panicOn: "Test1"}
+	output := String{}
+	runConf := RunConf{Output: &output, Verbose: true}
+	Run(&helper, &runConf)
+
+	expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line.
+		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
+
+	c.Assert(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Verify the stream output mode. In this mode there's no output caching.
+
+type StreamHelper struct {
+	l2 sync.Mutex
+	l3 sync.Mutex
+}
+
+func (s *StreamHelper) SetUpSuite(c *C) {
+	c.Log("0")
+}
+
+func (s *StreamHelper) Test1(c *C) {
+	c.Log("1")
+	s.l2.Lock()
+	s.l3.Lock()
+	go func() {
+		s.l2.Lock() // Wait for "2".
+		c.Log("3")
+		s.l3.Unlock()
+	}()
+}
+
+func (s *StreamHelper) Test2(c *C) {
+	c.Log("2")
+	s.l2.Unlock()
+	s.l3.Lock() // Wait for "3".
+	c.Fail()
+	c.Log("4")
+}
+
+func (s *RunS) TestStreamMode(c *C) {
+	helper := &StreamHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Stream: true}
+	Run(helper, &runConf)
+
+	expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" +
+		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" +
+		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" +
+		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" +
+		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" +
+		"FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n"
+
+	c.Assert(output.value, Matches, expected)
+}
+
+type StreamMissHelper struct{}
+
+func (s *StreamMissHelper) SetUpSuite(c *C) {
+	c.Log("0")
+	c.Fail()
+}
+
+func (s *StreamMissHelper) Test1(c *C) {
+	c.Log("1")
+}
+
+func (s *RunS) TestStreamModeWithMiss(c *C) {
+	helper := &StreamMissHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Stream: true}
+	Run(helper, &runConf)
+
+	expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" +
+		"FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" +
+		"START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" +
+		"MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n"
+
+	c.Assert(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Verify that the keep work dir request indeed does so.
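
Annotation: TestKeepWorkDir below relies on RunConf.KeepWorkDir (the -check.work / -gocheck.work flags): when set, the temporary directories handed out by c.MkDir survive the run, and the summary gains a WORK=<dir> line. A sketch, with the canonical import path assumed:

    package main

    import (
        "fmt"

        "gopkg.in/check.v1"
    )

    func main() {
        // Equivalent to: go test -check.work
        result := check.RunAll(&check.RunConf{KeepWorkDir: true})
        // The retained directory is reported for post-mortem inspection.
        fmt.Println("WORK =", result.WorkDir)
    }
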
+ +type WorkDirSuite struct{} + +func (s *WorkDirSuite) Test(c *C) { + c.MkDir() +} + +func (s *RunS) TestKeepWorkDir(c *C) { + output := String{} + runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true} + result := Run(&WorkDirSuite{}, &runConf) + + c.Assert(result.String(), Matches, ".*\nWORK="+result.WorkDir) + + stat, err := os.Stat(result.WorkDir) + c.Assert(err, IsNil) + c.Assert(stat.IsDir(), Equals, true) +} diff --git a/elastigo/datastore.go b/elastigo/datastore.go index aa1ec93..02ae375 100644 --- a/elastigo/datastore.go +++ b/elastigo/datastore.go @@ -6,10 +6,10 @@ import ( "net/url" "strings" - "github.com/codahale/blake2" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/codahale/blake2" ds "github.com/jbenet/datastore.go" - "github.com/mattbaird/elastigo/api" - "github.com/mattbaird/elastigo/core" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/api" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/mattbaird/elastigo/core" ) // Currently, elastigo does not allow connecting to multiple elasticsearch diff --git a/key.go b/key.go index 2cdda91..c655937 100644 --- a/key.go +++ b/key.go @@ -1,7 +1,7 @@ package datastore import ( - "code.google.com/p/go-uuid/uuid" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid" "path" "strings" ) diff --git a/key_test.go b/key_test.go index bbf496a..7e98c36 100644 --- a/key_test.go +++ b/key_test.go @@ -2,12 +2,13 @@ package datastore_test import ( "bytes" - . "github.com/jbenet/datastore.go" - . "launchpad.net/gocheck" "math/rand" "path" "strings" "testing" + + . "github.com/jbenet/datastore.go" + . "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. diff --git a/leveldb/datastore.go b/leveldb/datastore.go index 072725e..bcc3183 100644 --- a/leveldb/datastore.go +++ b/leveldb/datastore.go @@ -2,8 +2,8 @@ package leveldb import ( ds "github.com/jbenet/datastore.go" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb" + "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" ) // Datastore uses a standard Go map for internal storage. diff --git a/lru/datastore.go b/lru/datastore.go index 51fe604..6bed7ca 100644 --- a/lru/datastore.go +++ b/lru/datastore.go @@ -3,7 +3,7 @@ package lru import ( "errors" - lru "github.com/hashicorp/golang-lru" + lru "github.com/jbenet/datastore.go/Godeps/_workspace/src/github.com/hashicorp/golang-lru" ds "github.com/jbenet/datastore.go" ) diff --git a/lru/datastore_test.go b/lru/datastore_test.go index c029af4..e93eb36 100644 --- a/lru/datastore_test.go +++ b/lru/datastore_test.go @@ -2,12 +2,11 @@ package lru_test import ( "strconv" - "testing" ds "github.com/jbenet/datastore.go" lru "github.com/jbenet/datastore.go/lru" - . "launchpad.net/gocheck" + . "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner.
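
Annotation: the "Hook up gocheck" comment that closes the hunk above introduces the conventional bridge between "go test" and check.v1; the function itself is unchanged context and therefore not shown in this diff. For reference, that bridge conventionally reads as follows (illustrative package name; TestingT is defined in the vendored run.go above):

    package datastore_test

    import (
        "testing"

        . "github.com/jbenet/datastore.go/Godeps/_workspace/src/gopkg.in/check.v1"
    )

    // Run every suite registered via Suite(...) under the standard test runner.
    func Test(t *testing.T) { TestingT(t) }
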