diff --git a/bucket.go b/bucket.go index d9b384a2a..9fbc9766c 100644 --- a/bucket.go +++ b/bucket.go @@ -285,19 +285,21 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { + if !bytes.Equal(newKey, k) { return errors.ErrBucketNotFound } else if (flags & common.BucketLeafFlag) == 0 { return errors.ErrIncompatibleValue } // Recursively delete all child buckets. - child := b.Bucket(key) + child := b.Bucket(newKey) err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) @@ -309,7 +311,7 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { } // Remove cached copy. - delete(b.buckets, string(key)) + delete(b.buckets, string(newKey)) // Release all bucket pages to freelist. child.nodes = nil @@ -317,7 +319,75 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { child.free() // Delete the node if we have a matching key. - c.node().del(key) + c.node().del(newKey) + + return nil +} + +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. or the key represents a non-bucket value; +// 4. the source and destination buckets are the same. 
+func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { + lg := b.tx.db.Logger() + lg.Debugf("Moving bucket %q", string(key)) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", string(key), err) + } else { + lg.Debugf("Moving bucket %q successfully", string(key)) + } + }() + + if b.tx.db == nil || dstBucket.tx.db == nil { + return errors.ErrTxClosed + } else if !dstBucket.Writable() { + return errors.ErrTxNotWritable + } + + newKey := cloneBytes(key) + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + lg.Errorf("An incompatible key %s exists in the source bucket", string(newKey)) + return errors.ErrIncompatibleValue + } + + // Return ErrSameBuckets if the source bucket and the + // destination bucket are actually the same bucket. + if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b.String(), dstBucket.String()) + return errors.ErrSameBuckets + } + + // check whether the key already exists in the destination bucket + curDst := dstBucket.Cursor() + k, _, flags = curDst.seek(newKey) + + // Return an error if there is an existing key in the destination bucket. 
+ if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrBucketExists + } + lg.Errorf("An incompatible key %s exists in the target bucket", string(newKey)) + return errors.ErrIncompatibleValue + } + + // remove the sub-bucket from the source bucket + delete(b.buckets, string(newKey)) + c.node().del(newKey) + + // add the sub-bucket to the destination bucket + newValue := cloneBytes(v) + curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) return nil } diff --git a/errors/errors.go b/errors/errors.go index 9598cbd8a..5709bcf2c 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -69,8 +69,12 @@ var ( // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. ErrValueTooLarge = errors.New("value too large") - // ErrIncompatibleValue is returned when trying create or delete a bucket + // ErrIncompatibleValue is returned when trying to create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. ErrIncompatibleValue = errors.New("incompatible value") + + // ErrSameBuckets is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are the same. 
+ ErrSameBuckets = errors.New("the source and target are the same bucket") ) diff --git a/movebucket_test.go b/movebucket_test.go new file mode 100644 index 000000000..0b60d95bd --- /dev/null +++ b/movebucket_test.go @@ -0,0 +1,275 @@ +package bbolt_test + +import ( + crand "crypto/rand" + "math/rand" + "os" + "path/filepath" + "testing" + + "go.etcd.io/bbolt" + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/btesting" + + "github.com/stretchr/testify/require" +) + +func TestTx_MoveBucket(t *testing.T) { + testCases := []struct { + name string + srcBucketPath []string + dstBucketPath []string + bucketToMove string + bucketExistInSrc bool + bucketExistInDst bool + hasIncompatibleKeyInSrc bool + hasIncompatibleKeyInDst bool + expectedErr error + }{ + // normal cases + { + name: "normal case", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, + }, + { + name: "the source and target bucket share the same grandparent", + srcBucketPath: []string{"grandparent", "sb2"}, + dstBucketPath: []string{"grandparent", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, + }, + { + name: "bucketToMove is a top level bucket", + srcBucketPath: []string{}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, + }, + { + name: "convert bucketToMove to a top level bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + 
expectedErr: nil, + }, + // negative cases + { + name: "bucketToMove not exist in source bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: false, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrBucketNotFound, + }, + { + name: "bucketToMove exist in target bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: true, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrBucketExists, + }, + { + name: "incompatible key exist in source bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: false, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: true, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrIncompatibleValue, + }, + { + name: "incompatible key exist in target bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: true, + expectedErr: errors.ErrIncompatibleValue, + }, + { + name: "the source and target are the same bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"sb1", "sb2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrSameBuckets, + }, + { + name: "both the source and target are the root bucket", + srcBucketPath: []string{}, + dstBucketPath: []string{}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + 
expectedErr: errors.ErrSameBuckets, + }, + } + + for _, tc := range testCases { + + t.Run(tc.name, func(*testing.T) { + db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: 4096}) + + dumpBucketBeforeMoving := filepath.Join(t.TempDir(), "dbBeforeMove") + dumpBucketAfterMoving := filepath.Join(t.TempDir(), "dbAfterMove") + + t.Log("Creating sample db and populate some data") + err := db.Update(func(tx *bbolt.Tx) error { + srcBucket := prepareBuckets(t, tx, tc.srcBucketPath...) + dstBucket := prepareBuckets(t, tx, tc.dstBucketPath...) + + if tc.bucketExistInSrc { + _ = createBucketAndPopulateData(t, tx, srcBucket, tc.bucketToMove) + } + + if tc.bucketExistInDst { + _ = createBucketAndPopulateData(t, tx, dstBucket, tc.bucketToMove) + } + + if tc.hasIncompatibleKeyInSrc { + putErr := srcBucket.Put([]byte(tc.bucketToMove), []byte("bar")) + require.NoError(t, putErr) + } + + if tc.hasIncompatibleKeyInDst { + putErr := dstBucket.Put([]byte(tc.bucketToMove), []byte("bar")) + require.NoError(t, putErr) + } + + return nil + }) + require.NoError(t, err) + + t.Log("Moving bucket") + err = db.Update(func(tx *bbolt.Tx) error { + srcBucket := prepareBuckets(t, tx, tc.srcBucketPath...) + dstBucket := prepareBuckets(t, tx, tc.dstBucketPath...) 
+ + if tc.expectedErr == nil { + t.Logf("Dump the bucket to %s before moving it", dumpBucketBeforeMoving) + bk := openBucket(tx, srcBucket, tc.bucketToMove) + dumpErr := dumpBucket([]byte(tc.bucketToMove), bk, dumpBucketBeforeMoving) + require.NoError(t, dumpErr) + } + + mErr := tx.MoveBucket([]byte(tc.bucketToMove), srcBucket, dstBucket) + require.Equal(t, tc.expectedErr, mErr) + + if tc.expectedErr == nil { + t.Logf("Dump the bucket to %s after moving it", dumpBucketAfterMoving) + bk := openBucket(tx, dstBucket, tc.bucketToMove) + dumpErr := dumpBucket([]byte(tc.bucketToMove), bk, dumpBucketAfterMoving) + require.NoError(t, dumpErr) + } + + return nil + }) + require.NoError(t, err) + + // skip assertion if failure expected + if tc.expectedErr != nil { + return + } + + t.Log("Verifying the bucket should be identical before and after being moved") + dataBeforeMove, err := os.ReadFile(dumpBucketBeforeMoving) + require.NoError(t, err) + dataAfterMove, err := os.ReadFile(dumpBucketAfterMoving) + require.NoError(t, err) + require.Equal(t, dataBeforeMove, dataAfterMove) + }) + } +} + +// prepareBuckets opens the bucket chain. For each bucket in the chain, +// open it if it exists, otherwise create it and populate sample data. 
+func prepareBuckets(t testing.TB, tx *bbolt.Tx, buckets ...string) *bbolt.Bucket { + var bk *bbolt.Bucket + + for _, key := range buckets { + if childBucket := openBucket(tx, bk, key); childBucket == nil { + bk = createBucketAndPopulateData(t, tx, bk, key) + } else { + bk = childBucket + } + } + return bk +} + +func openBucket(tx *bbolt.Tx, bk *bbolt.Bucket, bucketToOpen string) *bbolt.Bucket { + if bk == nil { + return tx.Bucket([]byte(bucketToOpen)) + } + return bk.Bucket([]byte(bucketToOpen)) +} + +func createBucketAndPopulateData(t testing.TB, tx *bbolt.Tx, bk *bbolt.Bucket, bucketName string) *bbolt.Bucket { + if bk == nil { + newBucket, err := tx.CreateBucket([]byte(bucketName)) + require.NoError(t, err, "failed to create bucket %s", bucketName) + populateSampleDataInBucket(t, newBucket, rand.Intn(4096)) + return newBucket + } + + newBucket, err := bk.CreateBucket([]byte(bucketName)) + require.NoError(t, err, "failed to create bucket %s", bucketName) + populateSampleDataInBucket(t, newBucket, rand.Intn(4096)) + return newBucket +} + +func populateSampleDataInBucket(t testing.TB, bk *bbolt.Bucket, n int) { + var min, max = 1, 1024 + + for i := 0; i < n; i++ { + // generate rand key/value length + keyLength := rand.Intn(max-min) + min + valLength := rand.Intn(max-min) + min + + keyData := make([]byte, keyLength) + valData := make([]byte, valLength) + + _, err := crand.Read(keyData) + require.NoError(t, err) + + _, err = crand.Read(valData) + require.NoError(t, err) + + err = bk.Put(keyData, valData) + require.NoError(t, err) + } +} diff --git a/tx.go b/tx.go index 8e624e7b2..81913b0fe 100644 --- a/tx.go +++ b/tx.go @@ -127,6 +127,24 @@ func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. 
the key represents a non-bucket value. +// +// If src is nil, it means moving a top level bucket into the target bucket. +// If dst is nil, it means converting the child bucket into a top level bucket. +func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error { + if src == nil { + src = &tx.root + } + if dst == nil { + dst = &tx.root + } + return src.MoveBucket(child, dst) +} + // ForEach executes a function for each bucket in the root. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. diff --git a/utils_test.go b/utils_test.go new file mode 100644 index 000000000..1a4f23939 --- /dev/null +++ b/utils_test.go @@ -0,0 +1,47 @@ +package bbolt_test + +import ( + bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/common" +) + +// `dumpBucket` dumps all the data, including both key/value data +// and child buckets, from the source bucket into the target db file. +func dumpBucket(srcBucketName []byte, srcBucket *bolt.Bucket, dstFilename string) error { + common.Assert(len(srcBucketName) != 0, "source bucket name can't be empty") + common.Assert(srcBucket != nil, "the source bucket can't be nil") + common.Assert(len(dstFilename) != 0, "the target file path can't be empty") + + dstDB, err := bolt.Open(dstFilename, 0600, nil) + if err != nil { + return err + } + defer dstDB.Close() + + return dstDB.Update(func(tx *bolt.Tx) error { + dstBucket, err := tx.CreateBucket(srcBucketName) + if err != nil { + return err + } + return cloneBucket(srcBucket, dstBucket) + }) +} + +func cloneBucket(src *bolt.Bucket, dst *bolt.Bucket) error { + return src.ForEach(func(k, v []byte) error { + if v == nil { + srcChild := src.Bucket(k) + dstChild, err := dst.CreateBucket(k) + if err != nil { + return err + } + if err = dstChild.SetSequence(srcChild.Sequence()); err != nil { + return err + } + + return cloneBucket(srcChild, dstChild) + } + + return dst.Put(k, v) + }) +}