From a23b75020b0cae4adea44acfe767e75f7cdb1035 Mon Sep 17 00:00:00 2001 From: Andre Duffeck Date: Wed, 10 Mar 2021 12:06:54 +0100 Subject: [PATCH] Separate blobs from metadata in the ocis driver (#1452) --- README.md | 2 +- .../separate-blobs-from-metadata-in-ocis.md | 12 + pkg/storage/fs/ocis/blobstore/blobstore.go | 83 ++ .../fs/ocis/blobstore/blobstore_suite_test.go | 31 + .../fs/ocis/blobstore/blobstore_test.go | 118 +++ pkg/storage/fs/ocis/grants.go | 167 ---- pkg/storage/fs/ocis/interfaces.go | 64 -- pkg/storage/fs/ocis/lookup.go | 163 ---- pkg/storage/fs/ocis/metadata.go | 212 ---- pkg/storage/fs/ocis/node.go | 912 ------------------ pkg/storage/fs/ocis/ocis.go | 537 +---------- pkg/storage/fs/ocis/ocis_suite_test.go | 31 + pkg/storage/fs/ocis/ocis_test.go | 169 +--- pkg/storage/fs/ocis/permissions.go | 275 ------ pkg/storage/fs/ocis/recycle.go | 261 ----- pkg/storage/fs/ocis/revisions.go | 191 ---- pkg/storage/fs/ocis/tree.go | 422 -------- pkg/storage/fs/ocis/upload.go | 655 ------------- pkg/storage/fs/s3ng/option.go | 40 - pkg/storage/fs/s3ng/s3ng.go | 415 +------- pkg/storage/fs/s3ng/s3ng_test.go | 85 +- pkg/storage/fs/s3ng/testhelpers/helpers.go | 81 -- pkg/storage/fs/s3ng/tree/tree_test.go | 213 ---- .../utils/decomposedfs/decomposedfs.go | 485 ++++++++++ .../decomposedfs_concurrency_test.go | 160 +++ .../decomposedfs/decomposedfs_suite_test.go | 31 + .../utils/decomposedfs/decomposedfs_test.go | 94 ++ .../decomposedfs/decomposedfs_unix.go} | 4 +- .../decomposedfs/decomposedfs_windows.go} | 4 +- .../{fs/s3ng => utils/decomposedfs}/grants.go | 18 +- .../{fs/s3ng => utils/decomposedfs}/lookup.go | 13 +- .../s3ng => utils/decomposedfs}/metadata.go | 18 +- .../decomposedfs}/mocks/PermissionsChecker.go | 2 +- .../s3ng => utils/decomposedfs}/mocks/Tree.go | 2 +- .../s3ng => utils/decomposedfs}/node/node.go | 188 ++-- .../decomposedfs/node/node_suite_test.go} | 10 +- .../utils/decomposedfs/node/node_test.go | 150 +++ .../decomposedfs}/node/permissions.go | 6 
+- .../decomposedfs/options/options.go} | 73 +- .../options/options_suite_test.go | 31 + .../decomposedfs/options/options_test.go | 60 ++ .../s3ng => utils/decomposedfs}/recycle.go | 20 +- .../s3ng => utils/decomposedfs}/revisions.go | 21 +- .../utils/decomposedfs/testhelpers/helpers.go | 187 ++++ .../decomposedfs}/tree/mocks/Blobstore.go | 0 .../s3ng => utils/decomposedfs}/tree/tree.go | 170 +++- .../decomposedfs}/tree/tree_suite_test.go | 0 .../utils/decomposedfs/tree/tree_test.go | 281 ++++++ .../{fs/s3ng => utils/decomposedfs}/upload.go | 89 +- .../decomposedfs}/upload_test.go | 35 +- .../decomposedfs}/xattrs/xattrs.go | 9 +- .../local/storage-publiclink.toml | 9 - 52 files changed, 2233 insertions(+), 5076 deletions(-) create mode 100644 changelog/unreleased/separate-blobs-from-metadata-in-ocis.md create mode 100644 pkg/storage/fs/ocis/blobstore/blobstore.go create mode 100644 pkg/storage/fs/ocis/blobstore/blobstore_suite_test.go create mode 100644 pkg/storage/fs/ocis/blobstore/blobstore_test.go delete mode 100644 pkg/storage/fs/ocis/grants.go delete mode 100644 pkg/storage/fs/ocis/interfaces.go delete mode 100644 pkg/storage/fs/ocis/lookup.go delete mode 100644 pkg/storage/fs/ocis/metadata.go delete mode 100644 pkg/storage/fs/ocis/node.go create mode 100644 pkg/storage/fs/ocis/ocis_suite_test.go delete mode 100644 pkg/storage/fs/ocis/permissions.go delete mode 100644 pkg/storage/fs/ocis/recycle.go delete mode 100644 pkg/storage/fs/ocis/revisions.go delete mode 100644 pkg/storage/fs/ocis/tree.go delete mode 100644 pkg/storage/fs/ocis/upload.go delete mode 100644 pkg/storage/fs/s3ng/testhelpers/helpers.go delete mode 100644 pkg/storage/fs/s3ng/tree/tree_test.go create mode 100644 pkg/storage/utils/decomposedfs/decomposedfs.go create mode 100644 pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go create mode 100644 pkg/storage/utils/decomposedfs/decomposedfs_suite_test.go create mode 100644 pkg/storage/utils/decomposedfs/decomposedfs_test.go rename 
pkg/storage/{fs/ocis/ocis_unix.go => utils/decomposedfs/decomposedfs_unix.go} (91%) rename pkg/storage/{fs/ocis/ocis_windows.go => utils/decomposedfs/decomposedfs_windows.go} (92%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/grants.go (85%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/lookup.go (92%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/metadata.go (89%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/mocks/PermissionsChecker.go (97%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/mocks/Tree.go (99%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/node/node.go (83%) rename pkg/storage/{fs/s3ng/node/node_test.go => utils/decomposedfs/node/node_suite_test.go} (87%) create mode 100644 pkg/storage/utils/decomposedfs/node/node_test.go rename pkg/storage/{fs/s3ng => utils/decomposedfs}/node/permissions.go (97%) rename pkg/storage/{fs/ocis/option.go => utils/decomposedfs/options/options.go} (60%) create mode 100644 pkg/storage/utils/decomposedfs/options/options_suite_test.go create mode 100644 pkg/storage/utils/decomposedfs/options/options_test.go rename pkg/storage/{fs/s3ng => utils/decomposedfs}/recycle.go (90%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/revisions.go (85%) create mode 100644 pkg/storage/utils/decomposedfs/testhelpers/helpers.go rename pkg/storage/{fs/s3ng => utils/decomposedfs}/tree/mocks/Blobstore.go (100%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/tree/tree.go (75%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/tree/tree_suite_test.go (100%) create mode 100644 pkg/storage/utils/decomposedfs/tree/tree_test.go rename pkg/storage/{fs/s3ng => utils/decomposedfs}/upload.go (84%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/upload_test.go (83%) rename pkg/storage/{fs/s3ng => utils/decomposedfs}/xattrs/xattrs.go (88%) diff --git a/README.md b/README.md index 46b77242fe..218d0a17b7 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ You can also read the [build from sources 
guide](https://reva.link/docs/getting- ../../../cmd/revad/revad -c gateway.toml & ../../../cmd/revad/revad -c shares.toml & ../../../cmd/revad/revad -c storage-home.toml & - ../../../cmd/revad/revad -c storage-oc.toml & + ../../../cmd/revad/revad -c storage-users.toml & ../../../cmd/revad/revad -c storage-publiclink.toml & ../../../cmd/revad/revad -c ldap-users.toml ``` diff --git a/changelog/unreleased/separate-blobs-from-metadata-in-ocis.md b/changelog/unreleased/separate-blobs-from-metadata-in-ocis.md new file mode 100644 index 0000000000..fdbf692c34 --- /dev/null +++ b/changelog/unreleased/separate-blobs-from-metadata-in-ocis.md @@ -0,0 +1,12 @@ +Change: Separate blobs from metadata in the ocis storage driver + +We changed the ocis storage driver to keep the file content separate from the +metadata by storing the blobs in a separate directory. This allows for using +a different (potentially faster) storage for the metadata. + +**Note** This change makes existing ocis storages incompatible with the new code. + +We also streamlined the ocis and the s3ng drivers so that most of the code is +shared between them. + +https://github.com/cs3org/reva/pull/1452 \ No newline at end of file diff --git a/pkg/storage/fs/ocis/blobstore/blobstore.go b/pkg/storage/fs/ocis/blobstore/blobstore.go new file mode 100644 index 0000000000..aab9f88ca4 --- /dev/null +++ b/pkg/storage/fs/ocis/blobstore/blobstore.go @@ -0,0 +1,83 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package blobstore + +import ( + "bufio" + "io" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// Blobstore provides an interface to an filesystem based blobstore +type Blobstore struct { + root string +} + +// New returns a new Blobstore +func New(root string) (*Blobstore, error) { + err := os.MkdirAll(root, 0700) + if err != nil { + return nil, err + } + + return &Blobstore{ + root: root, + }, nil +} + +// Upload stores some data in the blobstore under the given key +func (bs *Blobstore) Upload(key string, data io.Reader) error { + f, err := os.OpenFile(bs.path(key), os.O_CREATE|os.O_WRONLY, 0700) + if err != nil { + return errors.Wrapf(err, "could not open blob '%s' for writing", key) + } + + w := bufio.NewWriter(f) + _, err = w.ReadFrom(data) + if err != nil { + return errors.Wrapf(err, "could not write blob '%s'", key) + } + + return w.Flush() +} + +// Download retrieves a blob from the blobstore for reading +func (bs *Blobstore) Download(key string) (io.ReadCloser, error) { + file, err := os.Open(bs.path(key)) + if err != nil { + return nil, errors.Wrapf(err, "could not read blob '%s'", key) + } + return file, nil +} + +// Delete deletes a blob from the blobstore +func (bs *Blobstore) Delete(key string) error { + err := os.Remove(bs.path(key)) + if err != nil { + return errors.Wrapf(err, "could not delete blob '%s'", key) + } + return nil +} + +func (bs *Blobstore) path(key string) string { + return filepath.Join(bs.root, filepath.Clean(filepath.Join("/", key))) +} diff --git a/pkg/storage/fs/ocis/blobstore/blobstore_suite_test.go b/pkg/storage/fs/ocis/blobstore/blobstore_suite_test.go new file mode 100644 index 0000000000..4add51ce11 --- 
/dev/null +++ b/pkg/storage/fs/ocis/blobstore/blobstore_suite_test.go @@ -0,0 +1,31 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package blobstore_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestBlobstore(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Blobstore Suite") +} diff --git a/pkg/storage/fs/ocis/blobstore/blobstore_test.go b/pkg/storage/fs/ocis/blobstore/blobstore_test.go new file mode 100644 index 0000000000..7e213ba5af --- /dev/null +++ b/pkg/storage/fs/ocis/blobstore/blobstore_test.go @@ -0,0 +1,118 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package blobstore_test + +import ( + "bytes" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/cs3org/reva/pkg/storage/fs/ocis/blobstore" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Blobstore", func() { + var ( + tmpRoot string + key string + blobPath string + data []byte + + bs *blobstore.Blobstore + ) + + BeforeEach(func() { + var err error + tmpRoot, err = ioutil.TempDir("", "reva-unit-tests-*-root") + Expect(err).ToNot(HaveOccurred()) + + data = []byte("1234567890") + key = "foo" + blobPath = path.Join(tmpRoot, "blobs", key) + + bs, err = blobstore.New(path.Join(tmpRoot, "blobs")) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + if strings.HasPrefix(tmpRoot, os.TempDir()) { + os.RemoveAll(tmpRoot) + } + }) + + It("creates the root directory if it doesn't exist", func() { + _, err := os.Stat(path.Join(tmpRoot, "blobs")) + Expect(err).ToNot(HaveOccurred()) + }) + + Describe("Upload", func() { + It("writes the blob", func() { + err := bs.Upload(key, bytes.NewReader(data)) + Expect(err).ToNot(HaveOccurred()) + + writtenBytes, err := ioutil.ReadFile(blobPath) + Expect(err).ToNot(HaveOccurred()) + Expect(writtenBytes).To(Equal(data)) + }) + }) + + Context("with an existing blob", func() { + BeforeEach(func() { + Expect(ioutil.WriteFile(blobPath, data, 0700)).To(Succeed()) + }) + + Describe("Download", func() { + It("cleans the key", func() { + reader, err := bs.Download("../" + key) + Expect(err).ToNot(HaveOccurred()) + + readData, err := ioutil.ReadAll(reader) + Expect(err).ToNot(HaveOccurred()) + Expect(readData).To(Equal(data)) + }) + + It("returns a reader to the blob", func() { + reader, err := bs.Download(key) + Expect(err).ToNot(HaveOccurred()) + + readData, err := ioutil.ReadAll(reader) + 
Expect(err).ToNot(HaveOccurred()) + Expect(readData).To(Equal(data)) + }) + }) + + Describe("Delete", func() { + It("deletes the blob", func() { + _, err := os.Stat(blobPath) + Expect(err).ToNot(HaveOccurred()) + + err = bs.Delete(key) + Expect(err).ToNot(HaveOccurred()) + + _, err = os.Stat(blobPath) + Expect(err).To(HaveOccurred()) + }) + }) + }) + +}) diff --git a/pkg/storage/fs/ocis/grants.go b/pkg/storage/fs/ocis/grants.go deleted file mode 100644 index 329a471165..0000000000 --- a/pkg/storage/fs/ocis/grants.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package ocis - -import ( - "context" - "path/filepath" - "strings" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/utils/ace" - "github.com/pkg/xattr" -) - -func (fs *ocisfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - log := appctx.GetLogger(ctx) - log.Debug().Interface("ref", ref).Interface("grant", g).Msg("AddGrant()") - var node *Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? tracked in https://github.com/cs3org/cs3apis/issues/92 - return rp.AddGrant || rp.UpdateGrant - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - np := fs.lu.toInternalPath(node.ID) - e := ace.FromGrant(g) - principal, value := e.Marshal() - if err := xattr.Set(np, grantPrefix+principal, value); err != nil { - return err - } - return fs.tp.Propagate(ctx, node) -} - -func (fs *ocisfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { - var node *Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.ListGrants - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - log := 
appctx.GetLogger(ctx) - np := fs.lu.toInternalPath(node.ID) - var attrs []string - if attrs, err = xattr.List(np); err != nil { - log.Error().Err(err).Msg("error listing attributes") - return nil, err - } - - log.Debug().Interface("attrs", attrs).Msg("read attributes") - - aces := extractACEsFromAttrs(ctx, np, attrs) - - grants = make([]*provider.Grant, 0, len(aces)) - for i := range aces { - grants = append(grants, aces[i].Grant()) - } - - return grants, nil -} - -func (fs *ocisfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - var node *Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.RemoveGrant - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - var attr string - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { - attr = grantPrefix + _groupAcePrefix + g.Grantee.GetGroupId().OpaqueId - } else { - attr = grantPrefix + _userAcePrefix + g.Grantee.GetUserId().OpaqueId - } - - np := fs.lu.toInternalPath(node.ID) - if err = xattr.Remove(np, attr); err != nil { - return - } - - return fs.tp.Propagate(ctx, node) -} - -func (fs *ocisfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? 
tracked in https://github.com/cs3org/cs3apis/issues/92 - return fs.AddGrant(ctx, ref, g) -} - -// extractACEsFromAttrs reads ACEs in the list of attrs from the node -func extractACEsFromAttrs(ctx context.Context, fsfn string, attrs []string) (entries []*ace.ACE) { - log := appctx.GetLogger(ctx) - entries = []*ace.ACE{} - for i := range attrs { - if strings.HasPrefix(attrs[i], grantPrefix) { - var value []byte - var err error - if value, err = xattr.Get(fsfn, attrs[i]); err != nil { - log.Error().Err(err).Str("attr", attrs[i]).Msg("could not read attribute") - continue - } - var e *ace.ACE - principal := attrs[i][len(grantPrefix):] - if e, err = ace.Unmarshal(principal, value); err != nil { - log.Error().Err(err).Str("principal", principal).Str("attr", attrs[i]).Msg("could not unmarshal ace") - continue - } - entries = append(entries, e) - } - } - return -} diff --git a/pkg/storage/fs/ocis/interfaces.go b/pkg/storage/fs/ocis/interfaces.go deleted file mode 100644 index 48b2aaf177..0000000000 --- a/pkg/storage/fs/ocis/interfaces.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package ocis - -import ( - "context" - "os" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" -) - -// TODO the different aspects of a storage: Tree, Lookup and Permissions should be able to be reusable -// Below is a start of Interfaces that needs to be worked out further - -// TreePersistence is used to manage a tree hierarchy -type TreePersistence interface { - GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) - GetMD(ctx context.Context, node *Node) (os.FileInfo, error) - ListFolder(ctx context.Context, node *Node) ([]*Node, error) - //CreateHome(owner *userpb.UserId) (n *Node, err error) - CreateDir(ctx context.Context, node *Node) (err error) - //CreateReference(ctx context.Context, node *Node, targetURI *url.URL) error - Move(ctx context.Context, oldNode *Node, newNode *Node) (err error) - Delete(ctx context.Context, node *Node) (err error) - - Propagate(ctx context.Context, node *Node) (err error) -} - -// Lookup is used to encapsulate path transformations -/* -type Lookup interface { - NodeFromResource(ctx context.Context, ref *provider.Reference) (node *Node, err error) - NodeFromID(ctx context.Context, id *provider.ResourceId) (node *Node, err error) - NodeFromPath(ctx context.Context, fn string) (node *Node, err error) - Path(ctx context.Context, node *Node) (path string, err error) - - // HomeNode returns the currently logged in users home node - // requires EnableHome to be true - HomeNode(ctx context.Context) (node *Node, err error) - - // RootNode returns the storage root node - RootNode(ctx context.Context) (node *Node, err error) - - // HomeOrRootNode returns the users home node when home support is enabled. 
- // it returns the storages root node otherwise - HomeOrRootNode(ctx context.Context) (node *Node, err error) -} -*/ diff --git a/pkg/storage/fs/ocis/lookup.go b/pkg/storage/fs/ocis/lookup.go deleted file mode 100644 index a7414f086f..0000000000 --- a/pkg/storage/fs/ocis/lookup.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package ocis - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/cs3org/reva/pkg/user" -) - -// Lookup implements transformations from filepath to node and back -type Lookup struct { - Options *Options -} - -// NodeFromResource takes in a request path or request id and converts it to a Node -func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*Node, error) { - if ref.GetPath() != "" { - return lu.NodeFromPath(ctx, ref.GetPath()) - } - - if ref.GetId() != nil { - return lu.NodeFromID(ctx, ref.GetId()) - } - - // reference is invalid - return nil, fmt.Errorf("invalid reference %+v", ref) -} - -// NodeFromPath converts a filename into a Node -func (lu *Lookup) NodeFromPath(ctx context.Context, fn string) (node *Node, err error) { - log := appctx.GetLogger(ctx) - log.Debug().Interface("fn", fn).Msg("NodeFromPath()") - - if node, err = lu.HomeOrRootNode(ctx); err != nil { - return - } - - // TODO collect permissions of the current user on every segment - if fn != "/" { - node, err = lu.WalkPath(ctx, node, fn, func(ctx context.Context, n *Node) error { - log.Debug().Interface("node", n).Msg("NodeFromPath() walk") - return nil - }) - } - - return -} - -// NodeFromID returns the internal path for the id -func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *Node, err error) { - if id == nil || id.OpaqueId == "" { - return nil, fmt.Errorf("invalid resource id %+v", id) - } - return ReadNode(ctx, lu, id.OpaqueId) -} - -// Path returns the path for node -func (lu *Lookup) Path(ctx context.Context, n *Node) (p string, err error) { - var root *Node - if root, err = lu.HomeOrRootNode(ctx); err != nil { - return - } - for n.ID != root.ID { - p = filepath.Join(n.Name, p) - if n, err = n.Parent(); 
err != nil { - appctx.GetLogger(ctx). - Error().Err(err). - Str("path", p). - Interface("node", n). - Msg("Path()") - return - } - } - return -} - -// RootNode returns the root node of the storage -func (lu *Lookup) RootNode(ctx context.Context) (node *Node, err error) { - return &Node{ - lu: lu, - ID: "root", - Name: "", - ParentID: "", - Exists: true, - }, nil -} - -// HomeNode returns the home node of a user -func (lu *Lookup) HomeNode(ctx context.Context) (node *Node, err error) { - if !lu.Options.EnableHome { - return nil, errtypes.NotSupported("ocisfs: home supported disabled") - } - - if node, err = lu.RootNode(ctx); err != nil { - return - } - node, err = lu.WalkPath(ctx, node, lu.mustGetUserLayout(ctx), nil) - return -} - -// WalkPath calls n.Child(segment) on every path segment in p starting at the node r -// If a function f is given it will be executed for every segment node, but not the root node r -func (lu *Lookup) WalkPath(ctx context.Context, r *Node, p string, f func(ctx context.Context, n *Node) error) (*Node, error) { - segments := strings.Split(strings.Trim(p, "/"), "/") - var err error - for i := range segments { - if r, err = r.Child(segments[i]); err != nil { - return r, err - } - // if an intermediate node is missing return not found - if !r.Exists && i < len(segments)-1 { - return r, errtypes.NotFound(segments[i]) - } - if f != nil { - if err = f(ctx, r); err != nil { - return r, err - } - } - } - return r, nil -} - -// HomeOrRootNode returns the users home node when home support is enabled. 
-// it returns the storages root node otherwise -func (lu *Lookup) HomeOrRootNode(ctx context.Context) (node *Node, err error) { - if lu.Options.EnableHome { - return lu.HomeNode(ctx) - } - return lu.RootNode(ctx) -} - -func (lu *Lookup) mustGetUserLayout(ctx context.Context) string { - u := user.ContextMustGetUser(ctx) - return templates.WithUser(u, lu.Options.UserLayout) -} - -func (lu *Lookup) toInternalPath(id string) string { - return filepath.Join(lu.Options.Root, "nodes", id) -} diff --git a/pkg/storage/fs/ocis/metadata.go b/pkg/storage/fs/ocis/metadata.go deleted file mode 100644 index 2fb2e879d3..0000000000 --- a/pkg/storage/fs/ocis/metadata.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package ocis - -import ( - "context" - "fmt" - "path/filepath" - "strconv" - "strings" - "time" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/user" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -func parseMTime(v string) (t time.Time, err error) { - p := strings.SplitN(v, ".", 2) - var sec, nsec int64 - if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { - if len(p) > 1 { - nsec, err = strconv.ParseInt(p[1], 10, 64) - } - } - return time.Unix(sec, nsec), err -} - -func (fs *ocisfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { - n, err := fs.lu.NodeFromResource(ctx, ref) - if err != nil { - return errors.Wrap(err, "ocisfs: error resolving ref") - } - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return err - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - // TODO add explicit SetArbitraryMetadata grant to CS3 api, tracked in https://github.com/cs3org/cs3apis/issues/91 - return rp.InitiateFileUpload - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - nodePath := n.lu.toInternalPath(n.ID) - - errs := []error{} - // TODO should we really continue updating when an error occurs? - if md.Metadata != nil { - if val, ok := md.Metadata["mtime"]; ok { - delete(md.Metadata, "mtime") - err := n.SetMtime(ctx, val) - if err != nil { - errs = append(errs, errors.Wrap(err, "could not set mtime")) - } - } - // TODO(jfd) special handling for atime? - // TODO(jfd) allow setting birth time (btime)? - // TODO(jfd) any other metadata that is interesting? fileid? 
- // TODO unset when file is updated - // TODO unset when folder is updated or add timestamp to etag? - if val, ok := md.Metadata["etag"]; ok { - delete(md.Metadata, "etag") - err := n.SetEtag(ctx, val) - if err != nil { - errs = append(errs, errors.Wrap(err, "could not set etag")) - } - } - if val, ok := md.Metadata[_favoriteKey]; ok { - delete(md.Metadata, _favoriteKey) - if u, ok := user.ContextGetUser(ctx); ok { - if uid := u.GetId(); uid != nil { - if err := n.SetFavorite(uid, val); err != nil { - sublog.Error().Err(err). - Interface("user", u). - Msg("could not set favorite flag") - errs = append(errs, errors.Wrap(err, "could not set favorite flag")) - } - } else { - sublog.Error().Interface("user", u).Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - sublog.Error().Interface("user", u).Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - } - } - for k, v := range md.Metadata { - attrName := metadataPrefix + k - if err = xattr.Set(nodePath, attrName, []byte(v)); err != nil { - errs = append(errs, errors.Wrap(err, "ocisfs: could not set metadata attribute "+attrName+" to "+k)) - } - } - - switch len(errs) { - case 0: - return fs.tp.Propagate(ctx, n) - case 1: - // TODO Propagate if anything changed - return errs[0] - default: - // TODO Propagate if anything changed - // TODO how to return multiple errors? 
- return errors.New("multiple errors occurred, see log for details") - } -} - -func (fs *ocisfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { - n, err := fs.lu.NodeFromResource(ctx, ref) - if err != nil { - return errors.Wrap(err, "ocisfs: error resolving ref") - } - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return err - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - // TODO use SetArbitraryMetadata grant to CS3 api, tracked in https://github.com/cs3org/cs3apis/issues/91 - return rp.InitiateFileUpload - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - nodePath := n.lu.toInternalPath(n.ID) - errs := []error{} - for _, k := range keys { - switch k { - case _favoriteKey: - if u, ok := user.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Remove(nodePath, fa); err != nil { - sublog.Error().Err(err). - Interface("user", u). - Str("key", fa). - Msg("could not unset favorite flag") - errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) - } - } else { - sublog.Error(). - Interface("user", u). - Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - sublog.Error(). - Interface("user", u). 
- Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - default: - if err = xattr.Remove(nodePath, metadataPrefix+k); err != nil { - // a non-existing attribute will return an error, which we can ignore - // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) - if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - sublog.Error().Err(err). - Str("key", k). - Msg("could not unset metadata") - errs = append(errs, errors.Wrap(err, "could not unset metadata")) - } - } - } - } - switch len(errs) { - case 0: - return fs.tp.Propagate(ctx, n) - case 1: - // TODO Propagate if anything changed - return errs[0] - default: - // TODO Propagate if anything changed - // TODO how to return multiple errors? - return errors.New("multiple errors occurred, see log for details") - } -} diff --git a/pkg/storage/fs/ocis/node.go b/pkg/storage/fs/ocis/node.go deleted file mode 100644 index 6e7d2854bd..0000000000 --- a/pkg/storage/fs/ocis/node.go +++ /dev/null @@ -1,912 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package ocis - -import ( - "context" - "crypto/md5" - "encoding/hex" - "fmt" - "hash" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - "github.com/cs3org/reva/internal/grpc/services/storageprovider" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/mime" - "github.com/cs3org/reva/pkg/storage/utils/ace" - "github.com/cs3org/reva/pkg/user" - "github.com/pkg/errors" - "github.com/pkg/xattr" - "github.com/rs/zerolog/log" -) - -const ( - _shareTypesKey = "http://owncloud.org/ns/share-types" - _userShareType = "0" - - _favoriteKey = "http://owncloud.org/ns/favorite" - _checksumsKey = "http://owncloud.org/ns/checksums" - _quotaKey = "quota" - - _quotaUncalculated = "-1" - _quotaUnknown = "-2" - _quotaUnlimited = "-3" -) - -// Node represents a node in the tree and provides methods to get a Parent or Child instance -type Node struct { - lu *Lookup - ParentID string - ID string - Name string - owner *userpb.UserId - Exists bool -} - -func (n *Node) writeMetadata(owner *userpb.UserId) (err error) { - nodePath := n.lu.toInternalPath(n.ID) - if err = xattr.Set(nodePath, parentidAttr, []byte(n.ParentID)); err != nil { - return errors.Wrap(err, "ocisfs: could not set parentid attribute") - } - if err = xattr.Set(nodePath, nameAttr, []byte(n.Name)); err != nil { - return errors.Wrap(err, "ocisfs: could not set name attribute") - } - if owner == nil { - if err = xattr.Set(nodePath, ownerIDAttr, []byte("")); err != nil { - return errors.Wrap(err, "ocisfs: could not set empty owner id attribute") - } - if err = xattr.Set(nodePath, ownerIDPAttr, []byte("")); err != nil { - return errors.Wrap(err, "ocisfs: could not set empty owner idp attribute") - } - } else { - if err = xattr.Set(nodePath, ownerIDAttr, 
[]byte(owner.OpaqueId)); err != nil { - return errors.Wrap(err, "ocisfs: could not set owner id attribute") - } - if err = xattr.Set(nodePath, ownerIDPAttr, []byte(owner.Idp)); err != nil { - return errors.Wrap(err, "ocisfs: could not set owner idp attribute") - } - } - return -} - -// ReadRecycleItem reads a recycle item as a node -// TODO refactor the returned params into Node properties? would make all the path transformations go away... -func ReadRecycleItem(ctx context.Context, lu *Lookup, key string) (n *Node, trashItem string, deletedNodePath string, origin string, err error) { - - if key == "" { - return nil, "", "", "", errtypes.InternalError("key is empty") - } - - kp := strings.SplitN(key, ":", 2) - if len(kp) != 2 { - appctx.GetLogger(ctx).Error().Err(err).Str("key", key).Msg("malformed key") - return - } - trashItem = filepath.Join(lu.Options.Root, "trash", kp[0], kp[1]) - - var link string - link, err = os.Readlink(trashItem) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") - return - } - parts := strings.SplitN(filepath.Base(link), ".T.", 2) - if len(parts) != 2 { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Interface("parts", parts).Msg("malformed trash link") - return - } - - n = &Node{ - lu: lu, - ID: parts[0], - } - - deletedNodePath = lu.toInternalPath(filepath.Base(link)) - - // lookup parent id in extended attributes - var attrBytes []byte - if attrBytes, err = xattr.Get(deletedNodePath, parentidAttr); err == nil { - n.ParentID = string(attrBytes) - } else { - return - } - // lookup name in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, nameAttr); err == nil { - n.Name = string(attrBytes) - } else { - return - } - // lookup ownerId in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, ownerIDAttr); err == nil { - n.owner = &userpb.UserId{} - n.owner.OpaqueId = string(attrBytes) - } else { - return - } - // 
lookup ownerIdp in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, ownerIDPAttr); err == nil { - if n.owner == nil { - n.owner = &userpb.UserId{} - } - n.owner.Idp = string(attrBytes) - } else { - return - } - - // get origin node - origin = "/" - - // lookup origin path in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, trashOriginAttr); err == nil { - origin = string(attrBytes) - } else { - log.Error().Err(err).Str("trashItem", trashItem).Str("link", link).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /") - } - return -} - -// ReadNode creates a new instance from an id and checks if it exists -func ReadNode(ctx context.Context, lu *Lookup, id string) (n *Node, err error) { - n = &Node{ - lu: lu, - ID: id, - } - - nodePath := lu.toInternalPath(n.ID) - - // lookup parent id in extended attributes - var attrBytes []byte - attrBytes, err = xattr.Get(nodePath, parentidAttr) - switch { - case err == nil: - n.ParentID = string(attrBytes) - case isNoData(err): - return nil, errtypes.InternalError(err.Error()) - case isNotFound(err): - return n, nil // swallow not found, the node defaults to exists = false - default: - return nil, errtypes.InternalError(err.Error()) - } - // lookup name in extended attributes - if attrBytes, err = xattr.Get(nodePath, nameAttr); err == nil { - n.Name = string(attrBytes) - } else { - return - } - - var root *Node - if root, err = lu.HomeOrRootNode(ctx); err != nil { - return - } - parentID := n.ParentID - - log := appctx.GetLogger(ctx) - for parentID != root.ID { - log.Debug().Interface("node", n).Str("root.ID", root.ID).Msg("ReadNode()") - // walk to root to check node is not part of a deleted subtree - - if attrBytes, err = xattr.Get(lu.toInternalPath(parentID), parentidAttr); err == nil { - parentID = string(attrBytes) - log.Debug().Interface("node", n).Str("root.ID", root.ID).Str("parentID", parentID).Msg("ReadNode() found parent") - } else { - 
log.Error().Err(err).Interface("node", n).Str("root.ID", root.ID).Msg("ReadNode()") - if isNotFound(err) { - return nil, errtypes.NotFound(err.Error()) - } - return - } - } - - n.Exists = true - log.Debug().Interface("node", n).Msg("ReadNode() found node") - - return -} - -// Child returns the child node with the given name -func (n *Node) Child(name string) (c *Node, err error) { - c = &Node{ - lu: n.lu, - ParentID: n.ID, - Name: name, - } - var link string - if link, err = os.Readlink(filepath.Join(n.lu.toInternalPath(n.ID), name)); os.IsNotExist(err) { - err = nil // if the file does not exist we return a node that has Exists = false - return - } - if err != nil { - err = errors.Wrap(err, "ocisfs: Wrap: readlink error") - return - } - if strings.HasPrefix(link, "../") { - c.Exists = true - c.ID = filepath.Base(link) - } else { - err = fmt.Errorf("ocisfs: expected '../ prefix, got' %+v", link) - } - return -} - -// Parent returns the parent node -func (n *Node) Parent() (p *Node, err error) { - if n.ParentID == "" { - return nil, fmt.Errorf("ocisfs: root has no parent") - } - p = &Node{ - lu: n.lu, - ID: n.ParentID, - } - - parentPath := n.lu.toInternalPath(n.ParentID) - - // lookup parent id in extended attributes - var attrBytes []byte - if attrBytes, err = xattr.Get(parentPath, parentidAttr); err == nil { - p.ParentID = string(attrBytes) - } else { - return - } - // lookup name in extended attributes - if attrBytes, err = xattr.Get(parentPath, nameAttr); err == nil { - p.Name = string(attrBytes) - } else { - return - } - - // check node exists - if _, err := os.Stat(parentPath); err == nil { - p.Exists = true - } - return -} - -// Owner returns the cached owner id or reads it from the extended attributes -// TODO can be private as only the AsResourceInfo uses it -func (n *Node) Owner() (o *userpb.UserId, err error) { - if n.owner != nil { - return n.owner, nil - } - - // FIXME ... do we return the owner of the reference or the owner of the target? 
- // we don't really know the owner of the target ... and as the reference may point anywhere we cannot really find out - // but what are the permissions? all? none? the gateway has to fill in? - // TODO what if this is a reference? - nodePath := n.lu.toInternalPath(n.ID) - // lookup parent id in extended attributes - var attrBytes []byte - // lookup name in extended attributes - if attrBytes, err = xattr.Get(nodePath, ownerIDAttr); err == nil { - if n.owner == nil { - n.owner = &userpb.UserId{} - } - n.owner.OpaqueId = string(attrBytes) - } else { - return - } - // lookup name in extended attributes - if attrBytes, err = xattr.Get(nodePath, ownerIDPAttr); err == nil { - if n.owner == nil { - n.owner = &userpb.UserId{} - } - n.owner.Idp = string(attrBytes) - } else { - return - } - return n.owner, err -} - -// PermissionSet returns the permission set for the current user -// the parent nodes are not taken into account -func (n *Node) PermissionSet(ctx context.Context) *provider.ResourcePermissions { - u, ok := user.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") - return noPermissions - } - if o, _ := n.Owner(); isSameUserID(u.Id, o) { - return ownerPermissions - } - // read the permissions for the current user from the acls of the current node - if np, err := n.ReadUserPermissions(ctx, u); err == nil { - return np - } - return noPermissions -} - -// calculateEtag returns a hash of fileid + tmtime (or mtime) -func calculateEtag(nodeID string, tmTime time.Time) (string, error) { - h := md5.New() - if _, err := io.WriteString(h, nodeID); err != nil { - return "", err - } - if tb, err := tmTime.UTC().MarshalBinary(); err == nil { - if _, err := h.Write(tb); err != nil { - return "", err - } - } else { - return "", err - } - return fmt.Sprintf(`"%x"`, h.Sum(nil)), nil -} - -// SetMtime sets the mtime and atime of a node -func (n *Node) SetMtime(ctx context.Context, mtime 
string) error { - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - if mt, err := parseMTime(mtime); err == nil { - nodePath := n.lu.toInternalPath(n.ID) - // updating mtime also updates atime - if err := os.Chtimes(nodePath, mt, mt); err != nil { - sublog.Error().Err(err). - Time("mtime", mt). - Msg("could not set mtime") - return errors.Wrap(err, "could not set mtime") - } - } else { - sublog.Error().Err(err). - Str("mtime", mtime). - Msg("could not parse mtime") - return errors.Wrap(err, "could not parse mtime") - } - return nil -} - -// SetEtag sets the temporary etag of a node if it differs from the current etag -func (n *Node) SetEtag(ctx context.Context, val string) (err error) { - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - nodePath := n.lu.toInternalPath(n.ID) - var tmTime time.Time - if tmTime, err = n.GetTMTime(); err != nil { - // no tmtime, use mtime - var fi os.FileInfo - if fi, err = os.Lstat(nodePath); err != nil { - return - } - tmTime = fi.ModTime() - } - var etag string - if etag, err = calculateEtag(n.ID, tmTime); err != nil { - return - } - - // sanitize etag - val = fmt.Sprintf("\"%s\"", strings.Trim(val, "\"")) - if etag == val { - sublog.Debug(). - Str("etag", val). - Msg("ignoring request to update identical etag") - return nil - } - // etag is only valid until the calculated etag changes, is part of propagation - return xattr.Set(nodePath, tmpEtagAttr, []byte(val)) -} - -// SetFavorite sets the favorite for the current user -// TODO we should not mess with the user here ... the favorites is now a user specific property for a file -// that cannot be mapped to extended attributes without leaking who has marked a file as a favorite -// it is a specific case of a tag, which is user individual as well -// TODO there are different types of tags -// 1. public that are managed by everyone -// 2. private tags that are only visible to the user -// 3. 
system tags that are only visible to the system -// 4. group tags that are only visible to a group ... -// urgh ... well this can be solved using different namespaces -// 1. public = p: -// 2. private = u:: for user specific -// 3. system = s: for system -// 4. group = g:: -// 5. app? = a:: for apps? -// obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem -// public tags can be mapped to extended attributes -func (n *Node) SetFavorite(uid *userpb.UserId, val string) error { - nodePath := n.lu.toInternalPath(n.ID) - // the favorite flag is specific to the user, so we need to incorporate the userid - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - return xattr.Set(nodePath, fa, []byte(val)) -} - -// AsResourceInfo return the node as CS3 ResourceInfo -func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissions, mdKeys []string) (ri *provider.ResourceInfo, err error) { - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - - var fn string - nodePath := n.lu.toInternalPath(n.ID) - - var fi os.FileInfo - - nodeType := provider.ResourceType_RESOURCE_TYPE_INVALID - if fi, err = os.Lstat(nodePath); err != nil { - return - } - - var target []byte - switch { - case fi.IsDir(): - if target, err = xattr.Get(nodePath, referenceAttr); err == nil { - nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE - } else { - nodeType = provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - case fi.Mode().IsRegular(): - nodeType = provider.ResourceType_RESOURCE_TYPE_FILE - case fi.Mode()&os.ModeSymlink != 0: - nodeType = provider.ResourceType_RESOURCE_TYPE_SYMLINK - // TODO reference using ext attr on a symlink - // nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE - } - - id := &provider.ResourceId{OpaqueId: n.ID} - - fn, err = n.lu.Path(ctx, n) - if err != nil { - return nil, err - } - - ri = &provider.ResourceInfo{ - Id: id, - Path: fn, - Type: nodeType, - 
MimeType: mime.Detect(nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER, fn), - Size: uint64(fi.Size()), - Target: string(target), - PermissionSet: rp, - } - if nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER { - ts, err := n.GetTreeSize() - if err == nil { - ri.Size = ts - } else { - ri.Size = 0 // make dirs always return 0 if it is unknown - sublog.Debug().Err(err).Msg("could not read treesize") - } - } - - if ri.Owner, err = n.Owner(); err != nil { - sublog.Debug().Err(err).Msg("could not determine owner") - } - - // TODO make etag of files use fileid and checksum - - var tmTime time.Time - if tmTime, err = n.GetTMTime(); err != nil { - // no tmtime, use mtime - tmTime = fi.ModTime() - } - - // use temporary etag if it is set - if b, err := xattr.Get(nodePath, tmpEtagAttr); err == nil { - ri.Etag = fmt.Sprintf(`"%x"`, string(b)) // TODO why do we convert string(b)? is the temporary etag stored as string? -> should we use bytes? use hex.EncodeToString? - } else if ri.Etag, err = calculateEtag(n.ID, tmTime); err != nil { - sublog.Debug().Err(err).Msg("could not calculate etag") - } - - // mtime uses tmtime if present - // TODO expose mtime and tmtime separately? 
- un := tmTime.UnixNano() - ri.Mtime = &types.Timestamp{ - Seconds: uint64(un / 1000000000), - Nanos: uint32(un % 1000000000), - } - - mdKeysMap := make(map[string]struct{}) - for _, k := range mdKeys { - mdKeysMap[k] = struct{}{} - } - - var returnAllKeys bool - if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok { - returnAllKeys = true - } - - metadata := map[string]string{} - - // read favorite flag for the current user - if _, ok := mdKeysMap[_favoriteKey]; returnAllKeys || ok { - favorite := "" - if u, ok := user.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if val, err := xattr.Get(nodePath, fa); err == nil { - sublog.Debug(). - Str("favorite", fa). - Msg("found favorite flag") - favorite = string(val) - } - } else { - sublog.Error().Err(errtypes.UserRequired("userrequired")).Msg("user has no id") - } - } else { - sublog.Error().Err(errtypes.UserRequired("userrequired")).Msg("error getting user from ctx") - } - metadata[_favoriteKey] = favorite - } - - // share indicator - if _, ok := mdKeysMap[_shareTypesKey]; returnAllKeys || ok { - if n.hasUserShares(ctx) { - metadata[_shareTypesKey] = _userShareType - } - } - - // checksums - if _, ok := mdKeysMap[_checksumsKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_FILE) && returnAllKeys || ok { - // TODO which checksum was requested? sha1 adler32 or md5? for now hardcode sha1? 
- readChecksumIntoResourceChecksum(ctx, nodePath, storageprovider.XSSHA1, ri) - readChecksumIntoOpaque(ctx, nodePath, storageprovider.XSMD5, ri) - readChecksumIntoOpaque(ctx, nodePath, storageprovider.XSAdler32, ri) - } - - // quota - if _, ok := mdKeysMap[_quotaKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER) && returnAllKeys || ok { - var quotaPath string - if n.lu.Options.EnableHome { - if r, err := n.lu.HomeNode(ctx); err == nil { - quotaPath = n.lu.toInternalPath(r.ID) - readQuotaIntoOpaque(ctx, quotaPath, ri) - } else { - sublog.Error().Err(err).Msg("error determining home node for quota") - } - } else { - if r, err := n.lu.RootNode(ctx); err == nil { - quotaPath = n.lu.toInternalPath(r.ID) - readQuotaIntoOpaque(ctx, quotaPath, ri) - } else { - sublog.Error().Err(err).Msg("error determining root node for quota") - } - } - } - - // only read the requested metadata attributes - attrs, err := xattr.List(nodePath) - if err != nil { - sublog.Error().Err(err).Msg("error getting list of extended attributes") - } else { - for i := range attrs { - // filter out non-custom properties - if !strings.HasPrefix(attrs[i], metadataPrefix) { - continue - } - // only read when key was requested - k := attrs[i][len(metadataPrefix):] - if _, ok := mdKeysMap[k]; returnAllKeys || ok { - if val, err := xattr.Get(nodePath, attrs[i]); err == nil { - metadata[k] = string(val) - } else { - sublog.Error().Err(err). - Str("entry", attrs[i]). - Msg("error retrieving xattr metadata") - } - } - - } - } - ri.ArbitraryMetadata = &provider.ArbitraryMetadata{ - Metadata: metadata, - } - - sublog.Debug(). - Interface("ri", ri). 
- Msg("AsResourceInfo") - - return ri, nil -} - -func readChecksumIntoResourceChecksum(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, checksumPrefix+algo) - switch { - case err == nil: - ri.Checksum = &provider.ResourceChecksum{ - Type: storageprovider.PKG2GRPCXS(algo), - Sum: hex.EncodeToString(v), - } - case isNoData(err): - appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("checksum not set") - case isNotFound(err): - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("file not fount") - default: - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("could not read checksum") - } -} -func readChecksumIntoOpaque(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, checksumPrefix+algo) - switch { - case err == nil: - if ri.Opaque == nil { - ri.Opaque = &types.Opaque{ - Map: map[string]*types.OpaqueEntry{}, - } - } - ri.Opaque.Map[algo] = &types.OpaqueEntry{ - Decoder: "plain", - Value: []byte(hex.EncodeToString(v)), - } - case isNoData(err): - appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("checksum not set") - case isNotFound(err): - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("file not fount") - default: - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("could not read checksum") - } -} - -// quota is always stored on the root node -func readQuotaIntoOpaque(ctx context.Context, nodePath string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, quotaAttr) - switch { - case err == nil: - // make sure we have a proper signed int - // we use the same magic numbers to indicate: - // -1 = uncalculated - // -2 = unknown - // -3 = unlimited - if _, err := strconv.ParseInt(string(v), 10, 64); err 
== nil { - if ri.Opaque == nil { - ri.Opaque = &types.Opaque{ - Map: map[string]*types.OpaqueEntry{}, - } - } - ri.Opaque.Map[_quotaKey] = &types.OpaqueEntry{ - Decoder: "plain", - Value: v, - } - } else { - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("quota", string(v)).Msg("malformed quota") - } - case isNoData(err): - appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Msg("quota not set") - case isNotFound(err): - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("file not found when reading quota") - default: - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read quota") - } -} - -// CalculateTreeSize will sum up the size of all children of a node -func (n *Node) CalculateTreeSize(ctx context.Context) (uint64, error) { - var size uint64 - // TODO check if this is a dir? - nodePath := n.lu.toInternalPath(n.ID) - - f, err := os.Open(nodePath) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not open dir") - return 0, err - } - defer f.Close() - - names, err := f.Readdirnames(0) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read dirnames") - return 0, err - } - for i := range names { - cPath := filepath.Join(nodePath, names[i]) - info, err := os.Stat(cPath) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not stat child entry") - continue // continue after an error - } - if !info.IsDir() { - size += uint64(info.Size()) - } else { - // read from attr - var b []byte - // xattr.Get will follow the symlink - if b, err = xattr.Get(cPath, treesizeAttr); err != nil { - // TODO recursively descend and recalculate treesize - continue // continue after an error - } - csize, err := strconv.ParseUint(string(b), 10, 64) - if err != nil { - // TODO recursively descend and recalculate treesize - continue // continue after an error - } - size += 
csize - } - } - return size, err - -} - -// HasPropagation checks if the propagation attribute exists and is set to "1" -func (n *Node) HasPropagation() (propagation bool) { - if b, err := xattr.Get(n.lu.toInternalPath(n.ID), propagationAttr); err == nil { - return string(b) == "1" - } - return false -} - -// GetTMTime reads the tmtime from the extended attributes -func (n *Node) GetTMTime() (tmTime time.Time, err error) { - var b []byte - if b, err = xattr.Get(n.lu.toInternalPath(n.ID), treeMTimeAttr); err != nil { - return - } - return time.Parse(time.RFC3339Nano, string(b)) -} - -// SetTMTime writes the tmtime to the extended attributes -func (n *Node) SetTMTime(t time.Time) (err error) { - return xattr.Set(n.lu.toInternalPath(n.ID), treeMTimeAttr, []byte(t.UTC().Format(time.RFC3339Nano))) -} - -// GetTreeSize reads the treesize from the extended attributes -func (n *Node) GetTreeSize() (treesize uint64, err error) { - var b []byte - if b, err = xattr.Get(n.lu.toInternalPath(n.ID), treesizeAttr); err != nil { - return - } - return strconv.ParseUint(string(b), 10, 64) -} - -// SetTreeSize writes the treesize to the extended attributes -func (n *Node) SetTreeSize(ts uint64) (err error) { - return xattr.Set(n.lu.toInternalPath(n.ID), treesizeAttr, []byte(strconv.FormatUint(ts, 10))) -} - -// SetChecksum writes the checksum with the given checksum type to the extended attributes -func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) { - return xattr.Set(n.lu.toInternalPath(n.ID), checksumPrefix+csType, h.Sum(nil)) -} - -// UnsetTempEtag removes the temporary etag attribute -func (n *Node) UnsetTempEtag() (err error) { - if err = xattr.Remove(n.lu.toInternalPath(n.ID), tmpEtagAttr); err != nil { - if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - return nil - } - } - return err -} - -// ReadUserPermissions will assemble the permissions for the current user on the given 
node without parent nodes -func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap *provider.ResourcePermissions, err error) { - // check if the current user is the owner - o, err := n.Owner() - if err != nil { - // TODO check if a parent folder has the owner set? - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") - return noPermissions, err - } - if o.OpaqueId == "" { - // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner - // TODO what if no owner is set but grants are present? - return noOwnerPermissions, nil - } - if isSameUserID(u.Id, o) { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") - return ownerPermissions, nil - } - - ap = &provider.ResourcePermissions{} - - // for an efficient group lookup convert the list of groups to a map - // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! - groupsMap := make(map[string]bool, len(u.Groups)) - for i := range u.Groups { - groupsMap[u.Groups[i]] = true - } - - var g *provider.Grant - - // we read all grantees from the node - var grantees []string - if grantees, err = n.ListGrantees(ctx); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("error listing grantees") - return nil, err - } - - // instead of making n getxattr syscalls we are going to list the acls and filter them here - // we have two options here: - // 1. we can start iterating over the acls / grants on the node or - // 2. we can iterate over the number of groups - // The current implementation tries to be defensive for cases where users have hundreds or thousands of groups, so we iterate over the existing acls. 
- userace := grantPrefix + _userAcePrefix + u.Id.OpaqueId - userFound := false - for i := range grantees { - switch { - // we only need to find the user once - case !userFound && grantees[i] == userace: - g, err = n.ReadGrant(ctx, grantees[i]) - case strings.HasPrefix(grantees[i], grantPrefix+_groupAcePrefix): // only check group grantees - gr := strings.TrimPrefix(grantees[i], grantPrefix+_groupAcePrefix) - if groupsMap[gr] { - g, err = n.ReadGrant(ctx, grantees[i]) - } else { - // no need to check attribute - continue - } - default: - // no need to check attribute - continue - } - - switch { - case err == nil: - addPermissions(ap, g.GetPermissions()) - case isNoData(err): - err = nil - appctx.GetLogger(ctx).Error().Interface("node", n).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing") - // continue with next segment - default: - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Str("grant", grantees[i]).Msg("error reading permissions") - // continue with next segment - } - } - - appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n).Interface("user", u).Msg("returning aggregated permissions") - return ap, nil -} - -// ListGrantees lists the grantees of the current node -// We don't want to wast time and memory by creating grantee objects. 
-// The function will return a list of opaque strings that can be used to make a ReadGrant call -func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error) { - var attrs []string - if attrs, err = xattr.List(n.lu.toInternalPath(n.ID)); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("error listing attributes") - return nil, err - } - for i := range attrs { - if strings.HasPrefix(attrs[i], grantPrefix) { - grantees = append(grantees, attrs[i]) - } - } - return -} - -// ReadGrant reads a CS3 grant -func (n *Node) ReadGrant(ctx context.Context, grantee string) (g *provider.Grant, err error) { - var b []byte - if b, err = xattr.Get(n.lu.toInternalPath(n.ID), grantee); err != nil { - return nil, err - } - var e *ace.ACE - if e, err = ace.Unmarshal(strings.TrimPrefix(grantee, grantPrefix), b); err != nil { - return nil, err - } - return e.Grant(), nil -} - -func (n *Node) hasUserShares(ctx context.Context) bool { - g, err := n.ListGrantees(ctx) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("hasUserShares: listGrantees") - return false - } - - for i := range g { - if strings.Contains(g[i], grantPrefix+_userAcePrefix) { - return true - } - } - return false -} diff --git a/pkg/storage/fs/ocis/ocis.go b/pkg/storage/fs/ocis/ocis.go index fe51235e81..bbce1e30e2 100644 --- a/pkg/storage/fs/ocis/ocis.go +++ b/pkg/storage/fs/ocis/ocis.go @@ -19,554 +19,31 @@ package ocis import ( - "context" - "io" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" + "path" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" "github.com/cs3org/reva/pkg/storage" + "github.com/cs3org/reva/pkg/storage/fs/ocis/blobstore" 
"github.com/cs3org/reva/pkg/storage/fs/registry" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/cs3org/reva/pkg/user" - "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -const ( - // TODO the below comment is currently copied from the owncloud driver, revisit - // Currently,extended file attributes have four separated - // namespaces (user, trusted, security and system) followed by a dot. - // A non root user can only manipulate the user. namespace, which is what - // we will use to store ownCloud specific metadata. To prevent name - // collisions with other apps We are going to introduce a sub namespace - // "user.ocis." - - ocisPrefix string = "user.ocis." - parentidAttr string = ocisPrefix + "parentid" - ownerIDAttr string = ocisPrefix + "owner.id" - ownerIDPAttr string = ocisPrefix + "owner.idp" - // the base name of the node - // updated when the file is renamed or moved - nameAttr string = ocisPrefix + "name" - - // grantPrefix is the prefix for sharing related extended attributes - grantPrefix string = ocisPrefix + "grant." - metadataPrefix string = ocisPrefix + "md." - - // favorite flag, per user - favPrefix string = ocisPrefix + "fav." - - // a temporary etag for a folder that is removed when the mtime propagation happens - tmpEtagAttr string = ocisPrefix + "tmp.etag" - referenceAttr string = ocisPrefix + "cs3.ref" // target of a cs3 reference - checksumPrefix string = ocisPrefix + "cs." // followed by the algorithm, eg. 
ocis.cs.sha1 - trashOriginAttr string = ocisPrefix + "trash.origin" // trash origin - - // we use a single attribute to enable or disable propagation of both: synctime and treesize - propagationAttr string = ocisPrefix + "propagation" - - // the tree modification time of the tree below this node, - // propagated when synctime_accounting is true and - // user.ocis.propagation=1 is set - // stored as a readable time.RFC3339Nano - treeMTimeAttr string = ocisPrefix + "tmtime" - - // the size of the tree below this node, - // propagated when treesize_accounting is true and - // user.ocis.propagation=1 is set - // stored as uint64, little endian - treesizeAttr string = ocisPrefix + "treesize" - - // the quota for the storage space / tree, regardless who accesses it - quotaAttr string = ocisPrefix + "quota" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" ) func init() { registry.Register("ocis", New) } -func parseConfig(m map[string]interface{}) (*Options, error) { - o := &Options{} - if err := mapstructure.Decode(m, o); err != nil { - err = errors.Wrap(err, "error decoding conf") - return nil, err - } - return o, nil -} - -func (o *Options) init(m map[string]interface{}) { - if o.UserLayout == "" { - o.UserLayout = "{{.Id.OpaqueId}}" - } - // ensure user layout has no starting or trailing / - o.UserLayout = strings.Trim(o.UserLayout, "/") - - if o.ShareFolder == "" { - o.ShareFolder = "/Shares" - } - // ensure share folder always starts with slash - o.ShareFolder = filepath.Join("/", o.ShareFolder) - - // c.DataDirectory should never end in / unless it is the root - o.Root = filepath.Clean(o.Root) -} - // New returns an implementation to of the storage.FS interface that talk to // a local filesystem. 
func New(m map[string]interface{}) (storage.FS, error) { - o, err := parseConfig(m) + o, err := options.New(m) if err != nil { return nil, err } - o.init(m) - - // create data paths for internal layout - dataPaths := []string{ - filepath.Join(o.Root, "nodes"), - // notes contain symlinks from nodes//uploads/ to ../../uploads/ - // better to keep uploads on a fast / volatile storage before a workflow finally moves them to the nodes dir - filepath.Join(o.Root, "uploads"), - filepath.Join(o.Root, "trash"), - } - for _, v := range dataPaths { - if err := os.MkdirAll(v, 0700); err != nil { - logger.New().Error().Err(err). - Str("path", v). - Msg("could not create data dir") - } - } - - lu := &Lookup{ - Options: o, - } - - // the root node has an empty name - // the root node has no parent - if err = createNode( - &Node{lu: lu, ID: "root"}, - &userv1beta1.UserId{ - OpaqueId: o.Owner, - }, - ); err != nil { - return nil, err - } - - tp, err := NewTree(lu) - if err != nil { - return nil, err - } - - return &ocisfs{ - tp: tp, - lu: lu, - o: o, - p: &Permissions{lu: lu}, - chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), - }, nil -} - -type ocisfs struct { - tp TreePersistence - lu *Lookup - o *Options - p *Permissions - chunkHandler *chunking.ChunkHandler -} - -func (fs *ocisfs) Shutdown(ctx context.Context) error { - return nil -} - -// TODO Document in the cs3 should we return quota or free space? 
-func (fs *ocisfs) GetQuota(ctx context.Context) (uint64, uint64, error) { - var node *Node - var err error - if node, err = fs.lu.HomeOrRootNode(ctx); err != nil { - return 0, 0, err - } - - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return 0, 0, err - } - - rp, err := fs.p.AssemblePermissions(ctx, node) - switch { - case err != nil: - return 0, 0, errtypes.InternalError(err.Error()) - case !rp.GetQuota: - return 0, 0, errtypes.PermissionDenied(node.ID) - } - - ri, err := node.AsResourceInfo(ctx, rp, []string{"treesize", "quota"}) - if err != nil { - return 0, 0, err - } - - quotaStr := _quotaUnknown - if ri.Opaque != nil && ri.Opaque.Map != nil && ri.Opaque.Map["quota"] != nil && ri.Opaque.Map["quota"].Decoder == "plain" { - quotaStr = string(ri.Opaque.Map["quota"].Value) - } - - avail, err := fs.getAvailableSize(fs.lu.toInternalPath(node.ID)) - if err != nil { - return 0, 0, err - } - total := avail + ri.Size - - switch { - case quotaStr == _quotaUncalculated, quotaStr == _quotaUnknown, quotaStr == _quotaUnlimited: - // best we can do is return current total - // TODO indicate unlimited total? -> in opaque data? 
- default: - if quota, err := strconv.ParseUint(quotaStr, 10, 64); err == nil { - if total > quota { - total = quota - } - } - } - return total, ri.Size, nil -} - -// CreateHome creates a new root node that has no parent id -func (fs *ocisfs) CreateHome(ctx context.Context) (err error) { - if !fs.o.EnableHome || fs.o.UserLayout == "" { - return errtypes.NotSupported("ocisfs: CreateHome() home supported disabled") - } - - var n, h *Node - if n, err = fs.lu.RootNode(ctx); err != nil { - return - } - h, err = fs.lu.WalkPath(ctx, n, fs.lu.mustGetUserLayout(ctx), func(ctx context.Context, n *Node) error { - if !n.Exists { - if err := fs.tp.CreateDir(ctx, n); err != nil { - return err - } - } - return nil - }) - if err != nil { - return - } - - // update the owner - u := user.ContextMustGetUser(ctx) - if err = h.writeMetadata(u.Id); err != nil { - return - } - - if fs.o.TreeTimeAccounting { - homePath := h.lu.toInternalPath(h.ID) - // mark the home node as the end of propagation - if err = xattr.Set(homePath, propagationAttr, []byte("1")); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", h).Msg("could not mark home as propagation root") - return - } - } - return -} - -// GetHome is called to look up the home path for a user -// It is NOT supposed to return the internal path but the external path -func (fs *ocisfs) GetHome(ctx context.Context) (string, error) { - if !fs.o.EnableHome || fs.o.UserLayout == "" { - return "", errtypes.NotSupported("ocisfs: GetHome() home supported disabled") - } - u := user.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.o.UserLayout) - return filepath.Join(fs.o.Root, layout), nil // TODO use a namespace? 
-} - -// Tree persistence - -// GetPathByID returns the fn pointed by the file id, without the internal namespace -func (fs *ocisfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { - return fs.tp.GetPathByID(ctx, id) -} - -func (fs *ocisfs) CreateDir(ctx context.Context, fn string) (err error) { - var n *Node - if n, err = fs.lu.NodeFromPath(ctx, fn); err != nil { - return - } - - if n.Exists { - return errtypes.AlreadyExists(fn) - } - - pn, err := n.Parent() - if err != nil { - return errors.Wrap(err, "ocisfs: error getting parent "+n.ParentID) - } - ok, err := fs.p.HasPermission(ctx, pn, func(rp *provider.ResourcePermissions) bool { - return rp.CreateContainer - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - err = fs.tp.CreateDir(ctx, n) - - if fs.o.TreeTimeAccounting { - nodePath := n.lu.toInternalPath(n.ID) - // mark the home node as the end of propagation - if err = xattr.Set(nodePath, propagationAttr, []byte("1")); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not mark node to propagate") - return - } - } - return -} - -// CreateReference creates a reference as a node folder with the target stored in extended attributes -// There is no difference between the /Shares folder and normal nodes because the storage is not supposed to be accessible without the storage provider. -// In effect everything is a shadow namespace. -// To mimic the eos end owncloud driver we only allow references as children of the "/Shares" folder -// TODO when home support is enabled should the "/Shares" folder still be listed? 
-func (fs *ocisfs) CreateReference(ctx context.Context, p string, targetURI *url.URL) (err error) { - - p = strings.Trim(p, "/") - parts := strings.Split(p, "/") - - if len(parts) != 2 { - return errtypes.PermissionDenied("ocisfs: references must be a child of the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) - } - - if parts[0] != strings.Trim(fs.o.ShareFolder, "/") { - return errtypes.PermissionDenied("ocisfs: cannot create references outside the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) - } - - // create Shares folder if it does not exist - var n *Node - if n, err = fs.lu.NodeFromPath(ctx, fs.o.ShareFolder); err != nil { - return errtypes.InternalError(err.Error()) - } else if !n.Exists { - if err = fs.tp.CreateDir(ctx, n); err != nil { - return - } - } - - if n, err = n.Child(parts[1]); err != nil { - return errtypes.InternalError(err.Error()) - } - - if n.Exists { - // TODO append increasing number to mountpoint name - return errtypes.AlreadyExists(p) - } - - if err = fs.tp.CreateDir(ctx, n); err != nil { - return - } - - internal := n.lu.toInternalPath(n.ID) - if err = xattr.Set(internal, referenceAttr, []byte(targetURI.String())); err != nil { - return errors.Wrapf(err, "ocisfs: error setting the target %s on the reference file %s", targetURI.String(), internal) - } - return nil -} - -func (fs *ocisfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) { - var oldNode, newNode *Node - if oldNode, err = fs.lu.NodeFromResource(ctx, oldRef); err != nil { - return - } - - if !oldNode.Exists { - err = errtypes.NotFound(filepath.Join(oldNode.ParentID, oldNode.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, oldNode, func(rp *provider.ResourcePermissions) bool { - return rp.Move - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(oldNode.ID) - } - - if newNode, err = fs.lu.NodeFromResource(ctx, newRef); err != nil { - 
return - } - if newNode.Exists { - err = errtypes.AlreadyExists(filepath.Join(newNode.ParentID, newNode.Name)) - return - } - - return fs.tp.Move(ctx, oldNode, newNode) -} - -func (fs *ocisfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (ri *provider.ResourceInfo, err error) { - var node *Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - rp, err := fs.p.AssemblePermissions(ctx, node) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !rp.Stat: - return nil, errtypes.PermissionDenied(node.ID) - } - - return node.AsResourceInfo(ctx, rp, mdKeys) -} - -func (fs *ocisfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) (finfos []*provider.ResourceInfo, err error) { - var node *Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - rp, err := fs.p.AssemblePermissions(ctx, node) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !rp.ListContainer: - return nil, errtypes.PermissionDenied(node.ID) - } - - var children []*Node - children, err = fs.tp.ListFolder(ctx, node) - if err != nil { - return - } - - for i := range children { - np := rp - // add this childs permissions - addPermissions(np, node.PermissionSet(ctx)) - if ri, err := children[i].AsResourceInfo(ctx, np, mdKeys); err == nil { - finfos = append(finfos, ri) - } - } - return -} - -func (fs *ocisfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { - var node *Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp 
*provider.ResourcePermissions) bool { - return rp.Delete - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - return fs.tp.Delete(ctx, node) -} - -// Data persistence - -func (fs *ocisfs) ContentPath(n *Node) string { - return n.lu.toInternalPath(n.ID) -} - -func (fs *ocisfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { - node, err := fs.lu.NodeFromResource(ctx, ref) + bs, err := blobstore.New(path.Join(o.Root, "blobs")) if err != nil { - return nil, errors.Wrap(err, "ocisfs: error resolving ref") - } - - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) return nil, err } - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.InitiateFileDownload - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - contentPath := fs.ContentPath(node) - - r, err := os.Open(contentPath) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(contentPath) - } - return nil, errors.Wrap(err, "ocisfs: error reading "+contentPath) - } - return r, nil -} - -// arbitrary metadata persistence in metadata.go - -// Version persistence in revisions.go - -// Trash persistence in recycle.go - -// share persistence in grants.go - -func (fs *ocisfs) copyMD(s string, t string) (err error) { - var attrs []string - if attrs, err = xattr.List(s); err != nil { - return err - } - for i := range attrs { - if strings.HasPrefix(attrs[i], ocisPrefix) { - var d []byte - if d, err = xattr.Get(s, attrs[i]); err != nil { - return err - } - if err = xattr.Set(t, attrs[i], d); err != nil { - return err - } - } - } - return nil -} - -func isSameUserID(i *userpb.UserId, j *userpb.UserId) bool { - switch { - case i == nil, j == nil: 
- return false - case i.OpaqueId == j.OpaqueId && i.Idp == j.Idp: - return true - default: - return false - } + return decomposedfs.NewDefault(m, bs) } diff --git a/pkg/storage/fs/ocis/ocis_suite_test.go b/pkg/storage/fs/ocis/ocis_suite_test.go new file mode 100644 index 0000000000..f42a46046a --- /dev/null +++ b/pkg/storage/fs/ocis/ocis_suite_test.go @@ -0,0 +1,31 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package ocis_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOcis(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Ocis Suite") +} diff --git a/pkg/storage/fs/ocis/ocis_test.go b/pkg/storage/fs/ocis/ocis_test.go index cc13efc343..6bb4b49971 100644 --- a/pkg/storage/fs/ocis/ocis_test.go +++ b/pkg/storage/fs/ocis/ocis_test.go @@ -16,159 +16,46 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-// +build storageRace - -package ocis +package ocis_test import ( - "context" - "fmt" "io/ioutil" "os" - "path" - "sync" - "testing" + "strings" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/user" - "github.com/stretchr/testify/assert" -) + "github.com/cs3org/reva/pkg/storage/fs/ocis" -// TestLackAdvisoryLocks demonstrates that access to a file -// is not mutually exclusive on the oCIS storage. -var ( - config = make(map[string]interface{}) - ctx context.Context - f, f1 *os.File - tmpDir string + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" ) -func TestMain(m *testing.M) { - var err error +var _ = Describe("Ocis", func() { + var ( + options map[string]interface{} + tmpRoot string + ) - // prepare storage - { - tmpDir, _ = ioutil.TempDir("", "ocis_fs_unittests") - { - config["root"] = tmpDir - config["enable_home"] = false - config["user_layout"] = "{{.Id.OpaqueId}}" - config["owner"] = "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c" - } - } + BeforeEach(func() { + tmpRoot, err := ioutil.TempDir("", "reva-unit-tests-*-root") + Expect(err).ToNot(HaveOccurred()) - // prepare context - { - u := &userpb.User{ - Id: &userpb.UserId{ - OpaqueId: "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c", - }, - Username: "test", - Mail: "marie@example.org", - DisplayName: "Marie Curie", - Groups: []string{ - "radium-lovers", - "polonium-lovers", - "physics-lovers", - }, + options = map[string]interface{}{ + "root": tmpRoot, + "enable_home": true, + "share_folder": "/Shares", } - ctx = user.ContextSetUser(context.Background(), u) - } - - // do not do this. Prepare f0 - if err = ioutil.WriteFile(fmt.Sprintf("%s/%s", tmpDir, "f.lol"), []byte("test"), 0644); err != nil { - panic(err) - } - f, err = os.Open(fmt.Sprintf("%s/%s", tmpDir, "f.lol")) - if err != nil { - panic(err) - } - - // do not do this. 
Prepare f1 - if err = ioutil.WriteFile(fmt.Sprintf("%s/%s", tmpDir, "f1.lol"), []byte("another run"), 0644); err != nil { - panic(err) - } - f1, err = os.Open(fmt.Sprintf("%s/%s", tmpDir, "f1.lol")) - if err != nil { - panic(err) - } - - fmt.Printf("%s\n", tmpDir) - m.Run() - - cts, err := ioutil.ReadFile(path.Join(tmpDir, "nodes", "root", "uploaded.txt")) - if err != nil { - panic(err) - } - fmt.Println(string(cts)) -} - -// Scenario: start 2 uploads, pause the first one, let the second one finish first, -// resume the first one at some point in time. Both uploads should finish. -// Needs to result in 2 versions, last finished is the most recent version. -func TestTwoUploadsVersioning(t *testing.T) { - //runtime.GOMAXPROCS(1) // uncomment to remove concurrency and see revisions working. - ofs, err := New(config) - if err != nil { - t.Error(err) - } - - wg := &sync.WaitGroup{} - wg.Add(2) - - // upload file with contents: "test" - go func(wg *sync.WaitGroup) { - ofs.Upload(ctx, &provider.Reference{ - Spec: &provider.Reference_Path{Path: "uploaded.txt"}, - }, f) - wg.Done() - }(wg) - - // upload file with contents: "another run" - go func(wg *sync.WaitGroup) { - ofs.Upload(ctx, &provider.Reference{ - Spec: &provider.Reference_Path{Path: "uploaded.txt"}, - }, f1) - wg.Done() - }(wg) - - // this test, by the way the oCIS storage is implemented, is non-deterministic, and the contents - // of uploaded.txt will change on each run depending on which of the 2 routines above makes it - // first into the scheduler. In order to make it deterministic, we have to consider the Upload impl- - // ementation and we can leverage concurrency and add locks only when the destination path are the - // same for 2 uploads. 
- - wg.Wait() - revisions, err := ofs.ListRevisions(ctx, &provider.Reference{ - Spec: &provider.Reference_Path{Path: "uploaded.txt"}, }) - assert.NoError(t, err) - assert.Equal(t, 1, len(revisions)) -} -// TestParallelMkcol ensures that, on an unit level, if multiple requests fight for creating a directory (race condition) -// only the first one will create it. Note that there is little to synchronize here because if the folder is already -// created, the underlying filesystem (not the storage driver layer) will fail when attempting to create the directory. -func TestParallelMkcol(t *testing.T) { - ofs, err := New(config) - if err != nil { - t.Error(err) - } - - for i := 0; i < 10; i++ { - t.Run("", func(t *testing.T) { - t.Parallel() - if err := ofs.CreateDir(ctx, "fightforit"); err != nil { - rinfo, err := ofs.GetMD(ctx, &provider.Reference{ - Spec: &provider.Reference_Path{Path: "fightforit"}, - }, nil) - if err != nil { - t.Error(err) - } + AfterEach(func() { + if strings.HasPrefix(tmpRoot, os.TempDir()) { + os.RemoveAll(tmpRoot) + } + }) - assert.NotNil(t, rinfo) - } + Describe("New", func() { + It("returns a new instance", func() { + _, err := ocis.New(options) + Expect(err).ToNot(HaveOccurred()) }) - } -} + }) +}) diff --git a/pkg/storage/fs/ocis/permissions.go b/pkg/storage/fs/ocis/permissions.go deleted file mode 100644 index 55077a3cc5..0000000000 --- a/pkg/storage/fs/ocis/permissions.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package ocis - -import ( - "context" - "strings" - "syscall" - - userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/user" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -const ( - _userAcePrefix = "u:" - _groupAcePrefix = "g:" -) - -var noPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ - // no permissions -} - -// permissions for nodes that don't have an owner set, eg the root node -var noOwnerPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ - Stat: true, -} -var ownerPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ - // all permissions - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, -} - -// Permissions implements permission checks -type Permissions struct { - lu *Lookup -} - -// AssemblePermissions will assemble the permissions for the current user on the given node, taking into account all parent nodes -func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap *provider.ResourcePermissions, err error) { - u, ok := user.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") - 
return noPermissions, nil - } - // check if the current user is the owner - o, err := n.Owner() - if err != nil { - // TODO check if a parent folder has the owner set? - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") - return noPermissions, err - } - if o.OpaqueId == "" { - // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner - // TODO what if no owner is set but grants are present? - return noOwnerPermissions, nil - } - if isSameUserID(u.Id, o) { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") - return ownerPermissions, nil - } - - // determine root - var rn *Node - if rn, err = p.lu.RootNode(ctx); err != nil { - return nil, err - } - - cn := n - - ap = &provider.ResourcePermissions{} - - // for an efficient group lookup convert the list of groups to a map - // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! - groupsMap := make(map[string]bool, len(u.Groups)) - for i := range u.Groups { - groupsMap[u.Groups[i]] = true - } - - // for all segments, starting at the leaf - for cn.ID != rn.ID { - - if np, err := cn.ReadUserPermissions(ctx, u); err == nil { - addPermissions(ap, np) - } else { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Msg("error reading permissions") - // continue with next segment - } - - if cn, err = cn.Parent(); err != nil { - return ap, errors.Wrap(err, "ocisfs: error getting parent "+cn.ParentID) - } - } - - appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n).Interface("user", u).Msg("returning agregated permissions") - return ap, nil -} - -// TODO we should use a bitfield for this ... 
-func addPermissions(l *provider.ResourcePermissions, r *provider.ResourcePermissions) { - l.AddGrant = l.AddGrant || r.AddGrant - l.CreateContainer = l.CreateContainer || r.CreateContainer - l.Delete = l.Delete || r.Delete - l.GetPath = l.GetPath || r.GetPath - l.GetQuota = l.GetQuota || r.GetQuota - l.InitiateFileDownload = l.InitiateFileDownload || r.InitiateFileDownload - l.InitiateFileUpload = l.InitiateFileUpload || r.InitiateFileUpload - l.ListContainer = l.ListContainer || r.ListContainer - l.ListFileVersions = l.ListFileVersions || r.ListFileVersions - l.ListGrants = l.ListGrants || r.ListGrants - l.ListRecycle = l.ListRecycle || r.ListRecycle - l.Move = l.Move || r.Move - l.PurgeRecycle = l.PurgeRecycle || r.PurgeRecycle - l.RemoveGrant = l.RemoveGrant || r.RemoveGrant - l.RestoreFileVersion = l.RestoreFileVersion || r.RestoreFileVersion - l.RestoreRecycleItem = l.RestoreRecycleItem || r.RestoreRecycleItem - l.Stat = l.Stat || r.Stat - l.UpdateGrant = l.UpdateGrant || r.UpdateGrant -} - -// HasPermission call check() for every node up to the root until check returns true -func (p *Permissions) HasPermission(ctx context.Context, n *Node, check func(*provider.ResourcePermissions) bool) (can bool, err error) { - - var u *userv1beta1.User - var perms *provider.ResourcePermissions - if u, perms = p.getUserAndPermissions(ctx, n); perms != nil { - return check(perms), nil - } - - // determine root - var rn *Node - if rn, err = p.lu.RootNode(ctx); err != nil { - return false, err - } - - cn := n - - // for an efficient group lookup convert the list of groups to a map - // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! 
- groupsMap := make(map[string]bool, len(u.Groups)) - for i := range u.Groups { - groupsMap[u.Groups[i]] = true - } - - var g *provider.Grant - // for all segments, starting at the leaf - for cn.ID != rn.ID { - - var grantees []string - if grantees, err = cn.ListGrantees(ctx); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Msg("error listing grantees") - return false, err - } - - userace := grantPrefix + _userAcePrefix + u.Id.OpaqueId - userFound := false - for i := range grantees { - // we only need the find the user once per node - switch { - case !userFound && grantees[i] == userace: - g, err = cn.ReadGrant(ctx, grantees[i]) - case strings.HasPrefix(grantees[i], grantPrefix+_groupAcePrefix): - gr := strings.TrimPrefix(grantees[i], grantPrefix+_groupAcePrefix) - if groupsMap[gr] { - g, err = cn.ReadGrant(ctx, grantees[i]) - } else { - // no need to check attribute - continue - } - default: - // no need to check attribute - continue - } - - switch { - case err == nil: - appctx.GetLogger(ctx).Debug().Interface("node", cn).Str("grant", grantees[i]).Interface("permissions", g.GetPermissions()).Msg("checking permissions") - if check(g.GetPermissions()) { - return true, nil - } - case isNoData(err): - err = nil - appctx.GetLogger(ctx).Error().Interface("node", cn).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing") - default: - appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Str("grant", grantees[i]).Msg("error reading permissions") - return false, err - } - } - - if cn, err = cn.Parent(); err != nil { - return false, errors.Wrap(err, "ocisfs: error getting parent "+cn.ParentID) - } - } - - appctx.GetLogger(ctx).Debug().Interface("permissions", noPermissions).Interface("node", n).Interface("user", u).Msg("no grant found, returning default permissions") - return false, nil -} - -func (p *Permissions) getUserAndPermissions(ctx context.Context, n *Node) (*userv1beta1.User, 
*provider.ResourcePermissions) { - u, ok := user.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") - return nil, noPermissions - } - // check if the current user is the owner - o, err := n.Owner() - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") - return nil, noPermissions - } - if o.OpaqueId == "" { - // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner - // TODO what if no owner is set but grants are present? - return nil, noOwnerPermissions - } - if isSameUserID(u.Id, o) { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") - return u, ownerPermissions - } - return u, nil -} -func isNoData(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENODATA - } - } - return false -} - -// The os not exists error is buried inside the xattr error, -// so we cannot just use os.IsNotExists(). -func isNotFound(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENOENT - } - } - return false -} diff --git a/pkg/storage/fs/ocis/recycle.go b/pkg/storage/fs/ocis/recycle.go deleted file mode 100644 index cc81b849d0..0000000000 --- a/pkg/storage/fs/ocis/recycle.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package ocis - -import ( - "context" - "os" - "path/filepath" - "strings" - "time" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/user" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -// Recycle items are stored inside the node folder and start with the uuid of the deleted node. -// The `.T.` indicates it is a trash item and what follows is the timestamp of the deletion. -// The deleted file is kept in the same location/dir as the original node. This prevents deletes -// from triggering cross storage moves when the trash is accidentally stored on another partition, -// because the admin mounted a different partition there. -// TODO For an efficient listing of deleted nodes the ocis storages trash folder should have -// contain a directory with symlinks to trash files for every userid/"root" - -func (fs *ocisfs) ListRecycle(ctx context.Context) (items []*provider.RecycleItem, err error) { - log := appctx.GetLogger(ctx) - - trashRoot := fs.getRecycleRoot(ctx) - - items = make([]*provider.RecycleItem, 0) - - // TODO how do we check if the storage allows listing the recycle for the current user? check owner of the root of the storage? 
- // use permissions ReadUserPermissions? - if fs.o.EnableHome { - if !ownerPermissions.ListContainer { - log.Debug().Msg("owner not allowed to list trash") - return items, errtypes.PermissionDenied("owner not allowed to list trash") - } - } else { - if !noPermissions.ListContainer { - log.Debug().Msg("default permissions prevent listing trash") - return items, errtypes.PermissionDenied("default permissions prevent listing trash") - } - } - - f, err := os.Open(trashRoot) - if err != nil { - if os.IsNotExist(err) { - return items, nil - } - return nil, errors.Wrap(err, "tree: error listing "+trashRoot) - } - defer f.Close() - - names, err := f.Readdirnames(0) - if err != nil { - return nil, err - } - for i := range names { - var trashnode string - trashnode, err = os.Readlink(filepath.Join(trashRoot, names[i])) - if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Msg("error reading trash link, skipping") - err = nil - continue - } - parts := strings.SplitN(filepath.Base(trashnode), ".T.", 2) - if len(parts) != 2 { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode).Interface("parts", parts).Msg("malformed trash link, skipping") - continue - } - - nodePath := fs.lu.toInternalPath(filepath.Base(trashnode)) - md, err := os.Stat(nodePath) - if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode).Interface("parts", parts).Msg("could not stat trash item, skipping") - continue - } - - item := &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Size: uint64(md.Size()), - Key: filepath.Base(trashRoot) + ":" + parts[0], // glue using :, a / is interpreted as a path and only the node id will reach the other methods - } - if deletionTime, err := time.Parse(time.RFC3339Nano, parts[1]); err == nil { - item.DeletionTime = &types.Timestamp{ - Seconds: uint64(deletionTime.Unix()), - // TODO nanos - } - } else { - 
log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Interface("parts", parts).Msg("could parse time format, ignoring") - } - - // lookup origin path in extended attributes - var attrBytes []byte - if attrBytes, err = xattr.Get(nodePath, trashOriginAttr); err == nil { - item.Path = string(attrBytes) - } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("could not read origin path, skipping") - continue - } - // TODO filter results by permission ... on the original parent? or the trashed node? - // if it were on the original parent it would be possible to see files that were trashed before the current user got access - // so -> check the trash node itself - // hmm listing trash currently lists the current users trash or the 'root' trash. from ocs only the home storage is queried for trash items. - // for now we can only really check if the current user is the owner - if attrBytes, err = xattr.Get(nodePath, ownerIDAttr); err == nil { - if fs.o.EnableHome { - u := user.ContextMustGetUser(ctx) - if u.Id.OpaqueId != string(attrBytes) { - log.Warn().Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("trash item not owned by current user, skipping") - continue - } - } - } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("could not read owner, skipping") - continue - } - - items = append(items, item) - } - return -} - -func (fs *ocisfs) RestoreRecycleItem(ctx context.Context, key string) (err error) { - log := appctx.GetLogger(ctx) - - var rn *Node - var trashItem string - var deletedNodePath string - var origin string - if rn, trashItem, deletedNodePath, origin, err = ReadRecycleItem(ctx, fs.lu, key); err != nil { - return - } - - // check permissions of deleted node - ok, err := fs.p.HasPermission(ctx, rn, func(rp *provider.ResourcePermissions) bool { - return rp.RestoreRecycleItem - }) - 
switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(key) - } - - // link to origin - var n *Node - n, err = fs.lu.NodeFromPath(ctx, origin) - if err != nil { - return - } - - if n.Exists { - return errtypes.AlreadyExists("origin already exists") - } - - // add the entry for the parent dir - err = os.Symlink("../"+rn.ID, filepath.Join(fs.lu.toInternalPath(n.ParentID), n.Name)) - if err != nil { - return - } - - // rename to node only name, so it is picked up by id - nodePath := fs.lu.toInternalPath(rn.ID) - err = os.Rename(deletedNodePath, nodePath) - if err != nil { - return - } - - n.Exists = true - - // delete item link in trash - if err = os.Remove(trashItem); err != nil { - log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trashitem") - } - return fs.tp.Propagate(ctx, n) - -} - -func (fs *ocisfs) PurgeRecycleItem(ctx context.Context, key string) (err error) { - log := appctx.GetLogger(ctx) - - var rn *Node - var trashItem string - var deletedNodePath string - if rn, trashItem, deletedNodePath, _, err = ReadRecycleItem(ctx, fs.lu, key); err != nil { - return - } - - // check permissions of deleted node - ok, err := fs.p.HasPermission(ctx, rn, func(rp *provider.ResourcePermissions) bool { - return rp.PurgeRecycle - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(key) - } - - if err = os.RemoveAll(deletedNodePath); err != nil { - log.Error().Err(err).Str("deletedNodePath", deletedNodePath).Msg("error deleting trash node") - return - } - - // delete item link in trash - if err = os.Remove(trashItem); err != nil { - log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item") - } - // TODO recursively delete all children - return -} - -func (fs *ocisfs) EmptyRecycle(ctx context.Context) error { - u, ok := user.ContextGetUser(ctx) - // TODO what permission should we check? 
we could check the root node of the user? or the owner permissions on his home root node? - // The current impl will wipe your own trash. or when no user provided the trash of 'root' - if !ok { - return os.RemoveAll(fs.getRecycleRoot(ctx)) - } - - // TODO use layout, see Tree.Delete() for problem - return os.RemoveAll(filepath.Join(fs.o.Root, "trash", u.Id.OpaqueId)) -} - -func getResourceType(isDir bool) provider.ResourceType { - if isDir { - return provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - return provider.ResourceType_RESOURCE_TYPE_FILE -} - -func (fs *ocisfs) getRecycleRoot(ctx context.Context) string { - if fs.o.EnableHome { - u := user.ContextMustGetUser(ctx) - // TODO use layout, see Tree.Delete() for problem - return filepath.Join(fs.o.Root, "trash", u.Id.OpaqueId) - } - return filepath.Join(fs.o.Root, "trash", "root") -} diff --git a/pkg/storage/fs/ocis/revisions.go b/pkg/storage/fs/ocis/revisions.go deleted file mode 100644 index 526275bf92..0000000000 --- a/pkg/storage/fs/ocis/revisions.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package ocis - -import ( - "context" - "io" - "os" - "path/filepath" - "strings" - "time" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/pkg/errors" -) - -// Revision entries are stored inside the node folder and start with the same uuid as the current version. -// The `.REV.` indicates it is a revision and what follows is a timestamp, so multiple versions -// can be kept in the same location as the current file content. This prevents new fileuploads -// to trigger cross storage moves when revisions accidentally are stored on another partition, -// because the admin mounted a different partition there. -// We can add a background process to move old revisions to a slower storage -// and replace the revision file with a symbolic link in the future, if necessary. - -func (fs *ocisfs) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) { - var n *Node - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - return rp.ListFileVersions - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - revisions = []*provider.FileVersion{} - np := fs.lu.toInternalPath(n.ID) - if items, err := filepath.Glob(np + ".REV.*"); err == nil { - for i := range items { - if fi, err := os.Stat(items[i]); err == nil { - rev := &provider.FileVersion{ - Key: filepath.Base(items[i]), - Size: uint64(fi.Size()), - Mtime: uint64(fi.ModTime().Unix()), - } - revisions = append(revisions, rev) - } - } - } - return -} - -func (fs *ocisfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey 
string) (io.ReadCloser, error) { - log := appctx.GetLogger(ctx) - - // verify revision key format - kp := strings.SplitN(revisionKey, ".REV.", 2) - if len(kp) != 2 { - log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") - return nil, errtypes.NotFound(revisionKey) - } - log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") - - // check if the node is available and has not been deleted - n, err := ReadNode(ctx, fs.lu, kp[0]) - if err != nil { - return nil, err - } - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return nil, err - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - // TODO add explicit permission in the CS3 api? - return rp.ListFileVersions && rp.RestoreFileVersion && rp.InitiateFileDownload - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - contentPath := fs.lu.toInternalPath(revisionKey) - - r, err := os.Open(contentPath) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(contentPath) - } - return nil, errors.Wrap(err, "ocisfs: error opening revision "+revisionKey) - } - return r, nil -} - -func (fs *ocisfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (err error) { - log := appctx.GetLogger(ctx) - - // verify revision key format - kp := strings.SplitN(revisionKey, ".REV.", 2) - if len(kp) != 2 { - log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") - return errtypes.NotFound(revisionKey) - } - - // check if the node is available and has not been deleted - n, err := ReadNode(ctx, fs.lu, kp[0]) - if err != nil { - return err - } - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return err - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - return 
rp.RestoreFileVersion - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - // move current version to new revision - nodePath := fs.lu.toInternalPath(kp[0]) - var fi os.FileInfo - if fi, err = os.Stat(nodePath); err == nil { - // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - versionsPath := fs.lu.toInternalPath(kp[0] + ".REV." + fi.ModTime().UTC().Format(time.RFC3339Nano)) - - err = os.Rename(nodePath, versionsPath) - if err != nil { - return - } - - // copy old revision to current location - - revisionPath := fs.lu.toInternalPath(revisionKey) - var revision, destination *os.File - revision, err = os.Open(revisionPath) - if err != nil { - return - } - defer revision.Close() - - destination, err = os.OpenFile(nodePath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) - if err != nil { - return - } - defer destination.Close() - _, err = io.Copy(destination, revision) - if err != nil { - return - } - - return fs.copyMD(revisionPath, nodePath) - } - - log.Error().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("original node does not exist") - return -} diff --git a/pkg/storage/fs/ocis/tree.go b/pkg/storage/fs/ocis/tree.go deleted file mode 100644 index 0fed417286..0000000000 --- a/pkg/storage/fs/ocis/tree.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package ocis - -import ( - "context" - "os" - "path/filepath" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -// Tree manages a hierarchical tree -type Tree struct { - lu *Lookup -} - -// NewTree creates a new Tree instance -func NewTree(lu *Lookup) (TreePersistence, error) { - return &Tree{ - lu: lu, - }, nil -} - -// GetMD returns the metadata of a node in the tree -func (t *Tree) GetMD(ctx context.Context, node *Node) (os.FileInfo, error) { - md, err := os.Stat(t.lu.toInternalPath(node.ID)) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(node.ID) - } - return nil, errors.Wrap(err, "tree: error stating "+node.ID) - } - - return md, nil -} - -// GetPathByID returns the fn pointed by the file id, without the internal namespace -func (t *Tree) GetPathByID(ctx context.Context, id *provider.ResourceId) (relativeExternalPath string, err error) { - var node *Node - node, err = t.lu.NodeFromID(ctx, id) - if err != nil { - return - } - - relativeExternalPath, err = t.lu.Path(ctx, node) - return -} - -// does not take care of linking back to parent -// TODO check if node exists? 
-func createNode(n *Node, owner *userpb.UserId) (err error) { - // create a directory node - nodePath := n.lu.toInternalPath(n.ID) - if err = os.MkdirAll(nodePath, 0700); err != nil { - return errors.Wrap(err, "ocisfs: error creating node") - } - - return n.writeMetadata(owner) -} - -// CreateDir creates a new directory entry in the tree -func (t *Tree) CreateDir(ctx context.Context, node *Node) (err error) { - - if node.Exists || node.ID != "" { - return errtypes.AlreadyExists(node.ID) // path? - } - - // create a directory node - node.ID = uuid.New().String() - - // who will become the owner? the owner of the parent node, not the current user - var p *Node - p, err = node.Parent() - if err != nil { - return - } - var owner *userpb.UserId - owner, err = p.Owner() - if err != nil { - return - } - - err = createNode(node, owner) - if err != nil { - return nil - } - - // make child appear in listings - err = os.Symlink("../"+node.ID, filepath.Join(t.lu.toInternalPath(node.ParentID), node.Name)) - if err != nil { - return - } - return t.Propagate(ctx, node) -} - -// Move replaces the target with the source -func (t *Tree) Move(ctx context.Context, oldNode *Node, newNode *Node) (err error) { - // if target exists delete it without trashing it - if newNode.Exists { - // TODO make sure all children are deleted - if err := os.RemoveAll(t.lu.toInternalPath(newNode.ID)); err != nil { - return errors.Wrap(err, "ocisfs: Move: error deleting target node "+newNode.ID) - } - } - - // Always target the old node ID for xattr updates. - // The new node id is empty if the target does not exist - // and we need to overwrite the new one when overwriting an existing path. - tgtPath := t.lu.toInternalPath(oldNode.ID) - - // are we just renaming (parent stays the same)? 
- if oldNode.ParentID == newNode.ParentID { - - parentPath := t.lu.toInternalPath(oldNode.ParentID) - - // rename child - err = os.Rename( - filepath.Join(parentPath, oldNode.Name), - filepath.Join(parentPath, newNode.Name), - ) - if err != nil { - return errors.Wrap(err, "ocisfs: could not rename child") - } - - // update name attribute - if err := xattr.Set(tgtPath, nameAttr, []byte(newNode.Name)); err != nil { - return errors.Wrap(err, "ocisfs: could not set name attribute") - } - - return t.Propagate(ctx, newNode) - } - - // we are moving the node to a new parent, any target has been removed - // bring old node to the new parent - - // rename child - err = os.Rename( - filepath.Join(t.lu.toInternalPath(oldNode.ParentID), oldNode.Name), - filepath.Join(t.lu.toInternalPath(newNode.ParentID), newNode.Name), - ) - if err != nil { - return errors.Wrap(err, "ocisfs: could not move child") - } - - // update target parentid and name - if err := xattr.Set(tgtPath, parentidAttr, []byte(newNode.ParentID)); err != nil { - return errors.Wrap(err, "ocisfs: could not set parentid attribute") - } - if err := xattr.Set(tgtPath, nameAttr, []byte(newNode.Name)); err != nil { - return errors.Wrap(err, "ocisfs: could not set name attribute") - } - - // TODO inefficient because we might update several nodes twice, only propagate unchanged nodes? - // collect in a list, then only stat each node once - // also do this in a go routine ... 
webdav should check the etag async - - err = t.Propagate(ctx, oldNode) - if err != nil { - return errors.Wrap(err, "ocisfs: Move: could not propagate old node") - } - err = t.Propagate(ctx, newNode) - if err != nil { - return errors.Wrap(err, "ocisfs: Move: could not propagate new node") - } - return nil -} - -// ListFolder lists the content of a folder node -func (t *Tree) ListFolder(ctx context.Context, node *Node) ([]*Node, error) { - dir := t.lu.toInternalPath(node.ID) - f, err := os.Open(dir) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(dir) - } - return nil, errors.Wrap(err, "tree: error listing "+dir) - } - defer f.Close() - - names, err := f.Readdirnames(0) - if err != nil { - return nil, err - } - nodes := []*Node{} - for i := range names { - link, err := os.Readlink(filepath.Join(dir, names[i])) - if err != nil { - // TODO log - continue - } - n := &Node{ - lu: t.lu, - ParentID: node.ID, - ID: filepath.Base(link), - Name: names[i], - Exists: true, // TODO - } - - nodes = append(nodes, n) - } - return nodes, nil -} - -// Delete deletes a node in the tree -func (t *Tree) Delete(ctx context.Context, n *Node) (err error) { - - // Prepare the trash - // TODO use layout?, but it requires resolving the owners user if the username is used instead of the id. 
- // the node knows the owner id so we use that for now - o, err := n.Owner() - if err != nil { - return - } - if o.OpaqueId == "" { - // fall back to root trash - o.OpaqueId = "root" - } - err = os.MkdirAll(filepath.Join(t.lu.Options.Root, "trash", o.OpaqueId), 0700) - if err != nil { - return - } - - // get the original path - origin, err := t.lu.Path(ctx, n) - if err != nil { - return - } - - // set origin location in metadata - nodePath := t.lu.toInternalPath(n.ID) - if err := xattr.Set(nodePath, trashOriginAttr, []byte(origin)); err != nil { - return err - } - - deletionTime := time.Now().UTC().Format(time.RFC3339Nano) - - // first make node appear in the owners (or root) trash - // parent id and name are stored as extended attributes in the node itself - trashLink := filepath.Join(t.lu.Options.Root, "trash", o.OpaqueId, n.ID) - err = os.Symlink("../../nodes/"+n.ID+".T."+deletionTime, trashLink) - if err != nil { - // To roll back changes - // TODO unset trashOriginAttr - return - } - - // at this point we have a symlink pointing to a non existing destination, which is fine - - // rename the trashed node so it is not picked up when traversing up the tree and matches the symlink - trashPath := nodePath + ".T." 
+ deletionTime - err = os.Rename(nodePath, trashPath) - if err != nil { - // To roll back changes - // TODO remove symlink - // TODO unset trashOriginAttr - return - } - - // finally remove the entry from the parent dir - src := filepath.Join(t.lu.toInternalPath(n.ParentID), n.Name) - err = os.Remove(src) - if err != nil { - // To roll back changes - // TODO revert the rename - // TODO remove symlink - // TODO unset trashOriginAttr - return - } - - p, err := n.Parent() - if err != nil { - return errors.Wrap(err, "ocisfs: error getting parent "+n.ParentID) - } - return t.Propagate(ctx, p) -} - -// Propagate propagates changes to the root of the tree -func (t *Tree) Propagate(ctx context.Context, n *Node) (err error) { - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - if !t.lu.Options.TreeTimeAccounting && !t.lu.Options.TreeSizeAccounting { - // no propagation enabled - sublog.Debug().Msg("propagation disabled") - return - } - - // is propagation enabled for the parent node? - - var root *Node - if root, err = t.lu.HomeOrRootNode(ctx); err != nil { - return - } - - // use a sync time and don't rely on the mtime of the current node, as the stat might not change when a rename happened too quickly - sTime := time.Now().UTC() - - // we loop until we reach the root - for err == nil && n.ID != root.ID { - sublog.Debug().Msg("propagating") - - // make n the parent or break the loop - if n, err = n.Parent(); err != nil { - break - } - - sublog = sublog.With().Interface("node", n).Logger() - - // TODO none, sync and async? 
- if !n.HasPropagation() { - sublog.Debug().Str("attr", propagationAttr).Msg("propagation attribute not set or unreadable, not propagating") - // if the attribute is not set treat it as false / none / no propagation - return nil - } - - if t.lu.Options.TreeTimeAccounting { - // update the parent tree time if it is older than the nodes mtime - updateSyncTime := false - - var tmTime time.Time - tmTime, err = n.GetTMTime() - switch { - case err != nil: - // missing attribute, or invalid format, overwrite - sublog.Debug().Err(err).Msg("could not read tmtime attribute, overwriting") - updateSyncTime = true - case tmTime.Before(sTime): - sublog.Debug(). - Time("tmtime", tmTime). - Time("stime", sTime). - Msg("parent tmtime is older than node mtime, updating") - updateSyncTime = true - default: - sublog.Debug(). - Time("tmtime", tmTime). - Time("stime", sTime). - Dur("delta", sTime.Sub(tmTime)). - Msg("parent tmtime is younger than node mtime, not updating") - } - - if updateSyncTime { - // update the tree time of the parent node - if err = n.SetTMTime(sTime); err != nil { - sublog.Error().Err(err).Time("tmtime", sTime).Msg("could not update tmtime of parent node") - } else { - sublog.Debug().Time("tmtime", sTime).Msg("updated tmtime of parent node") - } - } - - if err := n.UnsetTempEtag(); err != nil { - sublog.Error().Err(err).Msg("could not remove temporary etag attribute") - } - - } - - // size accounting - if t.lu.Options.TreeSizeAccounting { - // update the treesize if it differs from the current size - updateTreeSize := false - - var treeSize, calculatedTreeSize uint64 - calculatedTreeSize, err = n.CalculateTreeSize(ctx) - if err != nil { - continue - } - - treeSize, err = n.GetTreeSize() - switch { - case err != nil: - // missing attribute, or invalid format, overwrite - sublog.Debug().Err(err).Msg("could not read treesize attribute, overwriting") - updateTreeSize = true - case treeSize != calculatedTreeSize: - sublog.Debug(). - Uint64("treesize", treeSize). 
- Uint64("calculatedTreeSize", calculatedTreeSize). - Msg("parent treesize is different then calculated treesize, updating") - updateTreeSize = true - default: - sublog.Debug(). - Uint64("treesize", treeSize). - Uint64("calculatedTreeSize", calculatedTreeSize). - Msg("parent size matches calculated size, not updating") - } - - if updateTreeSize { - // update the tree time of the parent node - if err = n.SetTreeSize(calculatedTreeSize); err != nil { - sublog.Error().Err(err).Uint64("calculatedTreeSize", calculatedTreeSize).Msg("could not update treesize of parent node") - } else { - sublog.Debug().Uint64("calculatedTreeSize", calculatedTreeSize).Msg("updated treesize of parent node") - } - } - } - } - if err != nil { - sublog.Error().Err(err).Msg("error propagating") - } - return -} diff --git a/pkg/storage/fs/ocis/upload.go b/pkg/storage/fs/ocis/upload.go deleted file mode 100644 index 5d0261e569..0000000000 --- a/pkg/storage/fs/ocis/upload.go +++ /dev/null @@ -1,655 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package ocis - -import ( - "context" - "crypto/md5" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "hash/adler32" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/user" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/rs/zerolog" - tusd "github.com/tus/tusd/pkg/handler" -) - -var defaultFilePerm = os.FileMode(0664) - -// TODO Upload (and InitiateUpload) needs a way to receive the expected checksum. -// Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? -func (fs *ocisfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) (err error) { - upload, err := fs.GetUpload(ctx, ref.GetPath()) - if err != nil { - // Upload corresponding to this ID was not found. - // Assume that this corresponds to the resource path to which the file has to be uploaded. 
- - // Set the length to 0 and set SizeIsDeferred to true - metadata := map[string]string{"sizedeferred": "true"} - uploadIDs, err := fs.InitiateUpload(ctx, ref, 0, metadata) - if err != nil { - return err - } - if upload, err = fs.GetUpload(ctx, uploadIDs["simple"]); err != nil { - return errors.Wrap(err, "ocisfs: error retrieving upload") - } - } - - uploadInfo := upload.(*fileUpload) - - p := uploadInfo.info.Storage["NodeName"] - ok, err := chunking.IsChunked(p) // check chunking v1 - if err != nil { - return errors.Wrap(err, "ocisfs: error checking path") - } - if ok { - var assembledFile string - p, assembledFile, err = fs.chunkHandler.WriteChunk(p, r) - if err != nil { - return err - } - if p == "" { - if err = uploadInfo.Terminate(ctx); err != nil { - return errors.Wrap(err, "ocfs: error removing auxiliary files") - } - return errtypes.PartialContent(ref.String()) - } - uploadInfo.info.Storage["NodeName"] = p - fd, err := os.Open(assembledFile) - if err != nil { - return errors.Wrap(err, "ocisfs: error opening assembled file") - } - defer fd.Close() - defer os.RemoveAll(assembledFile) - r = fd - } - - if _, err := uploadInfo.WriteChunk(ctx, 0, r); err != nil { - return errors.Wrap(err, "ocisfs: error writing to binary file") - } - - return uploadInfo.FinishUpload(ctx) -} - -// InitiateUpload returns upload ids corresponding to different protocols it supports -// TODO read optional content for small files in this request -// TODO InitiateUpload (and Upload) needs a way to receive the expected checksum. Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? 
-func (fs *ocisfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { - - log := appctx.GetLogger(ctx) - - var relative string // the internal path of the file node - - n, err := fs.lu.NodeFromResource(ctx, ref) - if err != nil { - return nil, err - } - - // permissions are checked in NewUpload below - - relative, err = fs.lu.Path(ctx, n) - if err != nil { - return nil, err - } - - info := tusd.FileInfo{ - MetaData: tusd.MetaData{ - "filename": filepath.Base(relative), - "dir": filepath.Dir(relative), - }, - Size: uploadLength, - } - - if metadata != nil { - if metadata["mtime"] != "" { - info.MetaData["mtime"] = metadata["mtime"] - } - if _, ok := metadata["sizedeferred"]; ok { - info.SizeIsDeferred = true - } - if metadata["checksum"] != "" { - parts := strings.SplitN(metadata["checksum"], " ", 2) - if len(parts) != 2 { - return nil, errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'") - } - switch parts[0] { - case "sha1", "md5", "adler32": - info.MetaData["checksum"] = metadata["checksum"] - default: - return nil, errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) - } - } - } - - log.Debug().Interface("info", info).Interface("node", n).Interface("metadata", metadata).Msg("ocisfs: resolved filename") - - upload, err := fs.NewUpload(ctx, info) - if err != nil { - return nil, err - } - - info, _ = upload.GetInfo(ctx) - - return map[string]string{ - "simple": info.ID, - "tus": info.ID, - }, nil -} - -// UseIn tells the tus upload middleware which extensions it supports. 
-func (fs *ocisfs) UseIn(composer *tusd.StoreComposer) { - composer.UseCore(fs) - composer.UseTerminater(fs) - composer.UseConcater(fs) - composer.UseLengthDeferrer(fs) -} - -// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol -// - the storage needs to implement NewUpload and GetUpload -// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload - -func (fs *ocisfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { - - log := appctx.GetLogger(ctx) - log.Debug().Interface("info", info).Msg("ocisfs: NewUpload") - - fn := info.MetaData["filename"] - if fn == "" { - return nil, errors.New("ocisfs: missing filename in metadata") - } - info.MetaData["filename"] = filepath.Clean(info.MetaData["filename"]) - - dir := info.MetaData["dir"] - if dir == "" { - return nil, errors.New("ocisfs: missing dir in metadata") - } - info.MetaData["dir"] = filepath.Clean(info.MetaData["dir"]) - - n, err := fs.lu.NodeFromPath(ctx, filepath.Join(info.MetaData["dir"], info.MetaData["filename"])) - if err != nil { - return nil, errors.Wrap(err, "ocisfs: error wrapping filename") - } - - log.Debug().Interface("info", info).Interface("node", n).Msg("ocisfs: resolved filename") - - // the parent owner will become the new owner - p, perr := n.Parent() - if perr != nil { - return nil, errors.Wrap(perr, "ocisfs: error getting parent "+n.ParentID) - } - - // check permissions - var ok bool - if n.Exists { - // check permissions of file to be overwritten - ok, err = fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - return rp.InitiateFileUpload - }) - } else { - // check permissions of parent - ok, err = fs.p.HasPermission(ctx, p, func(rp *provider.ResourcePermissions) bool { - return rp.InitiateFileUpload - }) - } - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, 
errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - info.ID = uuid.New().String() - - binPath, err := fs.getUploadPath(ctx, info.ID) - if err != nil { - return nil, errors.Wrap(err, "ocisfs: error resolving upload path") - } - usr := user.ContextMustGetUser(ctx) - - owner, err := p.Owner() - if err != nil { - return nil, errors.Wrap(err, "ocisfs: error determining owner") - } - - info.Storage = map[string]string{ - "Type": "OCISStore", - "BinPath": binPath, - - "NodeId": n.ID, - "NodeParentId": n.ParentID, - "NodeName": n.Name, - - "Idp": usr.Id.Idp, - "UserId": usr.Id.OpaqueId, - "UserName": usr.Username, - - "OwnerIdp": owner.Idp, - "OwnerId": owner.OpaqueId, - - "LogLevel": log.GetLevel().String(), - } - // Create binary file in the upload folder with no content - log.Debug().Interface("info", info).Msg("ocisfs: built storage info") - file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) - if err != nil { - return nil, err - } - defer file.Close() - - u := &fileUpload{ - info: info, - binPath: binPath, - infoPath: filepath.Join(fs.o.Root, "uploads", info.ID+".info"), - fs: fs, - ctx: ctx, - } - - if !info.SizeIsDeferred && info.Size == 0 { - log.Debug().Interface("info", info).Msg("ocisfs: finishing upload for empty file") - // no need to create info file and finish directly - err := u.FinishUpload(ctx) - if err != nil { - return nil, err - } - return u, nil - } - - // writeInfo creates the file by itself if necessary - err = u.writeInfo() - if err != nil { - return nil, err - } - - return u, nil -} - -func (fs *ocisfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { - return filepath.Join(fs.o.Root, "uploads", uploadID), nil -} - -// GetUpload returns the Upload for the given upload id -func (fs *ocisfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { - infoPath := filepath.Join(fs.o.Root, "uploads", id+".info") - - info := tusd.FileInfo{} - data, err := ioutil.ReadFile(infoPath) - if 
err != nil { - return nil, err - } - if err := json.Unmarshal(data, &info); err != nil { - return nil, err - } - - stat, err := os.Stat(info.Storage["BinPath"]) - if err != nil { - return nil, err - } - - info.Offset = stat.Size() - - u := &userpb.User{ - Id: &userpb.UserId{ - Idp: info.Storage["Idp"], - OpaqueId: info.Storage["UserId"], - }, - Username: info.Storage["UserName"], - } - - ctx = user.ContextSetUser(ctx, u) - // TODO configure the logger the same way ... store and add traceid in file info - - var opts []logger.Option - opts = append(opts, logger.WithLevel(info.Storage["LogLevel"])) - opts = append(opts, logger.WithWriter(os.Stderr, logger.ConsoleMode)) - l := logger.New(opts...) - - sub := l.With().Int("pid", os.Getpid()).Logger() - - ctx = appctx.WithLogger(ctx, &sub) - - return &fileUpload{ - info: info, - binPath: info.Storage["BinPath"], - infoPath: infoPath, - fs: fs, - ctx: ctx, - }, nil -} - -type fileUpload struct { - // info stores the current information about the upload - info tusd.FileInfo - // infoPath is the path to the .info file - infoPath string - // binPath is the path to the binary file (which has no extension) - binPath string - // only fs knows how to handle metadata and versions - fs *ocisfs - // a context with a user - // TODO add logger as well? - ctx context.Context -} - -// GetInfo returns the FileInfo -func (upload *fileUpload) GetInfo(ctx context.Context) (tusd.FileInfo, error) { - return upload.info, nil -} - -// WriteChunk writes the stream from the reader to the given offset of the upload -func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return 0, err - } - defer file.Close() - - // calculate cheksum here? needed for the TUS checksum extension. https://tus.io/protocols/resumable-upload.html#checksum - // TODO but how do we get the `Upload-Checksum`? 
WriteChunk() only has a context, offset and the reader ... - // It is sent with the PATCH request, well or in the POST when the creation-with-upload extension is used - // but the tus handler uses a context.Background() so we cannot really check the header and put it in the context ... - n, err := io.Copy(file, src) - - // If the HTTP PATCH request gets interrupted in the middle (e.g. because - // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for the ocis driver it's not important whether the stream has ended - // on purpose or accidentally. - if err != nil { - if err != io.ErrUnexpectedEOF { - return n, err - } - } - - upload.info.Offset += n - err = upload.writeInfo() // TODO info is written here ... we need to truncate in DiscardChunk - - return n, err -} - -// GetReader returns an io.Reader for the upload -func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) { - return os.Open(upload.binPath) -} - -// writeInfo updates the entire information. Everything will be overwritten. 
-func (upload *fileUpload) writeInfo() error { - data, err := json.Marshal(upload.info) - if err != nil { - return err - } - return ioutil.WriteFile(upload.infoPath, data, defaultFilePerm) -} - -// FinishUpload finishes an upload and moves the file to the internal destination -func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { - - n := &Node{ - lu: upload.fs.lu, - ID: upload.info.Storage["NodeId"], - ParentID: upload.info.Storage["NodeParentId"], - Name: upload.info.Storage["NodeName"], - } - - if n.ID == "" { - n.ID = uuid.New().String() - } - targetPath := upload.fs.lu.toInternalPath(n.ID) - - sublog := appctx.GetLogger(upload.ctx).With().Interface("info", upload.info).Str("binPath", upload.binPath).Str("targetPath", targetPath).Logger() - - // calculate the checksum of the written bytes - // they will all be written to the metadata later, so we cannot omit any of them - // TODO only calculate the checksum in sync that was requested to match, the rest could be async ... but the tests currently expect all to be present - // TODO the hashes all implement BinaryMarshaler so we could try to persist the state for resumable upload. we would neet do keep track of the copied bytes ... - sha1h := sha1.New() - md5h := md5.New() - adler32h := adler32.New() - { - f, err := os.Open(upload.binPath) - if err != nil { - sublog.Err(err).Msg("ocisfs: could not open file for checksumming") - // we can continue if no oc checksum header is set - } - defer f.Close() - - r1 := io.TeeReader(f, sha1h) - r2 := io.TeeReader(r1, md5h) - - if _, err := io.Copy(adler32h, r2); err != nil { - sublog.Err(err).Msg("ocisfs: could not copy bytes for checksumming") - } - } - // compare if they match the sent checksum - // TODO the tus checksum extension would do this on every chunk, but I currently don't see an easy way to pass in the requested checksum. 
for now we do it in FinishUpload which is also called for chunked uploads - if upload.info.MetaData["checksum"] != "" { - parts := strings.SplitN(upload.info.MetaData["checksum"], " ", 2) - if len(parts) != 2 { - return errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'") - } - switch parts[0] { - case "sha1": - err = upload.checkHash(parts[1], sha1h) - case "md5": - err = upload.checkHash(parts[1], md5h) - case "adler32": - err = upload.checkHash(parts[1], adler32h) - default: - err = errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) - } - if err != nil { - return err - } - } - - // defer writing the checksums until the node is in place - - // if target exists create new version - var fi os.FileInfo - if fi, err = os.Stat(targetPath); err == nil { - // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - versionsPath := upload.fs.lu.toInternalPath(n.ID + ".REV." + fi.ModTime().UTC().Format(time.RFC3339Nano)) - - if err = os.Rename(targetPath, versionsPath); err != nil { - sublog.Err(err). - Str("versionsPath", versionsPath). - Msg("ocisfs: could not create version") - return - } - } - - // now rename the upload to the target path - // TODO put uploads on the same underlying storage as the destination dir? - // TODO trigger a workflow as the final rename might eg involve antivirus scanning - if err = os.Rename(upload.binPath, targetPath); err != nil { - sublog.Err(err). - Msg("ocisfs: could not rename") - return - } - - // now try write all checksums - tryWritingChecksum(&sublog, n, "sha1", sha1h) - tryWritingChecksum(&sublog, n, "md5", md5h) - tryWritingChecksum(&sublog, n, "adler32", adler32h) - - // who will become the owner? the owner of the parent actually ... 
not the currently logged in user - err = n.writeMetadata(&userpb.UserId{ - Idp: upload.info.Storage["OwnerIdp"], - OpaqueId: upload.info.Storage["OwnerId"], - }) - if err != nil { - return errors.Wrap(err, "ocisfs: could not write metadata") - } - - // link child name to parent if it is new - childNameLink := filepath.Join(upload.fs.lu.toInternalPath(n.ParentID), n.Name) - var link string - link, err = os.Readlink(childNameLink) - if err == nil && link != "../"+n.ID { - sublog.Err(err). - Interface("node", n). - Str("childNameLink", childNameLink). - Str("link", link). - Msg("ocisfs: child name link has wrong target id, repairing") - - if err = os.Remove(childNameLink); err != nil { - return errors.Wrap(err, "ocisfs: could not remove symlink child entry") - } - } - if os.IsNotExist(err) || link != "../"+n.ID { - if err = os.Symlink("../"+n.ID, childNameLink); err != nil { - return errors.Wrap(err, "ocisfs: could not symlink child entry") - } - } - - // only delete the upload if it was successfully written to the storage - if err = os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - sublog.Err(err).Msg("ocisfs: could not delete upload info") - return - } - } - // use set arbitrary metadata? - /*if upload.info.MetaData["mtime"] != "" { - err := upload.fs.SetMtime(ctx, np, upload.info.MetaData["mtime"]) - if err != nil { - log.Err(err).Interface("info", upload.info).Msg("ocisfs: could not set mtime metadata") - return err - } - }*/ - - n.Exists = true - - return upload.fs.tp.Propagate(upload.ctx, n) -} - -func (upload *fileUpload) checkHash(expected string, h hash.Hash) error { - if expected != hex.EncodeToString(h.Sum(nil)) { - upload.discardChunk() - return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %x", upload.info.MetaData["checksum"], h.Sum(nil))) - } - return nil -} -func tryWritingChecksum(log *zerolog.Logger, n *Node, algo string, h hash.Hash) { - if err := n.SetChecksum(algo, h); err != nil { - log.Err(err). 
- Str("csType", algo). - Bytes("hash", h.Sum(nil)). - Msg("ocisfs: could not write checksum") - // this is not critical, the bytes are there so we will continue - } -} - -func (upload *fileUpload) discardChunk() { - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("ocisfs: could not discard chunk") - return - } - } -} - -// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination -// - the storage needs to implement AsTerminatableUpload -// - the upload needs to implement Terminate - -// AsTerminatableUpload returns a TerminatableUpload -func (fs *ocisfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { - return upload.(*fileUpload) -} - -// Terminate terminates the upload -func (upload *fileUpload) Terminate(ctx context.Context) error { - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - return nil -} - -// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation -// - the storage needs to implement AsLengthDeclarableUpload -// - the upload needs to implement DeclareLength - -// AsLengthDeclarableUpload returns a LengthDeclarableUpload -func (fs *ocisfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { - return upload.(*fileUpload) -} - -// DeclareLength updates the upload length information -func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error { - upload.info.Size = length - upload.info.SizeIsDeferred = false - return upload.writeInfo() -} - -// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation 
-// - the storage needs to implement AsConcatableUpload -// - the upload needs to implement ConcatUploads - -// AsConcatableUpload returns a ConcatableUpload -func (fs *ocisfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { - return upload.(*fileUpload) -} - -// ConcatUploads concatenates multiple uploads -func (upload *fileUpload) ConcatUploads(ctx context.Context, uploads []tusd.Upload) (err error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return err - } - defer file.Close() - - for _, partialUpload := range uploads { - fileUpload := partialUpload.(*fileUpload) - - src, err := os.Open(fileUpload.binPath) - if err != nil { - return err - } - defer src.Close() - - if _, err := io.Copy(file, src); err != nil { - return err - } - } - - return -} diff --git a/pkg/storage/fs/s3ng/option.go b/pkg/storage/fs/s3ng/option.go index 6ee9f69b83..877a7d7189 100644 --- a/pkg/storage/fs/s3ng/option.go +++ b/pkg/storage/fs/s3ng/option.go @@ -19,9 +19,6 @@ package s3ng import ( - "path/filepath" - "strings" - "github.com/mitchellh/mapstructure" "github.com/pkg/errors" ) @@ -31,26 +28,6 @@ type Option func(o *Options) // Options defines the available options for this package. type Options struct { - // ocis fs works on top of a dir of uuid nodes - Root string `mapstructure:"root"` - - // UserLayout describes the relative path from the storage's root node to the users home node. - UserLayout string `mapstructure:"user_layout"` - - // TODO NodeLayout option to save nodes as eg. nodes/1d/d8/1dd84abf-9466-4e14-bb86-02fc4ea3abcf - ShareFolder string `mapstructure:"share_folder"` - - // EnableHome enables the creation of home directories. 
- EnableHome bool `mapstructure:"enable_home"` - - // propagate mtime changes as tmtime (tree modification time) to the parent directory when user.ocis.propagation=1 is set on a node - TreeTimeAccounting bool `mapstructure:"treetime_accounting"` - - // propagate size changes as treesize - TreeSizeAccounting bool `mapstructure:"treesize_accounting"` - - // set an owner for the root node - Owner string `mapstructure:"owner"` // Endpoint of the s3 blobstore S3Endpoint string `mapstructure:"s3.endpoint"` @@ -85,20 +62,3 @@ func parseConfig(m map[string]interface{}) (*Options, error) { } return o, nil } - -func (o *Options) init(m map[string]interface{}) { - if o.UserLayout == "" { - o.UserLayout = "{{.Id.OpaqueId}}" - } - // ensure user layout has no starting or trailing / - o.UserLayout = strings.Trim(o.UserLayout, "/") - - if o.ShareFolder == "" { - o.ShareFolder = "/Shares" - } - // ensure share folder always starts with slash - o.ShareFolder = filepath.Join("/", o.ShareFolder) - - // c.DataDirectory should never end in / unless it is the root - o.Root = filepath.Clean(o.Root) -} diff --git a/pkg/storage/fs/s3ng/s3ng.go b/pkg/storage/fs/s3ng/s3ng.go index 108d6a6404..30bc250ff7 100644 --- a/pkg/storage/fs/s3ng/s3ng.go +++ b/pkg/storage/fs/s3ng/s3ng.go @@ -18,440 +18,35 @@ package s3ng -//go:generate mockery -name PermissionsChecker -//go:generate mockery -name Tree - import ( - "context" "fmt" - "io" - "net/url" - "os" - "path/filepath" - "strings" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/fs/registry" "github.com/cs3org/reva/pkg/storage/fs/s3ng/blobstore" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/tree" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/xattrs" - 
"github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/cs3org/reva/pkg/user" - "github.com/pkg/errors" - "github.com/pkg/xattr" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" ) func init() { - registry.Register("s3ng", NewDefault) -} - -// PermissionsChecker defines an interface for checking permissions on a Node -type PermissionsChecker interface { - AssemblePermissions(ctx context.Context, n *node.Node) (ap *provider.ResourcePermissions, err error) - HasPermission(ctx context.Context, n *node.Node, check func(*provider.ResourcePermissions) bool) (can bool, err error) -} - -// Tree is used to manage a tree hierarchy -type Tree interface { - Setup(owner string) error - - GetMD(ctx context.Context, node *node.Node) (os.FileInfo, error) - ListFolder(ctx context.Context, node *node.Node) ([]*node.Node, error) - //CreateHome(owner *userpb.UserId) (n *node.Node, err error) - CreateDir(ctx context.Context, node *node.Node) (err error) - //CreateReference(ctx context.Context, node *node.Node, targetURI *url.URL) error - Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) (err error) - Delete(ctx context.Context, node *node.Node) (err error) - RestoreRecycleItemFunc(ctx context.Context, key string) (*node.Node, func() error, error) - PurgeRecycleItemFunc(ctx context.Context, key string) (*node.Node, func() error, error) - - WriteBlob(key string, reader io.Reader) error - ReadBlob(key string) (io.ReadCloser, error) - DeleteBlob(key string) error - - Propagate(ctx context.Context, node *node.Node) (err error) -} - -// NewDefault returns an s3ng filestore using the default configuration -func NewDefault(m map[string]interface{}) (storage.FS, error) { - o, err := parseConfig(m) - if err != nil { - return nil, err - } - - lu := &Lookup{} - p := node.NewPermissions(lu) - bs, err := blobstore.New(o.S3Endpoint, o.S3Region, o.S3Bucket, o.S3AccessKey, o.S3SecretKey) - if err != nil { - return 
nil, err - } - tp := tree.New(o.Root, o.TreeTimeAccounting, o.TreeSizeAccounting, lu, bs) - - return New(m, lu, p, tp) + registry.Register("s3ng", New) } // New returns an implementation to of the storage.FS interface that talk to // a local filesystem. -func New(m map[string]interface{}, lu *Lookup, permissionsChecker PermissionsChecker, tp Tree) (storage.FS, error) { +func New(m map[string]interface{}) (storage.FS, error) { o, err := parseConfig(m) if err != nil { return nil, err } - o.init(m) - - lu.Options = o - - err = tp.Setup(o.Owner) - if err != nil { - logger.New().Error().Err(err). - Msg("could not setup tree") - return nil, errors.Wrap(err, "could not setup tree") - } if !o.S3ConfigComplete() { return nil, fmt.Errorf("S3 configuration incomplete") } - return &s3ngfs{ - tp: tp, - lu: lu, - o: o, - p: permissionsChecker, - chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), - }, nil -} - -type s3ngfs struct { - lu *Lookup - tp Tree - o *Options - p PermissionsChecker - chunkHandler *chunking.ChunkHandler -} - -func (fs *s3ngfs) Shutdown(ctx context.Context) error { - return nil -} - -func (fs *s3ngfs) GetQuota(ctx context.Context) (uint64, uint64, error) { - return 0, 0, nil -} - -// CreateHome creates a new root node that has no parent id -func (fs *s3ngfs) CreateHome(ctx context.Context) (err error) { - if !fs.o.EnableHome || fs.o.UserLayout == "" { - return errtypes.NotSupported("s3ngfs: CreateHome() home supported disabled") - } - - var n, h *node.Node - if n, err = fs.lu.RootNode(ctx); err != nil { - return - } - h, err = fs.lu.WalkPath(ctx, n, fs.lu.mustGetUserLayout(ctx), func(ctx context.Context, n *node.Node) error { - if !n.Exists { - if err := fs.tp.CreateDir(ctx, n); err != nil { - return err - } - } - return nil - }) - if err != nil { - return - } - - // update the owner - u := user.ContextMustGetUser(ctx) - if err = h.WriteMetadata(u.Id); err != nil { - return - } - - if fs.o.TreeTimeAccounting { - homePath := 
h.InternalPath() - // mark the home node as the end of propagation - if err = xattr.Set(homePath, xattrs.PropagationAttr, []byte("1")); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", h).Msg("could not mark home as propagation root") - return - } - } - return -} - -// GetHome is called to look up the home path for a user -// It is NOT supposed to return the internal path but the external path -func (fs *s3ngfs) GetHome(ctx context.Context) (string, error) { - if !fs.o.EnableHome || fs.o.UserLayout == "" { - return "", errtypes.NotSupported("s3ngfs: GetHome() home supported disabled") - } - u := user.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.o.UserLayout) - return filepath.Join(fs.o.Root, layout), nil // TODO use a namespace? -} - -// GetPathByID returns the fn pointed by the file id, without the internal namespace -func (fs *s3ngfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { - node, err := fs.lu.NodeFromID(ctx, id) - if err != nil { - return "", err - } - - return fs.lu.Path(ctx, node) -} - -func (fs *s3ngfs) CreateDir(ctx context.Context, fn string) (err error) { - var n *node.Node - if n, err = fs.lu.NodeFromPath(ctx, fn); err != nil { - return - } - - if n.Exists { - return errtypes.AlreadyExists(fn) - } - pn, err := n.Parent() - if err != nil { - return errors.Wrap(err, "s3ngfs: error getting parent "+n.ParentID) - } - ok, err := fs.p.HasPermission(ctx, pn, func(rp *provider.ResourcePermissions) bool { - return rp.CreateContainer - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - err = fs.tp.CreateDir(ctx, n) - - if fs.o.TreeTimeAccounting { - nodePath := n.InternalPath() - // mark the home node as the end of propagation - if err = xattr.Set(nodePath, xattrs.PropagationAttr, []byte("1")); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", 
n).Msg("could not mark node to propagate") - return - } - } - return -} - -// CreateReference creates a reference as a node folder with the target stored in extended attributes -// There is no difference between the /Shares folder and normal nodes because the storage is not supposed to be accessible without the storage provider. -// In effect everything is a shadow namespace. -// To mimic the eos end owncloud driver we only allow references as children of the "/Shares" folder -// TODO when home support is enabled should the "/Shares" folder still be listed? -func (fs *s3ngfs) CreateReference(ctx context.Context, p string, targetURI *url.URL) (err error) { - - p = strings.Trim(p, "/") - parts := strings.Split(p, "/") - - if len(parts) != 2 { - return errtypes.PermissionDenied("s3ngfs: references must be a child of the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) - } - - if parts[0] != strings.Trim(fs.o.ShareFolder, "/") { - return errtypes.PermissionDenied("s3ngfs: cannot create references outside the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) - } - - // create Shares folder if it does not exist - var n *node.Node - if n, err = fs.lu.NodeFromPath(ctx, fs.o.ShareFolder); err != nil { - return errtypes.InternalError(err.Error()) - } else if !n.Exists { - if err = fs.tp.CreateDir(ctx, n); err != nil { - return - } - } - - if n, err = n.Child(parts[1]); err != nil { - return errtypes.InternalError(err.Error()) - } - - if n.Exists { - // TODO append increasing number to mountpoint name - return errtypes.AlreadyExists(p) - } - - if err = fs.tp.CreateDir(ctx, n); err != nil { - return - } - - internal := n.InternalPath() - if err = xattr.Set(internal, xattrs.ReferenceAttr, []byte(targetURI.String())); err != nil { - return errors.Wrapf(err, "s3ngfs: error setting the target %s on the reference file %s", targetURI.String(), internal) - } - return nil -} - -func (fs *s3ngfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) 
(err error) { - var oldNode, newNode *node.Node - if oldNode, err = fs.lu.NodeFromResource(ctx, oldRef); err != nil { - return - } - - if !oldNode.Exists { - err = errtypes.NotFound(filepath.Join(oldNode.ParentID, oldNode.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, oldNode, func(rp *provider.ResourcePermissions) bool { - return rp.Move - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(oldNode.ID) - } - - if newNode, err = fs.lu.NodeFromResource(ctx, newRef); err != nil { - return - } - if newNode.Exists { - err = errtypes.AlreadyExists(filepath.Join(newNode.ParentID, newNode.Name)) - return - } - - return fs.tp.Move(ctx, oldNode, newNode) -} - -func (fs *s3ngfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (ri *provider.ResourceInfo, err error) { - var node *node.Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - rp, err := fs.p.AssemblePermissions(ctx, node) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !rp.Stat: - return nil, errtypes.PermissionDenied(node.ID) - } - - return node.AsResourceInfo(ctx, rp, mdKeys) -} - -func (fs *s3ngfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) (finfos []*provider.ResourceInfo, err error) { - var n *node.Node - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return - } - - rp, err := fs.p.AssemblePermissions(ctx, n) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !rp.ListContainer: - return nil, errtypes.PermissionDenied(n.ID) - } - - var children []*node.Node - children, err = fs.tp.ListFolder(ctx, n) - if err != nil { - return - } - - for i := range children { - np := 
rp - // add this childs permissions - node.AddPermissions(np, n.PermissionSet(ctx)) - if ri, err := children[i].AsResourceInfo(ctx, np, mdKeys); err == nil { - finfos = append(finfos, ri) - } - } - return -} - -func (fs *s3ngfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { - var node *node.Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.Delete - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - return fs.tp.Delete(ctx, node) -} - -// Data persistence -func (fs *s3ngfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { - node, err := fs.lu.NodeFromResource(ctx, ref) + bs, err := blobstore.New(o.S3Endpoint, o.S3Region, o.S3Bucket, o.S3AccessKey, o.S3SecretKey) if err != nil { - return nil, errors.Wrap(err, "s3ngfs: error resolving ref") - } - - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) return nil, err } - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.InitiateFileDownload - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - reader, err := fs.tp.ReadBlob(node.ID) - if err != nil { - return nil, errors.Wrap(err, "s3ngfs: error download blob '"+node.ID+"'") - } - return reader, nil -} - -// arbitrary metadata persistence in metadata.go - -// Version persistence in revisions.go - -// Trash persistence in recycle.go - -// share persistence in grants.go - -func (fs *s3ngfs) copyMD(s string, t string) (err error) { - var attrs []string - if 
attrs, err = xattr.List(s); err != nil { - return err - } - for i := range attrs { - if strings.HasPrefix(attrs[i], xattrs.OcisPrefix) { - var d []byte - if d, err = xattr.Get(s, attrs[i]); err != nil { - return err - } - if err = xattr.Set(t, attrs[i], d); err != nil { - return err - } - } - } - return nil + return decomposedfs.NewDefault(m, bs) } diff --git a/pkg/storage/fs/s3ng/s3ng_test.go b/pkg/storage/fs/s3ng/s3ng_test.go index dba4be27a0..e4fc6984b5 100644 --- a/pkg/storage/fs/s3ng/s3ng_test.go +++ b/pkg/storage/fs/s3ng/s3ng_test.go @@ -19,22 +19,11 @@ package s3ng_test import ( - "context" "io/ioutil" "os" "strings" - "github.com/stretchr/testify/mock" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/fs/s3ng" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/mocks" - helpers "github.com/cs3org/reva/pkg/storage/fs/s3ng/testhelpers" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/tree" - treemocks "github.com/cs3org/reva/pkg/storage/fs/s3ng/tree/mocks" - ruser "github.com/cs3org/reva/pkg/user" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -42,32 +31,11 @@ import ( var _ = Describe("S3ng", func() { var ( - ref *provider.Reference - user *userpb.User - ctx context.Context - - options map[string]interface{} - lookup *s3ng.Lookup - permissions *mocks.PermissionsChecker - bs *treemocks.Blobstore - fs storage.FS + options map[string]interface{} + tmpRoot string ) BeforeEach(func() { - ref = &provider.Reference{ - Spec: &provider.Reference_Path{ - Path: "foo", - }, - } - user = &userpb.User{ - Id: &userpb.UserId{ - Idp: "idp", - OpaqueId: "userid", - }, - Username: "username", - } - ctx = ruser.ContextSetUser(context.Background(), user) - tmpRoot, err := ioutil.TempDir("", "reva-unit-tests-*-root") Expect(err).ToNot(HaveOccurred()) @@ -81,60 +49,23 @@ var _ = Describe("S3ng", func() { "s3.access_key": "foo", "s3.secret_key": "bar", } - lookup = &s3ng.Lookup{} - permissions = &mocks.PermissionsChecker{} - bs = &treemocks.Blobstore{} - }) - - JustBeforeEach(func() { - var err error - tree := tree.New(options["root"].(string), true, true, lookup, bs) - fs, err = s3ng.New(options, lookup, permissions, tree) - Expect(err).ToNot(HaveOccurred()) - Expect(fs.CreateHome(ctx)).To(Succeed()) }) AfterEach(func() { - root := options["root"].(string) - if strings.HasPrefix(root, os.TempDir()) { - os.RemoveAll(root) + if strings.HasPrefix(tmpRoot, os.TempDir()) { + os.RemoveAll(tmpRoot) } }) - Describe("NewDefault", func() { + Describe("New", func() { It("fails on missing s3 configuration", func() { - _, err := s3ng.NewDefault(map[string]interface{}{}) + _, err := s3ng.New(map[string]interface{}{}) Expect(err).To(MatchError("S3 configuration incomplete")) }) - }) - Describe("Delete", func() { - JustBeforeEach(func() { - _, err := helpers.CreateEmptyNode(ctx, "foo", "foo", user.Id, lookup) + It("works with complete configuration", func() { + _, err := s3ng.New(options) Expect(err).ToNot(HaveOccurred()) }) - - Context("with insufficient permissions", func() { - It("returns an error", func() { - 
permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - - err := fs.Delete(ctx, ref) - - Expect(err).To(MatchError(ContainSubstring("permission denied"))) - }) - }) - - Context("with sufficient permissions", func() { - JustBeforeEach(func() { - permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - }) - - It("does not (yet) delete the blob from the blobstore", func() { - err := fs.Delete(ctx, ref) - - Expect(err).ToNot(HaveOccurred()) - bs.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) - }) - }) }) }) diff --git a/pkg/storage/fs/s3ng/testhelpers/helpers.go b/pkg/storage/fs/s3ng/testhelpers/helpers.go deleted file mode 100644 index 6cbb960d07..0000000000 --- a/pkg/storage/fs/s3ng/testhelpers/helpers.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package helpers - -import ( - "context" - "os" - "path" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/tree" - ruser "github.com/cs3org/reva/pkg/user" -) - -// CreateEmptyNodeForOtherUser creates a home and an empty node for a new user -func CreateEmptyNodeForOtherUser(id, name string, fs storage.FS, lookup tree.PathLookup) (*node.Node, error) { - user := &userpb.User{ - Id: &userpb.UserId{ - Idp: "idp", - OpaqueId: "userid2", - }, - Username: "otheruser", - } - ctx := ruser.ContextSetUser(context.Background(), user) - err := fs.CreateHome(ctx) - if err != nil { - return nil, err - } - return CreateEmptyNode(ctx, id, name, user.Id, lookup) -} - -// CreateEmptyNode creates a home and an empty node for the given context -func CreateEmptyNode(ctx context.Context, id, name string, userid *userpb.UserId, lookup tree.PathLookup) (*node.Node, error) { - root, err := lookup.HomeOrRootNode(ctx) - if err != nil { - return nil, err - } - - n := node.New(id, root.ID, name, 1234, userid, lookup) - p, err := n.Parent() - if err != nil { - return nil, err - } - - // Create an empty file node - _, err = os.OpenFile(n.InternalPath(), os.O_CREATE, 0644) - if err != nil { - return nil, err - } - - // ... and an according link in the parent - err = os.Symlink("../"+n.ID, path.Join(p.InternalPath(), n.Name)) - if err != nil { - return nil, err - } - - err = n.WriteMetadata(userid) - if err != nil { - return nil, err - } - - return n, nil -} diff --git a/pkg/storage/fs/s3ng/tree/tree_test.go b/pkg/storage/fs/s3ng/tree/tree_test.go deleted file mode 100644 index 02a3e37d60..0000000000 --- a/pkg/storage/fs/s3ng/tree/tree_test.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package tree_test - -import ( - "context" - "io/ioutil" - "os" - "path" - "strings" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - "github.com/cs3org/reva/pkg/storage/fs/s3ng" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/tree" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/tree/mocks" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/xattrs" - ruser "github.com/cs3org/reva/pkg/user" - "github.com/mitchellh/mapstructure" - "github.com/pkg/xattr" - "github.com/stretchr/testify/mock" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Tree", func() { - var ( - user *userpb.User - ctx context.Context - - blobstore *mocks.Blobstore - lookup tree.PathLookup - options *s3ng.Options - - t *tree.Tree - treeTimeAccounting bool - treeSizeAccounting bool - ) - - BeforeEach(func() { - user = &userpb.User{ - Id: &userpb.UserId{ - Idp: "idp", - OpaqueId: "userid", - }, - Username: "username", - } - ctx = ruser.ContextSetUser(context.Background(), user) - tmpRoot, err := ioutil.TempDir("", "reva-unit-tests-*-root") - Expect(err).ToNot(HaveOccurred()) - options = &s3ng.Options{} - err = mapstructure.Decode(map[string]interface{}{ - "root": tmpRoot, - }, options) - Expect(err).ToNot(HaveOccurred()) - - blobstore = &mocks.Blobstore{} - lookup = &s3ng.Lookup{Options: options} - }) - - JustBeforeEach(func() { - t = tree.New(options.Root, treeTimeAccounting, treeSizeAccounting, lookup, blobstore) - Expect(t.Setup("root")).To(Succeed()) - }) - - AfterEach(func() { - root := options.Root - if strings.HasPrefix(root, os.TempDir()) { - os.RemoveAll(root) - } - }) - - Describe("New", func() { - It("returns a Tree instance", func() { - Expect(t).ToNot(BeNil()) - }) - }) - - Context("with an existingfile", func() { - var ( - n *node.Node - ) - - JustBeforeEach(func() { - n = createEmptyNode("fooId", "root", "fooName", user.Id, lookup) - Expect(n.WriteMetadata(user.Id)).To(Succeed()) - }) - - Describe("Delete", func() { - JustBeforeEach(func() { - _, err := os.Stat(n.InternalPath()) - Expect(err).ToNot(HaveOccurred()) - - Expect(t.Delete(ctx, n)).To(Succeed()) - - _, err = os.Stat(n.InternalPath()) - Expect(err).To(HaveOccurred()) - }) - - It("moves the file to the trash", func() { - trashPath := path.Join(options.Root, "trash", user.Id.OpaqueId, n.ID) - _, err := os.Stat(trashPath) - Expect(err).ToNot(HaveOccurred()) - }) - - It("removes the file from its original location", func() { - _, err := os.Stat(n.InternalPath()) - Expect(err).To(HaveOccurred()) - }) - - It("sets 
the trash origin xattr", func() { - trashPath := path.Join(options.Root, "trash", user.Id.OpaqueId, n.ID) - attr, err := xattr.Get(trashPath, xattrs.TrashOriginAttr) - Expect(err).ToNot(HaveOccurred()) - Expect(string(attr)).To(Equal(n.Name)) - }) - - It("does not delete the blob from the blobstore", func() { - blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) - }) - }) - - Context("that was deleted", func() { - var ( - trashPath string - ) - - BeforeEach(func() { - blobstore.On("Delete", n.ID).Return(nil) - trashPath = path.Join(options.Root, "trash", user.Id.OpaqueId, n.ID) - }) - - JustBeforeEach(func() { - Expect(t.Delete(ctx, n)).To(Succeed()) - }) - - Describe("PurgeRecycleItemFunc", func() { - JustBeforeEach(func() { - _, err := os.Stat(trashPath) - Expect(err).ToNot(HaveOccurred()) - - _, purgeFunc, err := t.PurgeRecycleItemFunc(ctx, user.Id.OpaqueId+":"+n.ID) - Expect(err).ToNot(HaveOccurred()) - Expect(purgeFunc()).To(Succeed()) - }) - - It("removes the file from the trash", func() { - _, err := os.Stat(trashPath) - Expect(err).To(HaveOccurred()) - }) - - It("deletes the blob from the blobstore", func() { - blobstore.AssertCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) - }) - }) - - Describe("RestoreRecycleItemFunc", func() { - JustBeforeEach(func() { - _, err := os.Stat(trashPath) - Expect(err).ToNot(HaveOccurred()) - _, err = os.Stat(n.InternalPath()) - Expect(err).To(HaveOccurred()) - - _, restoreFunc, err := t.RestoreRecycleItemFunc(ctx, user.Id.OpaqueId+":"+n.ID) - Expect(err).ToNot(HaveOccurred()) - Expect(restoreFunc()).To(Succeed()) - }) - - It("restores the file to its original location", func() { - _, err := os.Stat(n.InternalPath()) - Expect(err).ToNot(HaveOccurred()) - }) - It("removes the file from the trash", func() { - _, err := os.Stat(trashPath) - Expect(err).To(HaveOccurred()) - }) - }) - }) - }) -}) - -func createEmptyNode(id, parent, name string, userid *userpb.UserId, lookup tree.PathLookup) 
*node.Node { - n := node.New(id, parent, name, 0, userid, lookup) - p, err := n.Parent() - ExpectWithOffset(1, err).ToNot(HaveOccurred()) - - // Create an empty file node - _, err = os.OpenFile(n.InternalPath(), os.O_CREATE, 0644) - ExpectWithOffset(1, err).ToNot(HaveOccurred()) - - // ... and an according link in the parent - err = os.Symlink("../"+n.ID, path.Join(p.InternalPath(), n.Name)) - ExpectWithOffset(1, err).ToNot(HaveOccurred()) - - return n -} diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go new file mode 100644 index 0000000000..05c5d02163 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -0,0 +1,485 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package decomposedfs + +//go:generate mockery -name PermissionsChecker +//go:generate mockery -name Tree + +import ( + "context" + "io" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/logger" + "github.com/cs3org/reva/pkg/storage" + "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/pkg/storage/utils/templates" + "github.com/cs3org/reva/pkg/user" + "github.com/pkg/errors" + "github.com/pkg/xattr" +) + +// PermissionsChecker defines an interface for checking permissions on a Node +type PermissionsChecker interface { + AssemblePermissions(ctx context.Context, n *node.Node) (ap *provider.ResourcePermissions, err error) + HasPermission(ctx context.Context, n *node.Node, check func(*provider.ResourcePermissions) bool) (can bool, err error) +} + +// Tree is used to manage a tree hierarchy +type Tree interface { + Setup(owner string) error + + GetMD(ctx context.Context, node *node.Node) (os.FileInfo, error) + ListFolder(ctx context.Context, node *node.Node) ([]*node.Node, error) + //CreateHome(owner *userpb.UserId) (n *node.Node, err error) + CreateDir(ctx context.Context, node *node.Node) (err error) + //CreateReference(ctx context.Context, node *node.Node, targetURI *url.URL) error + Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) (err error) + Delete(ctx context.Context, node *node.Node) (err error) + RestoreRecycleItemFunc(ctx context.Context, key string) (*node.Node, func() error, error) + PurgeRecycleItemFunc(ctx context.Context, key string) (*node.Node, func() error, error) + 
+ WriteBlob(key string, reader io.Reader) error + ReadBlob(key string) (io.ReadCloser, error) + DeleteBlob(key string) error + + Propagate(ctx context.Context, node *node.Node) (err error) +} + +// Decomposedfs provides the base for decomposed filesystem implementations +type Decomposedfs struct { + lu *Lookup + tp Tree + o *options.Options + p PermissionsChecker + chunkHandler *chunking.ChunkHandler +} + +// NewDefault returns an instance with default components +func NewDefault(m map[string]interface{}, bs tree.Blobstore) (storage.FS, error) { + o, err := options.New(m) + if err != nil { + return nil, err + } + + lu := &Lookup{} + p := node.NewPermissions(lu) + + lu.Options = o + + tp := tree.New(o.Root, o.TreeTimeAccounting, o.TreeSizeAccounting, lu, bs) + return New(o, lu, p, tp) +} + +// New returns an implementation of the storage.FS interface that talks to +// a local filesystem. +func New(o *options.Options, lu *Lookup, p PermissionsChecker, tp Tree) (storage.FS, error) { + err := tp.Setup(o.Owner) + if err != nil { + logger.New().Error().Err(err). + Msg("could not setup tree") + return nil, errors.Wrap(err, "could not setup tree") + } + + return &Decomposedfs{ + tp: tp, + lu: lu, + o: o, + p: p, + chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), + }, nil +} + +// Shutdown shuts down the storage +func (fs *Decomposedfs) Shutdown(ctx context.Context) error { + return nil +} + +// GetQuota returns the quota available +// TODO Document in the cs3 should we return quota or free space? 
+func (fs *Decomposedfs) GetQuota(ctx context.Context) (uint64, uint64, error) { + var n *node.Node + var err error + if n, err = fs.lu.HomeOrRootNode(ctx); err != nil { + return 0, 0, err + } + + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return 0, 0, err + } + + rp, err := fs.p.AssemblePermissions(ctx, n) + switch { + case err != nil: + return 0, 0, errtypes.InternalError(err.Error()) + case !rp.GetQuota: + return 0, 0, errtypes.PermissionDenied(n.ID) + } + + ri, err := n.AsResourceInfo(ctx, rp, []string{"treesize", "quota"}) + if err != nil { + return 0, 0, err + } + + quotaStr := node.QuotaUnknown + if ri.Opaque != nil && ri.Opaque.Map != nil && ri.Opaque.Map["quota"] != nil && ri.Opaque.Map["quota"].Decoder == "plain" { + quotaStr = string(ri.Opaque.Map["quota"].Value) + } + + avail, err := fs.getAvailableSize(n.InternalPath()) + if err != nil { + return 0, 0, err + } + total := avail + ri.Size + + switch { + case quotaStr == node.QuotaUncalculated, quotaStr == node.QuotaUnknown, quotaStr == node.QuotaUnlimited: + // best we can do is return current total + // TODO indicate unlimited total? -> in opaque data? 
+ default: + if quota, err := strconv.ParseUint(quotaStr, 10, 64); err == nil { + if total > quota { + total = quota + } + } + } + return total, ri.Size, nil +} + +// CreateHome creates a new home node for the given user +func (fs *Decomposedfs) CreateHome(ctx context.Context) (err error) { + if !fs.o.EnableHome || fs.o.UserLayout == "" { + return errtypes.NotSupported("Decomposedfs: CreateHome() home supported disabled") + } + + var n, h *node.Node + if n, err = fs.lu.RootNode(ctx); err != nil { + return + } + h, err = fs.lu.WalkPath(ctx, n, fs.lu.mustGetUserLayout(ctx), func(ctx context.Context, n *node.Node) error { + if !n.Exists { + if err := fs.tp.CreateDir(ctx, n); err != nil { + return err + } + } + return nil + }) + if err != nil { + return + } + + // update the owner + u := user.ContextMustGetUser(ctx) + if err = h.WriteMetadata(u.Id); err != nil { + return + } + + if fs.o.TreeTimeAccounting { + homePath := h.InternalPath() + // mark the home node as the end of propagation + if err = xattr.Set(homePath, xattrs.PropagationAttr, []byte("1")); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", h).Msg("could not mark home as propagation root") + return + } + } + return +} + +// GetHome is called to look up the home path for a user +// It is NOT supposed to return the internal path but the external path +func (fs *Decomposedfs) GetHome(ctx context.Context) (string, error) { + if !fs.o.EnableHome || fs.o.UserLayout == "" { + return "", errtypes.NotSupported("Decomposedfs: GetHome() home supported disabled") + } + u := user.ContextMustGetUser(ctx) + layout := templates.WithUser(u, fs.o.UserLayout) + return filepath.Join(fs.o.Root, layout), nil // TODO use a namespace? 
+} + +// GetPathByID returns the fn pointed by the file id, without the internal namespace +func (fs *Decomposedfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { + node, err := fs.lu.NodeFromID(ctx, id) + if err != nil { + return "", err + } + + return fs.lu.Path(ctx, node) +} + +// CreateDir creates the specified directory +func (fs *Decomposedfs) CreateDir(ctx context.Context, fn string) (err error) { + var n *node.Node + if n, err = fs.lu.NodeFromPath(ctx, fn); err != nil { + return + } + + if n.Exists { + return errtypes.AlreadyExists(fn) + } + pn, err := n.Parent() + if err != nil { + return errors.Wrap(err, "Decomposedfs: error getting parent "+n.ParentID) + } + ok, err := fs.p.HasPermission(ctx, pn, func(rp *provider.ResourcePermissions) bool { + return rp.CreateContainer + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + err = fs.tp.CreateDir(ctx, n) + + if fs.o.TreeTimeAccounting { + nodePath := n.InternalPath() + // mark the home node as the end of propagation + if err = xattr.Set(nodePath, xattrs.PropagationAttr, []byte("1")); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not mark node to propagate") + return + } + } + return +} + +// CreateReference creates a reference as a node folder with the target stored in extended attributes +// There is no difference between the /Shares folder and normal nodes because the storage is not supposed to be accessible without the storage provider. +// In effect everything is a shadow namespace. +// To mimic the eos end owncloud driver we only allow references as children of the "/Shares" folder +// TODO when home support is enabled should the "/Shares" folder still be listed? 
+func (fs *Decomposedfs) CreateReference(ctx context.Context, p string, targetURI *url.URL) (err error) { + + p = strings.Trim(p, "/") + parts := strings.Split(p, "/") + + if len(parts) != 2 { + return errtypes.PermissionDenied("Decomposedfs: references must be a child of the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) + } + + if parts[0] != strings.Trim(fs.o.ShareFolder, "/") { + return errtypes.PermissionDenied("Decomposedfs: cannot create references outside the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) + } + + // create Shares folder if it does not exist + var n *node.Node + if n, err = fs.lu.NodeFromPath(ctx, fs.o.ShareFolder); err != nil { + return errtypes.InternalError(err.Error()) + } else if !n.Exists { + if err = fs.tp.CreateDir(ctx, n); err != nil { + return + } + } + + if n, err = n.Child(ctx, parts[1]); err != nil { + return errtypes.InternalError(err.Error()) + } + + if n.Exists { + // TODO append increasing number to mountpoint name + return errtypes.AlreadyExists(p) + } + + if err = fs.tp.CreateDir(ctx, n); err != nil { + return + } + + internal := n.InternalPath() + if err = xattr.Set(internal, xattrs.ReferenceAttr, []byte(targetURI.String())); err != nil { + return errors.Wrapf(err, "Decomposedfs: error setting the target %s on the reference file %s", targetURI.String(), internal) + } + return nil +} + +// Move moves a resource from one reference to another +func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) { + var oldNode, newNode *node.Node + if oldNode, err = fs.lu.NodeFromResource(ctx, oldRef); err != nil { + return + } + + if !oldNode.Exists { + err = errtypes.NotFound(filepath.Join(oldNode.ParentID, oldNode.Name)) + return + } + + ok, err := fs.p.HasPermission(ctx, oldNode, func(rp *provider.ResourcePermissions) bool { + return rp.Move + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return 
errtypes.PermissionDenied(oldNode.ID) + } + + if newNode, err = fs.lu.NodeFromResource(ctx, newRef); err != nil { + return + } + if newNode.Exists { + err = errtypes.AlreadyExists(filepath.Join(newNode.ParentID, newNode.Name)) + return + } + + return fs.tp.Move(ctx, oldNode, newNode) +} + +// GetMD returns the metadata for the specified resource +func (fs *Decomposedfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (ri *provider.ResourceInfo, err error) { + var node *node.Node + if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return + } + + rp, err := fs.p.AssemblePermissions(ctx, node) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !rp.Stat: + return nil, errtypes.PermissionDenied(node.ID) + } + + return node.AsResourceInfo(ctx, rp, mdKeys) +} + +// ListFolder returns a list of resources in the specified folder +func (fs *Decomposedfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) (finfos []*provider.ResourceInfo, err error) { + var n *node.Node + if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return + } + + rp, err := fs.p.AssemblePermissions(ctx, n) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !rp.ListContainer: + return nil, errtypes.PermissionDenied(n.ID) + } + + var children []*node.Node + children, err = fs.tp.ListFolder(ctx, n) + if err != nil { + return + } + + for i := range children { + np := rp + // add this childs permissions + node.AddPermissions(np, n.PermissionSet(ctx)) + if ri, err := children[i].AsResourceInfo(ctx, np, mdKeys); err == nil { + finfos = append(finfos, ri) + } + } + return +} + +// Delete deletes the specified resource +func (fs *Decomposedfs) Delete(ctx context.Context, ref 
*provider.Reference) (err error) { + var node *node.Node + if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return + } + + ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { + return rp.Delete + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) + } + + return fs.tp.Delete(ctx, node) +} + +// Download returns a reader to the specified resource +func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { + node, err := fs.lu.NodeFromResource(ctx, ref) + if err != nil { + return nil, errors.Wrap(err, "Decomposedfs: error resolving ref") + } + + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return nil, err + } + + ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { + return rp.InitiateFileDownload + }) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !ok: + return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) + } + + reader, err := fs.tp.ReadBlob(node.BlobID) + if err != nil { + return nil, errors.Wrap(err, "Decomposedfs: error download blob '"+node.ID+"'") + } + return reader, nil +} + +func (fs *Decomposedfs) copyMD(s string, t string) (err error) { + var attrs []string + if attrs, err = xattr.List(s); err != nil { + return err + } + for i := range attrs { + if strings.HasPrefix(attrs[i], xattrs.OcisPrefix) { + var d []byte + if d, err = xattr.Get(s, attrs[i]); err != nil { + return err + } + if err = xattr.Set(t, attrs[i], d); err != nil { + return err + } + } + } + return nil +} diff --git a/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go b/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go new 
file mode 100644 index 0000000000..0d00d888d1 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go @@ -0,0 +1,160 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package decomposedfs_test + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "sync" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/storage" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" + treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks" + "github.com/cs3org/reva/pkg/user" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Decomposed", func() { + var ( + options map[string]interface{} + ctx context.Context + tmpRoot string + fs storage.FS + ) + + BeforeEach(func() { + tmpRoot, err := ioutil.TempDir("", "reva-unit-tests-*-root") + Expect(err).ToNot(HaveOccurred()) + + options = map[string]interface{}{ + "root": tmpRoot, + "share_folder": "/Shares", + "enable_home": false, + "user_layout": "{{.Id.OpaqueId}}", + "owner": "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c", + } + u := &userpb.User{ + Id: &userpb.UserId{ + OpaqueId: "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c", + }, + Username: "test", + Mail: "marie@example.org", + DisplayName: "Marie Curie", + Groups: []string{ + "radium-lovers", + "polonium-lovers", + "physics-lovers", + }, + } + ctx = user.ContextSetUser(context.Background(), u) + + bs := &treemocks.Blobstore{} + fs, err = decomposedfs.NewDefault(options, bs) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + if strings.HasPrefix(tmpRoot, os.TempDir()) { + os.RemoveAll(tmpRoot) + } + }) + + Describe("concurrent", func() { + Describe("Upload", func() { + var ( + f, f1 *os.File + ) + + BeforeEach(func() { + // Prepare two test files for upload + err := ioutil.WriteFile(fmt.Sprintf("%s/%s", tmpRoot, "f.lol"), []byte("test"), 0644) + Expect(err).ToNot(HaveOccurred()) + f, err = os.Open(fmt.Sprintf("%s/%s", tmpRoot, "f.lol")) + Expect(err).ToNot(HaveOccurred()) + + err = ioutil.WriteFile(fmt.Sprintf("%s/%s", tmpRoot, "f1.lol"), []byte("another run"), 0644) + Expect(err).ToNot(HaveOccurred()) + f1, err = os.Open(fmt.Sprintf("%s/%s", tmpRoot, "f1.lol")) + Expect(err).ToNot(HaveOccurred()) + }) + + PIt("generates two revisions", func() { + //runtime.GOMAXPROCS(1) // uncomment to remove concurrency and see revisions working. 
+ wg := &sync.WaitGroup{} + wg.Add(2) + + // upload file with contents: "test" + go func(wg *sync.WaitGroup) { + _ = fs.Upload(ctx, &provider.Reference{ + Spec: &provider.Reference_Path{Path: "uploaded.txt"}, + }, f) + wg.Done() + }(wg) + + // upload file with contents: "another run" + go func(wg *sync.WaitGroup) { + _ = fs.Upload(ctx, &provider.Reference{ + Spec: &provider.Reference_Path{Path: "uploaded.txt"}, + }, f1) + wg.Done() + }(wg) + + // this test, by the way the oCIS storage is implemented, is non-deterministic, and the contents + // of uploaded.txt will change on each run depending on which of the 2 routines above makes it + // first into the scheduler. In order to make it deterministic, we have to consider the Upload impl- + // ementation and we can leverage concurrency and add locks only when the destination path are the + // same for 2 uploads. + + wg.Wait() + revisions, err := fs.ListRevisions(ctx, &provider.Reference{ + Spec: &provider.Reference_Path{Path: "uploaded.txt"}, + }) + Expect(err).ToNot(HaveOccurred()) + Expect(len(revisions)).To(Equal(1)) + + _, err = ioutil.ReadFile(path.Join(tmpRoot, "nodes", "root", "uploaded.txt")) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + Describe("CreateDir", func() { + It("handle already existing directories", func() { + for i := 0; i < 10; i++ { + go func() { + err := fs.CreateDir(ctx, "fightforit") + if err != nil { + rinfo, err := fs.GetMD(ctx, &provider.Reference{ + Spec: &provider.Reference_Path{Path: "fightforit"}, + }, nil) + Expect(err).ToNot(HaveOccurred()) + Expect(rinfo).ToNot(BeNil()) + } + }() + } + }) + }) + }) +}) diff --git a/pkg/storage/utils/decomposedfs/decomposedfs_suite_test.go b/pkg/storage/utils/decomposedfs/decomposedfs_suite_test.go new file mode 100644 index 0000000000..9ea8f67ddd --- /dev/null +++ b/pkg/storage/utils/decomposedfs/decomposedfs_suite_test.go @@ -0,0 +1,31 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you 
may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package decomposedfs_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestDecomposed(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Decomposed Suite") +} diff --git a/pkg/storage/utils/decomposedfs/decomposedfs_test.go b/pkg/storage/utils/decomposedfs/decomposedfs_test.go new file mode 100644 index 0000000000..2f9043dccd --- /dev/null +++ b/pkg/storage/utils/decomposedfs/decomposedfs_test.go @@ -0,0 +1,94 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package decomposedfs_test + +import ( + "github.com/stretchr/testify/mock" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" + helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" + treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Decomposed", func() { + var ( + env *helpers.TestEnv + + ref *provider.Reference + ) + + BeforeEach(func() { + ref = &provider.Reference{ + Spec: &provider.Reference_Path{ + Path: "dir1", + }, + } + }) + + JustBeforeEach(func() { + var err error + env, err = helpers.NewTestEnv() + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + if env != nil { + env.Cleanup() + } + }) + + Describe("NewDefault", func() { + It("works", func() { + bs := &treemocks.Blobstore{} + _, err := decomposedfs.NewDefault(map[string]interface{}{ + "root": env.Root, + }, bs) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + Describe("Delete", func() { + Context("with insufficient permissions", func() { + It("returns an error", func() { + env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) + + err := env.Fs.Delete(env.Ctx, ref) + + Expect(err).To(MatchError(ContainSubstring("permission denied"))) + }) + }) + + Context("with sufficient permissions", func() { + JustBeforeEach(func() { + env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) + }) + + It("does not (yet) delete the blob from the blobstore", func() { + err := env.Fs.Delete(env.Ctx, ref) + + Expect(err).ToNot(HaveOccurred()) + env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) + }) + }) + }) +}) diff --git a/pkg/storage/fs/ocis/ocis_unix.go b/pkg/storage/utils/decomposedfs/decomposedfs_unix.go similarity index 91% rename from pkg/storage/fs/ocis/ocis_unix.go 
rename to pkg/storage/utils/decomposedfs/decomposedfs_unix.go index d291514dbb..3cc758ec1b 100644 --- a/pkg/storage/fs/ocis/ocis_unix.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs_unix.go @@ -18,11 +18,11 @@ // +build !windows -package ocis +package decomposedfs import "syscall" -func (fs *ocisfs) getAvailableSize(path string) (uint64, error) { +func (fs *Decomposedfs) getAvailableSize(path string) (uint64, error) { stat := syscall.Statfs_t{} err := syscall.Statfs(path, &stat) if err != nil { diff --git a/pkg/storage/fs/ocis/ocis_windows.go b/pkg/storage/utils/decomposedfs/decomposedfs_windows.go similarity index 92% rename from pkg/storage/fs/ocis/ocis_windows.go rename to pkg/storage/utils/decomposedfs/decomposedfs_windows.go index f1663b491e..4431173404 100644 --- a/pkg/storage/fs/ocis/ocis_windows.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs_windows.go @@ -18,11 +18,11 @@ // +build windows -package ocis +package decomposedfs import "golang.org/x/sys/windows" -func (fs *ocisfs) getAvailableSize(path string) (uint64, error) { +func (fs *Decomposedfs) getAvailableSize(path string) (uint64, error) { var free, total, avail uint64 pathPtr, err := windows.UTF16PtrFromString(path) if err != nil { diff --git a/pkg/storage/fs/s3ng/grants.go b/pkg/storage/utils/decomposedfs/grants.go similarity index 85% rename from pkg/storage/fs/s3ng/grants.go rename to pkg/storage/utils/decomposedfs/grants.go index f669d8b211..09d4465a72 100644 --- a/pkg/storage/fs/s3ng/grants.go +++ b/pkg/storage/utils/decomposedfs/grants.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-package s3ng +package decomposedfs import ( "context" @@ -26,13 +26,14 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/xattrs" "github.com/cs3org/reva/pkg/storage/utils/ace" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/pkg/xattr" ) -func (fs *s3ngfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { +// AddGrant adds a grant to a resource +func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { log := appctx.GetLogger(ctx) log.Debug().Interface("ref", ref).Interface("grant", g).Msg("AddGrant()") var node *node.Node @@ -64,7 +65,8 @@ func (fs *s3ngfs) AddGrant(ctx context.Context, ref *provider.Reference, g *prov return fs.tp.Propagate(ctx, node) } -func (fs *s3ngfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { +// ListGrants lists the grants on the specified resource +func (fs *Decomposedfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { var node *node.Node if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { return @@ -104,7 +106,8 @@ func (fs *s3ngfs) ListGrants(ctx context.Context, ref *provider.Reference) (gran return grants, nil } -func (fs *s3ngfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { +// RemoveGrant removes a grant from resource +func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { var node *node.Node if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { return @@ -139,7 +142,8 @@ func (fs *s3ngfs) RemoveGrant(ctx context.Context, ref 
*provider.Reference, g *p return fs.tp.Propagate(ctx, node) } -func (fs *s3ngfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { +// UpdateGrant updates a grant on a resource +func (fs *Decomposedfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? tracked in https://github.com/cs3org/cs3apis/issues/92 return fs.AddGrant(ctx, ref, g) } diff --git a/pkg/storage/fs/s3ng/lookup.go b/pkg/storage/utils/decomposedfs/lookup.go similarity index 92% rename from pkg/storage/fs/s3ng/lookup.go rename to pkg/storage/utils/decomposedfs/lookup.go index 5cdadaf74a..7b1024d426 100644 --- a/pkg/storage/fs/s3ng/lookup.go +++ b/pkg/storage/utils/decomposedfs/lookup.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -package s3ng +package decomposedfs import ( "context" @@ -27,14 +27,15 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/pkg/storage/utils/templates" "github.com/cs3org/reva/pkg/user" ) // Lookup implements transformations from filepath to node and back type Lookup struct { - Options *Options + Options *options.Options } // NodeFromResource takes in a request path or request id and converts it to a Node @@ -105,13 +106,13 @@ func (lu *Lookup) Path(ctx context.Context, n *node.Node) (p string, err error) // RootNode returns the root node of the storage func (lu *Lookup) RootNode(ctx context.Context) (*node.Node, error) { - return node.New("root", "", "", 0, nil, lu), nil + return node.New("root", "", "", 0, "", nil, lu), nil } // HomeNode 
returns the home node of a user func (lu *Lookup) HomeNode(ctx context.Context) (node *node.Node, err error) { if !lu.Options.EnableHome { - return nil, errtypes.NotSupported("s3ngfs: home supported disabled") + return nil, errtypes.NotSupported("Decomposedfs: home supported disabled") } if node, err = lu.RootNode(ctx); err != nil { @@ -127,7 +128,7 @@ func (lu *Lookup) WalkPath(ctx context.Context, r *node.Node, p string, f func(c segments := strings.Split(strings.Trim(p, "/"), "/") var err error for i := range segments { - if r, err = r.Child(segments[i]); err != nil { + if r, err = r.Child(ctx, segments[i]); err != nil { return r, err } // if an intermediate node is missing return not found diff --git a/pkg/storage/fs/s3ng/metadata.go b/pkg/storage/utils/decomposedfs/metadata.go similarity index 89% rename from pkg/storage/fs/s3ng/metadata.go rename to pkg/storage/utils/decomposedfs/metadata.go index ca5aefe27d..f16f87e168 100644 --- a/pkg/storage/fs/s3ng/metadata.go +++ b/pkg/storage/utils/decomposedfs/metadata.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-package s3ng +package decomposedfs import ( "context" @@ -26,17 +26,18 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/xattrs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/user" "github.com/pkg/errors" "github.com/pkg/xattr" ) -func (fs *s3ngfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { +// SetArbitraryMetadata sets the metadata on a resource +func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { n, err := fs.lu.NodeFromResource(ctx, ref) if err != nil { - return errors.Wrap(err, "s3ngfs: error resolving ref") + return errors.Wrap(err, "Decomposedfs: error resolving ref") } sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() @@ -103,7 +104,7 @@ func (fs *s3ngfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Refere for k, v := range md.Metadata { attrName := xattrs.MetadataPrefix + k if err = xattr.Set(nodePath, attrName, []byte(v)); err != nil { - errs = append(errs, errors.Wrap(err, "s3ngfs: could not set metadata attribute "+attrName+" to "+k)) + errs = append(errs, errors.Wrap(err, "Decomposedfs: could not set metadata attribute "+attrName+" to "+k)) } } @@ -120,10 +121,11 @@ func (fs *s3ngfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Refere } } -func (fs *s3ngfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { +// UnsetArbitraryMetadata unsets the metadata on the given resource +func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { n, err := 
fs.lu.NodeFromResource(ctx, ref) if err != nil { - return errors.Wrap(err, "s3ngfs: error resolving ref") + return errors.Wrap(err, "Decomposedfs: error resolving ref") } sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() diff --git a/pkg/storage/fs/s3ng/mocks/PermissionsChecker.go b/pkg/storage/utils/decomposedfs/mocks/PermissionsChecker.go similarity index 97% rename from pkg/storage/fs/s3ng/mocks/PermissionsChecker.go rename to pkg/storage/utils/decomposedfs/mocks/PermissionsChecker.go index 83d797df9e..2d05f81ec5 100644 --- a/pkg/storage/fs/s3ng/mocks/PermissionsChecker.go +++ b/pkg/storage/utils/decomposedfs/mocks/PermissionsChecker.go @@ -23,7 +23,7 @@ package mocks import ( context "context" - node "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" + node "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" mock "github.com/stretchr/testify/mock" providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" diff --git a/pkg/storage/fs/s3ng/mocks/Tree.go b/pkg/storage/utils/decomposedfs/mocks/Tree.go similarity index 99% rename from pkg/storage/fs/s3ng/mocks/Tree.go rename to pkg/storage/utils/decomposedfs/mocks/Tree.go index 3213927656..0874e2bbc9 100644 --- a/pkg/storage/fs/s3ng/mocks/Tree.go +++ b/pkg/storage/utils/decomposedfs/mocks/Tree.go @@ -26,7 +26,7 @@ import ( mock "github.com/stretchr/testify/mock" - node "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" + node "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" os "os" diff --git a/pkg/storage/fs/s3ng/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go similarity index 83% rename from pkg/storage/fs/s3ng/node/node.go rename to pkg/storage/utils/decomposedfs/node/node.go index b68dd9af8e..bfd1a93bd2 100644 --- a/pkg/storage/fs/s3ng/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -31,6 +31,7 @@ import ( "strings" "time" + "github.com/google/uuid" "github.com/pkg/errors" "github.com/pkg/xattr" @@ -41,8 +42,8 @@ import ( 
"github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/mime" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/xattrs" "github.com/cs3org/reva/pkg/storage/utils/ace" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/user" ) @@ -52,6 +53,11 @@ const ( ShareTypesKey = "http://owncloud.org/ns/share-types" ChecksumsKey = "http://owncloud.org/ns/checksums" UserShareType = "0" + QuotaKey = "quota" + + QuotaUncalculated = "-1" + QuotaUnknown = "-2" + QuotaUnlimited = "-3" ) // Node represents a node in the tree and provides methods to get a Parent or Child instance @@ -60,6 +66,7 @@ type Node struct { ID string Name string Blobsize int64 + BlobID string owner *userpb.UserId Exists bool @@ -77,7 +84,10 @@ type PathLookup interface { } // New returns a new instance of Node -func New(id, parentID, name string, blobsize int64, owner *userpb.UserId, lu PathLookup) *Node { +func New(id, parentID, name string, blobsize int64, blobID string, owner *userpb.UserId, lu PathLookup) *Node { + if blobID == "" { + blobID = uuid.New().String() + } return &Node{ ID: id, ParentID: parentID, @@ -85,6 +95,7 @@ func New(id, parentID, name string, blobsize int64, owner *userpb.UserId, lu Pat Blobsize: blobsize, owner: owner, lu: lu, + BlobID: blobID, } } @@ -92,27 +103,30 @@ func New(id, parentID, name string, blobsize int64, owner *userpb.UserId, lu Pat func (n *Node) WriteMetadata(owner *userpb.UserId) (err error) { nodePath := n.InternalPath() if err = xattr.Set(nodePath, xattrs.ParentidAttr, []byte(n.ParentID)); err != nil { - return errors.Wrap(err, "s3ngfs: could not set parentid attribute") + return errors.Wrap(err, "Decomposedfs: could not set parentid attribute") } if err = xattr.Set(nodePath, xattrs.NameAttr, []byte(n.Name)); err != nil { - return errors.Wrap(err, "s3ngfs: could not set name attribute") + return errors.Wrap(err, "Decomposedfs: could not set name attribute") + } + if err = 
xattr.Set(nodePath, xattrs.BlobIDAttr, []byte(n.BlobID)); err != nil { + return errors.Wrap(err, "Decomposedfs: could not set blobid attribute") } if err = xattr.Set(nodePath, xattrs.BlobsizeAttr, []byte(fmt.Sprintf("%d", n.Blobsize))); err != nil { - return errors.Wrap(err, "s3ngfs: could not set blobsize attribute") + return errors.Wrap(err, "Decomposedfs: could not set blobsize attribute") } if owner == nil { if err = xattr.Set(nodePath, xattrs.OwnerIDAttr, []byte("")); err != nil { - return errors.Wrap(err, "s3ngfs: could not set empty owner id attribute") + return errors.Wrap(err, "Decomposedfs: could not set empty owner id attribute") } if err = xattr.Set(nodePath, xattrs.OwnerIDPAttr, []byte("")); err != nil { - return errors.Wrap(err, "s3ngfs: could not set empty owner idp attribute") + return errors.Wrap(err, "Decomposedfs: could not set empty owner idp attribute") } } else { if err = xattr.Set(nodePath, xattrs.OwnerIDAttr, []byte(owner.OpaqueId)); err != nil { - return errors.Wrap(err, "s3ngfs: could not set owner id attribute") + return errors.Wrap(err, "Decomposedfs: could not set owner id attribute") } if err = xattr.Set(nodePath, xattrs.OwnerIDPAttr, []byte(owner.Idp)); err != nil { - return errors.Wrap(err, "s3ngfs: could not set owner idp attribute") + return errors.Wrap(err, "Decomposedfs: could not set owner idp attribute") } } return @@ -146,81 +160,56 @@ func ReadNode(ctx context.Context, lu PathLookup, id string) (n *Node, err error } else { return } - // Lookup blobsize - if attrBytes, err = xattr.Get(nodePath, xattrs.BlobsizeAttr); err == nil { - var blobSize int64 - if blobSize, err = strconv.ParseInt(string(attrBytes), 10, 64); err == nil { - n.Blobsize = blobSize - } else { - return - } + // lookup blobID in extended attributes + if attrBytes, err = xattr.Get(nodePath, xattrs.BlobIDAttr); err == nil { + n.BlobID = string(attrBytes) } else { return } - - var root *Node - if root, err = lu.HomeOrRootNode(ctx); err != nil { + // Lookup 
blobsize + var blobSize int64 + if blobSize, err = ReadBlobSizeAttr(nodePath); err == nil { + n.Blobsize = blobSize + } else { return } - parentID := n.ParentID - log := appctx.GetLogger(ctx) - for parentID != root.ID { - log.Debug().Interface("node", n).Str("root.ID", root.ID).Msg("ReadNode()") - // walk to root to check node is not part of a deleted subtree - - if attrBytes, err = xattr.Get(lu.InternalPath(parentID), xattrs.ParentidAttr); err == nil { - parentID = string(attrBytes) - log.Debug().Interface("node", n).Str("root.ID", root.ID).Str("parentID", parentID).Msg("ReadNode() found parent") - } else { - log.Error().Err(err).Interface("node", n).Str("root.ID", root.ID).Msg("ReadNode()") - if isNotFound(err) { - return nil, errtypes.NotFound(err.Error()) - } - return + // Check if parent exists. Otherwise this node is part of a deleted subtree + _, err = os.Stat(lu.InternalPath(n.ParentID)) + if err != nil { + if isNotFound(err) { + return nil, errtypes.NotFound(err.Error()) } + return nil, err } - n.Exists = true - log.Debug().Interface("node", n).Msg("ReadNode() found node") - return } // Child returns the child node with the given name -func (n *Node) Child(name string) (*Node, error) { - c := &Node{ - lu: n.lu, - ParentID: n.ID, - Name: name, - } - +func (n *Node) Child(ctx context.Context, name string) (*Node, error) { link, err := os.Readlink(filepath.Join(n.InternalPath(), name)) if err != nil { if os.IsNotExist(err) { + c := &Node{ + lu: n.lu, + ParentID: n.ID, + Name: name, + } return c, nil // if the file does not exist we return a node that has Exists = false } - return nil, errors.Wrap(err, "s3ngfs: Wrap: readlink error") + return nil, errors.Wrap(err, "Decomposedfs: Wrap: readlink error") } + var c *Node if strings.HasPrefix(link, "../") { - c.Exists = true - c.ID = filepath.Base(link) - } else { - return nil, fmt.Errorf("s3ngfs: expected '../ prefix, got' %+v", link) - } - - // Lookup blobsize - if attrBytes, err := xattr.Get(c.InternalPath(), 
xattrs.BlobsizeAttr); err == nil { - blobSize, err := strconv.ParseInt(string(attrBytes), 10, 64) + c, err = ReadNode(ctx, n.lu, filepath.Base(link)) if err != nil { - return nil, errors.Wrap(err, "node: could not parse blob size") - + return nil, errors.Wrap(err, "could not read child node") } - c.Blobsize = blobSize } else { - return nil, errors.Wrap(err, "node: could not read blob size") + return nil, fmt.Errorf("Decomposedfs: expected '../ prefix, got' %+v", link) } return c, nil @@ -229,7 +218,7 @@ func (n *Node) Child(name string) (*Node, error) { // Parent returns the parent node func (n *Node) Parent() (p *Node, err error) { if n.ParentID == "" { - return nil, fmt.Errorf("s3ngfs: root has no parent") + return nil, fmt.Errorf("Decomposedfs: root has no parent") } p = &Node{ lu: n.lu, @@ -455,6 +444,16 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi PermissionSet: rp, } + if nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + ts, err := n.GetTreeSize() + if err == nil { + ri.Size = ts + } else { + ri.Size = 0 // make dirs always return 0 if it is unknown + sublog.Debug().Err(err).Msg("could not read treesize") + } + } + if ri.Owner, err = n.Owner(); err != nil { sublog.Debug().Err(err).Msg("could not determine owner") } @@ -530,6 +529,16 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi readChecksumIntoOpaque(ctx, nodePath, storageprovider.XSMD5, ri) readChecksumIntoOpaque(ctx, nodePath, storageprovider.XSAdler32, ri) } + // quota + if _, ok := mdKeysMap[QuotaKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER) && returnAllKeys || ok { + var quotaPath string + if r, err := n.lu.HomeOrRootNode(ctx); err == nil { + quotaPath = r.InternalPath() + readQuotaIntoOpaque(ctx, quotaPath, ri) + } else { + sublog.Error().Err(err).Msg("error determining home or root node for quota") + } + } // only read the requested metadata attributes attrs, err := xattr.List(nodePath) @@ 
-605,6 +614,38 @@ func readChecksumIntoOpaque(ctx context.Context, nodePath, algo string, ri *prov } } +// quota is always stored on the root node +func readQuotaIntoOpaque(ctx context.Context, nodePath string, ri *provider.ResourceInfo) { + v, err := xattr.Get(nodePath, xattrs.QuotaAttr) + switch { + case err == nil: + // make sure we have a proper signed int + // we use the same magic numbers to indicate: + // -1 = uncalculated + // -2 = unknown + // -3 = unlimited + if _, err := strconv.ParseInt(string(v), 10, 64); err == nil { + if ri.Opaque == nil { + ri.Opaque = &types.Opaque{ + Map: map[string]*types.OpaqueEntry{}, + } + } + ri.Opaque.Map[QuotaKey] = &types.OpaqueEntry{ + Decoder: "plain", + Value: v, + } + } else { + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("quota", string(v)).Msg("malformed quota") + } + case isNoData(err): + appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Msg("quota not set") + case isNotFound(err): + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("file not found when reading quota") + default: + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read quota") + } +} + // HasPropagation checks if the propagation attribute exists and is set to "1" func (n *Node) HasPropagation() (propagation bool) { if b, err := xattr.Get(n.lu.InternalPath(n.ID), xattrs.PropagationAttr); err == nil { @@ -627,6 +668,20 @@ func (n *Node) SetTMTime(t time.Time) (err error) { return xattr.Set(n.lu.InternalPath(n.ID), xattrs.TreeMTimeAttr, []byte(t.UTC().Format(time.RFC3339Nano))) } +// GetTreeSize reads the treesize from the extended attributes +func (n *Node) GetTreeSize() (treesize uint64, err error) { + var b []byte + if b, err = xattr.Get(n.InternalPath(), xattrs.TreesizeAttr); err != nil { + return + } + return strconv.ParseUint(string(b), 10, 64) +} + +// SetTreeSize writes the treesize to the extended attributes +func (n *Node) SetTreeSize(ts uint64) (err 
error) { + return xattr.Set(n.InternalPath(), xattrs.TreesizeAttr, []byte(strconv.FormatUint(ts, 10))) +} + // SetChecksum writes the checksum with the given checksum type to the extended attributes func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) { return xattr.Set(n.lu.InternalPath(n.ID), xattrs.ChecksumPrefix+csType, h.Sum(nil)) @@ -753,6 +808,19 @@ func (n *Node) ReadGrant(ctx context.Context, grantee string) (g *provider.Grant return e.Grant(), nil } +// ReadBlobSizeAttr reads the blobsize from the xattrs +func ReadBlobSizeAttr(path string) (int64, error) { + attrBytes, err := xattr.Get(path, xattrs.BlobsizeAttr) + if err != nil { + return 0, errors.Wrapf(err, "error reading blobsize xattr") + } + blobSize, err := strconv.ParseInt(string(attrBytes), 10, 64) + if err != nil { + return 0, errors.Wrapf(err, "invalid blobsize xattr format") + } + return blobSize, nil +} + func (n *Node) hasUserShares(ctx context.Context) bool { g, err := n.ListGrantees(ctx) if err != nil { diff --git a/pkg/storage/fs/s3ng/node/node_test.go b/pkg/storage/utils/decomposedfs/node/node_suite_test.go similarity index 87% rename from pkg/storage/fs/s3ng/node/node_test.go rename to pkg/storage/utils/decomposedfs/node/node_suite_test.go index db63f9dfe2..6fd6e84f43 100644 --- a/pkg/storage/fs/s3ng/node/node_test.go +++ b/pkg/storage/utils/decomposedfs/node/node_suite_test.go @@ -19,9 +19,13 @@ package node_test import ( + "testing" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" ) -var _ = Describe("Node", func() { - -}) +func TestNode(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Suite") +} diff --git a/pkg/storage/utils/decomposedfs/node/node_test.go b/pkg/storage/utils/decomposedfs/node/node_test.go new file mode 100644 index 0000000000..f75cee3ef7 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/node/node_test.go @@ -0,0 +1,150 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package node_test + +import ( + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Node", func() { + var ( + env *helpers.TestEnv + + id string + name string + ) + + BeforeEach(func() { + var err error + env, err = helpers.NewTestEnv() + Expect(err).ToNot(HaveOccurred()) + + id = "fooId" + name = "foo" + }) + + AfterEach(func() { + if env != nil { + env.Cleanup() + } + }) + + Describe("New", func() { + It("generates unique blob ids if none are given", func() { + n1 := node.New(id, "", name, 10, "", env.Owner.Id, env.Lookup) + n2 := node.New(id, "", name, 10, "", env.Owner.Id, env.Lookup) + + Expect(len(n1.BlobID)).To(Equal(36)) + Expect(n1.BlobID).ToNot(Equal(n2.BlobID)) + }) + }) + + Describe("ReadNode", func() { + It("reads the blobID from the xattrs", func() { + lookupNode, err := env.Lookup.NodeFromPath(env.Ctx, "dir1/file1") + Expect(err).ToNot(HaveOccurred()) + + n, err := node.ReadNode(env.Ctx, env.Lookup, lookupNode.ID) + Expect(err).ToNot(HaveOccurred()) + Expect(n.BlobID).To(Equal("file1-blobid")) + }) + }) + + Describe("WriteMetadata", func() { + It("writes all xattrs", func() { + n, err := env.Lookup.NodeFromPath(env.Ctx, "dir1/file1") + Expect(err).ToNot(HaveOccurred()) + + blobsize := 239485734 + n.Name = "TestName" + n.BlobID = "TestBlobID" + n.Blobsize = int64(blobsize) + owner := &userpb.UserId{ + Idp: "testidp", + OpaqueId: "testuserid", + } + + err = n.WriteMetadata(owner) + Expect(err).ToNot(HaveOccurred()) + n2, err := env.Lookup.NodeFromPath(env.Ctx, "dir1/file1") + Expect(err).ToNot(HaveOccurred()) + Expect(n2.Name).To(Equal("TestName")) + Expect(n2.BlobID).To(Equal("TestBlobID")) + Expect(n2.Blobsize).To(Equal(int64(blobsize))) + }) + }) + + Describe("Parent", func() { + It("returns the parent node", func() { + child, err := env.Lookup.NodeFromPath(env.Ctx, "dir1/subdir1") + Expect(err).ToNot(HaveOccurred()) + Expect(child).ToNot(BeNil()) + + parent, err := child.Parent() + Expect(err).ToNot(HaveOccurred()) + Expect(parent).ToNot(BeNil()) + 
Expect(parent.ID).To(Equal(child.ParentID)) + }) + }) + + Describe("Child", func() { + var ( + parent *node.Node + ) + + BeforeEach(func() { + var err error + parent, err = env.Lookup.NodeFromPath(env.Ctx, "dir1") + Expect(err).ToNot(HaveOccurred()) + Expect(parent).ToNot(BeNil()) + }) + + It("returns an empty node if the child does not exist", func() { + child, err := parent.Child(env.Ctx, "does-not-exist") + Expect(err).ToNot(HaveOccurred()) + Expect(child).ToNot(BeNil()) + Expect(child.Exists).To(BeFalse()) + }) + + It("returns a directory node with all metadata", func() { + child, err := parent.Child(env.Ctx, "subdir1") + Expect(err).ToNot(HaveOccurred()) + Expect(child).ToNot(BeNil()) + Expect(child.Exists).To(BeTrue()) + Expect(child.ParentID).To(Equal(parent.ID)) + Expect(child.Name).To(Equal("subdir1")) + Expect(child.Blobsize).To(Equal(int64(0))) + }) + + It("returns a file node with all metadata", func() { + child, err := parent.Child(env.Ctx, "file1") + Expect(err).ToNot(HaveOccurred()) + Expect(child).ToNot(BeNil()) + Expect(child.Exists).To(BeTrue()) + Expect(child.ParentID).To(Equal(parent.ID)) + Expect(child.Name).To(Equal("file1")) + Expect(child.Blobsize).To(Equal(int64(1234))) + }) + }) +}) diff --git a/pkg/storage/fs/s3ng/node/permissions.go b/pkg/storage/utils/decomposedfs/node/permissions.go similarity index 97% rename from pkg/storage/fs/s3ng/node/permissions.go rename to pkg/storage/utils/decomposedfs/node/permissions.go index 4e68ea6cc1..ea3e5cae9d 100644 --- a/pkg/storage/fs/s3ng/node/permissions.go +++ b/pkg/storage/utils/decomposedfs/node/permissions.go @@ -26,7 +26,7 @@ import ( userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/xattrs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/user" "github.com/pkg/errors" 
"github.com/pkg/xattr" @@ -127,7 +127,7 @@ func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap *pro } if cn, err = cn.Parent(); err != nil { - return ap, errors.Wrap(err, "s3ngfs: error getting parent "+cn.ParentID) + return ap, errors.Wrap(err, "Decomposedfs: error getting parent "+cn.ParentID) } } @@ -228,7 +228,7 @@ func (p *Permissions) HasPermission(ctx context.Context, n *Node, check func(*pr } if cn, err = cn.Parent(); err != nil { - return false, errors.Wrap(err, "s3ngfs: error getting parent "+cn.ParentID) + return false, errors.Wrap(err, "Decomposedfs: error getting parent "+cn.ParentID) } } diff --git a/pkg/storage/fs/ocis/option.go b/pkg/storage/utils/decomposedfs/options/options.go similarity index 60% rename from pkg/storage/fs/ocis/option.go rename to pkg/storage/utils/decomposedfs/options/options.go index 48a09308ea..d48a178730 100644 --- a/pkg/storage/fs/ocis/option.go +++ b/pkg/storage/utils/decomposedfs/options/options.go @@ -16,7 +16,15 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -package ocis +package options + +import ( + "path/filepath" + "strings" + + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" +) // Option defines a single option function. type Option func(o *Options) @@ -45,57 +53,28 @@ type Options struct { Owner string `mapstructure:"owner"` } -// newOptions initializes the available default options. -/* for future use, commented to make linter happy -func newOptions(opts ...Option) Options { - opt := Options{} - - for _, o := range opts { - o(&opt) - } - - return opt -} -*/ - -// Root provides a function to set the root option. -func Root(val string) Option { - return func(o *Options) { - o.Root = val - } -} - -// UserLayout provides a function to set the user layout option. 
-func UserLayout(val string) Option { - return func(o *Options) { - o.UserLayout = val +// New returns a new Options instance for the given configuration +func New(m map[string]interface{}) (*Options, error) { + o := &Options{} + if err := mapstructure.Decode(m, o); err != nil { + err = errors.Wrap(err, "error decoding conf") + return nil, err } -} -// ShareFolder provides a function to set the ShareFolder option. -func ShareFolder(val string) Option { - return func(o *Options) { - o.ShareFolder = val + if o.UserLayout == "" { + o.UserLayout = "{{.Id.OpaqueId}}" } -} + // ensure user layout has no starting or trailing / + o.UserLayout = strings.Trim(o.UserLayout, "/") -// EnableHome provides a function to set the EnableHome option. -func EnableHome(val bool) Option { - return func(o *Options) { - o.EnableHome = val + if o.ShareFolder == "" { + o.ShareFolder = "/Shares" } -} + // ensure share folder always starts with slash + o.ShareFolder = filepath.Join("/", o.ShareFolder) -// TreeTimeAccounting provides a function to set the TreeTimeAccounting option. -func TreeTimeAccounting(val bool) Option { - return func(o *Options) { - o.TreeTimeAccounting = val - } -} + // c.DataDirectory should never end in / unless it is the root + o.Root = filepath.Clean(o.Root) -// TreeSizeAccounting provides a function to set the TreeSizeAccounting option. -func TreeSizeAccounting(val bool) Option { - return func(o *Options) { - o.TreeSizeAccounting = val - } + return o, nil } diff --git a/pkg/storage/utils/decomposedfs/options/options_suite_test.go b/pkg/storage/utils/decomposedfs/options/options_suite_test.go new file mode 100644 index 0000000000..233675759f --- /dev/null +++ b/pkg/storage/utils/decomposedfs/options/options_suite_test.go @@ -0,0 +1,31 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package options_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestOptions(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Options Suite") +} diff --git a/pkg/storage/utils/decomposedfs/options/options_test.go b/pkg/storage/utils/decomposedfs/options/options_test.go new file mode 100644 index 0000000000..a74825c2b6 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/options/options_test.go @@ -0,0 +1,60 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package options_test + +import ( + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Options", func() { + var ( + o *options.Options + config map[string]interface{} + ) + + BeforeEach(func() { + config = map[string]interface{}{} + }) + + Describe("New", func() { + JustBeforeEach(func() { + var err error + o, err = options.New(config) + Expect(err).ToNot(HaveOccurred()) + }) + + It("sets defaults", func() { + Expect(len(o.ShareFolder) > 0).To(BeTrue()) + Expect(len(o.UserLayout) > 0).To(BeTrue()) + }) + + Context("with unclean root path configuration", func() { + BeforeEach(func() { + config["root"] = "foo/" + }) + + It("sanitizes the root path", func() { + Expect(o.Root).To(Equal("foo")) + }) + }) + }) +}) diff --git a/pkg/storage/fs/s3ng/recycle.go b/pkg/storage/utils/decomposedfs/recycle.go similarity index 90% rename from pkg/storage/fs/s3ng/recycle.go rename to pkg/storage/utils/decomposedfs/recycle.go index e0a5ff5fe4..b75d568b6e 100644 --- a/pkg/storage/fs/s3ng/recycle.go +++ b/pkg/storage/utils/decomposedfs/recycle.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-package s3ng +package decomposedfs import ( "context" @@ -29,8 +29,8 @@ import ( types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/xattrs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/cs3org/reva/pkg/user" "github.com/pkg/errors" "github.com/pkg/xattr" @@ -44,7 +44,8 @@ import ( // TODO For an efficient listing of deleted nodes the ocis storages trash folder should have // contain a directory with symlinks to trash files for every userid/"root" -func (fs *s3ngfs) ListRecycle(ctx context.Context) (items []*provider.RecycleItem, err error) { +// ListRecycle returns the list of available recycle items +func (fs *Decomposedfs) ListRecycle(ctx context.Context) (items []*provider.RecycleItem, err error) { log := appctx.GetLogger(ctx) trashRoot := fs.getRecycleRoot(ctx) @@ -144,7 +145,8 @@ func (fs *s3ngfs) ListRecycle(ctx context.Context) (items []*provider.RecycleIte return } -func (fs *s3ngfs) RestoreRecycleItem(ctx context.Context, key string) error { +// RestoreRecycleItem restores the specified item +func (fs *Decomposedfs) RestoreRecycleItem(ctx context.Context, key string) error { rn, restoreFunc, err := fs.tp.RestoreRecycleItemFunc(ctx, key) if err != nil { return err @@ -165,7 +167,8 @@ func (fs *s3ngfs) RestoreRecycleItem(ctx context.Context, key string) error { return restoreFunc() } -func (fs *s3ngfs) PurgeRecycleItem(ctx context.Context, key string) error { +// PurgeRecycleItem purges the specified item +func (fs *Decomposedfs) PurgeRecycleItem(ctx context.Context, key string) error { rn, purgeFunc, err := fs.tp.PurgeRecycleItemFunc(ctx, key) if err != nil { return err @@ -186,7 +189,8 @@ func (fs *s3ngfs) PurgeRecycleItem(ctx context.Context, key string) error { return purgeFunc() } -func 
(fs *s3ngfs) EmptyRecycle(ctx context.Context) error { +// EmptyRecycle empties the trash +func (fs *Decomposedfs) EmptyRecycle(ctx context.Context) error { u, ok := user.ContextGetUser(ctx) // TODO what permission should we check? we could check the root node of the user? or the owner permissions on his home root node? // The current impl will wipe your own trash. or when no user provided the trash of 'root' @@ -205,7 +209,7 @@ func getResourceType(isDir bool) provider.ResourceType { return provider.ResourceType_RESOURCE_TYPE_FILE } -func (fs *s3ngfs) getRecycleRoot(ctx context.Context) string { +func (fs *Decomposedfs) getRecycleRoot(ctx context.Context) string { if fs.o.EnableHome { u := user.ContextMustGetUser(ctx) // TODO use layout, see Tree.Delete() for problem diff --git a/pkg/storage/fs/s3ng/revisions.go b/pkg/storage/utils/decomposedfs/revisions.go similarity index 85% rename from pkg/storage/fs/s3ng/revisions.go rename to pkg/storage/utils/decomposedfs/revisions.go index e09a8c8113..5dec772b47 100644 --- a/pkg/storage/fs/s3ng/revisions.go +++ b/pkg/storage/utils/decomposedfs/revisions.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -package s3ng +package decomposedfs import ( "context" @@ -29,7 +29,7 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/pkg/errors" ) @@ -41,7 +41,8 @@ import ( // We can add a background process to move old revisions to a slower storage // and replace the revision file with a symbolic link in the future, if necessary. 
-func (fs *s3ngfs) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) { +// ListRevisions lists the revisions of the given resource +func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) { var n *node.Node if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { return @@ -68,9 +69,13 @@ func (fs *s3ngfs) ListRevisions(ctx context.Context, ref *provider.Reference) (r if fi, err := os.Stat(items[i]); err == nil { rev := &provider.FileVersion{ Key: filepath.Base(items[i]), - Size: uint64(fi.Size()), Mtime: uint64(fi.ModTime().Unix()), } + blobSize, err := node.ReadBlobSizeAttr(items[i]) + if err != nil { + return nil, errors.Wrapf(err, "error reading blobsize xattr") + } + rev.Size = uint64(blobSize) revisions = append(revisions, rev) } } @@ -78,7 +83,8 @@ func (fs *s3ngfs) ListRevisions(ctx context.Context, ref *provider.Reference) (r return } -func (fs *s3ngfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { +// DownloadRevision returns a reader for the specified revision +func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { log := appctx.GetLogger(ctx) // verify revision key format @@ -117,12 +123,13 @@ func (fs *s3ngfs) DownloadRevision(ctx context.Context, ref *provider.Reference, if os.IsNotExist(err) { return nil, errtypes.NotFound(contentPath) } - return nil, errors.Wrap(err, "s3ngfs: error opening revision "+revisionKey) + return nil, errors.Wrap(err, "Decomposedfs: error opening revision "+revisionKey) } return r, nil } -func (fs *s3ngfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (err error) { +// RestoreRevision restores the specified revision of the resource +func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Reference, 
revisionKey string) (err error) { log := appctx.GetLogger(ctx) // verify revision key format diff --git a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go new file mode 100644 index 0000000000..f7d950ca71 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go @@ -0,0 +1,187 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package helpers + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/google/uuid" + "github.com/stretchr/testify/mock" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + "github.com/cs3org/reva/pkg/storage" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/mocks" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" + treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks" + ruser "github.com/cs3org/reva/pkg/user" +) + +// TestEnv represents a test environment for unit tests +type TestEnv struct { + Root string + Fs storage.FS + Tree *tree.Tree + Permissions *mocks.PermissionsChecker + Blobstore *treemocks.Blobstore + Owner *userpb.User + Lookup *decomposedfs.Lookup + Ctx context.Context +} + +// NewTestEnv prepares a test environment on disk +// The storage contains some directories and a file: +// +// dir1/ +// dir1/file1 +// dir1/subdir1/ +func NewTestEnv() (*TestEnv, error) { + tmpRoot, err := ioutil.TempDir("", "reva-unit-tests-*-root") + if err != nil { + return nil, err + } + + config := map[string]interface{}{ + "root": tmpRoot, + "enable_home": true, + "treetime_accounting": true, + "treesize_accounting": true, + "share_folder": "/Shares", + "user_layout": "{{.Id.OpaqueId}}", + } + o, err := options.New(config) + if err != nil { + return nil, err + } + + owner := &userpb.User{ + Id: &userpb.UserId{ + Idp: "idp", + OpaqueId: "userid", + }, + Username: "username", + } + lookup := &decomposedfs.Lookup{Options: o} + permissions := &mocks.PermissionsChecker{} + permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Times(3) // Permissions required for setup below + bs := &treemocks.Blobstore{} + tree := tree.New(o.Root, true, true, lookup, bs) + fs, err 
:= decomposedfs.New(o, lookup, permissions, tree) + if err != nil { + return nil, err + } + ctx := ruser.ContextSetUser(context.Background(), owner) + + env := &TestEnv{ + Root: tmpRoot, + Fs: fs, + Tree: tree, + Lookup: lookup, + Permissions: permissions, + Blobstore: bs, + Owner: owner, + Ctx: ctx, + } + + // Create home + err = fs.CreateHome(ctx) + if err != nil { + return nil, err + } + + // Create dir1 + dir1, err := env.CreateTestDir("dir1") + if err != nil { + return nil, err + } + + // Create file1 in dir1 + _, err = env.CreateTestFile("file1", "file1-blobid", 1234, dir1.ID) + if err != nil { + return nil, err + } + + // Create subdir1 in dir1 + err = fs.CreateDir(ctx, "dir1/subdir1") + if err != nil { + return nil, err + } + + // Create emptydir + err = fs.CreateDir(ctx, "emptydir") + if err != nil { + return nil, err + } + + return env, nil +} + +// Cleanup removes all files from disk +func (t *TestEnv) Cleanup() { + os.RemoveAll(t.Root) +} + +// CreateTestDir create a directory and returns a corresponding Node +func (t *TestEnv) CreateTestDir(name string) (*node.Node, error) { + err := t.Fs.CreateDir(t.Ctx, name) + if err != nil { + return nil, err + } + n, err := t.Lookup.NodeFromPath(t.Ctx, name) + if err != nil { + return nil, err + } + + return n, nil +} + +// CreateTestFile creates a new file and its metadata and returns a corresponding Node +func (t *TestEnv) CreateTestFile(name, blobID string, blobSize int64, parentID string) (*node.Node, error) { + // Create file in dir1 + file := node.New( + uuid.New().String(), + parentID, + name, + blobSize, + blobID, + nil, + t.Lookup, + ) + _, err := os.OpenFile(file.InternalPath(), os.O_CREATE, 0700) + if err != nil { + return nil, err + } + err = file.WriteMetadata(t.Owner.Id) + if err != nil { + return nil, err + } + // Link in parent + childNameLink := filepath.Join(t.Lookup.InternalPath(file.ParentID), file.Name) + err = os.Symlink("../"+file.ID, childNameLink) + if err != nil { + return nil, err + } + 
+ return file, err +} diff --git a/pkg/storage/fs/s3ng/tree/mocks/Blobstore.go b/pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go similarity index 100% rename from pkg/storage/fs/s3ng/tree/mocks/Blobstore.go rename to pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go diff --git a/pkg/storage/fs/s3ng/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go similarity index 75% rename from pkg/storage/fs/s3ng/tree/tree.go rename to pkg/storage/utils/decomposedfs/tree/tree.go index eb160719bd..f6517feaf7 100644 --- a/pkg/storage/fs/s3ng/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -20,9 +20,11 @@ package tree import ( "context" + "fmt" "io" "os" "path/filepath" + "strconv" "strings" "time" @@ -30,8 +32,8 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/xattrs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" "github.com/google/uuid" "github.com/pkg/errors" "github.com/pkg/xattr" @@ -102,7 +104,7 @@ func (t *Tree) Setup(owner string) error { // the root node has an empty name // the root node has no parent - n := node.New("root", "", "", 0, nil, t.lookup) + n := node.New("root", "", "", 0, "", nil, t.lookup) err := t.createNode( n, &userpb.UserId{ @@ -169,7 +171,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) if newNode.Exists { // TODO make sure all children are deleted if err := os.RemoveAll(newNode.InternalPath()); err != nil { - return errors.Wrap(err, "s3ngfs: Move: error deleting target node "+newNode.ID) + return errors.Wrap(err, "Decomposedfs: Move: error deleting target node "+newNode.ID) } } @@ -189,12 +191,12 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) 
filepath.Join(parentPath, newNode.Name), ) if err != nil { - return errors.Wrap(err, "s3ngfs: could not rename child") + return errors.Wrap(err, "Decomposedfs: could not rename child") } // update name attribute if err := xattr.Set(tgtPath, xattrs.NameAttr, []byte(newNode.Name)); err != nil { - return errors.Wrap(err, "s3ngfs: could not set name attribute") + return errors.Wrap(err, "Decomposedfs: could not set name attribute") } return t.Propagate(ctx, newNode) @@ -209,15 +211,15 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) filepath.Join(t.lookup.InternalPath(newNode.ParentID), newNode.Name), ) if err != nil { - return errors.Wrap(err, "s3ngfs: could not move child") + return errors.Wrap(err, "Decomposedfs: could not move child") } // update target parentid and name if err := xattr.Set(tgtPath, xattrs.ParentidAttr, []byte(newNode.ParentID)); err != nil { - return errors.Wrap(err, "s3ngfs: could not set parentid attribute") + return errors.Wrap(err, "Decomposedfs: could not set parentid attribute") } if err := xattr.Set(tgtPath, xattrs.NameAttr, []byte(newNode.Name)); err != nil { - return errors.Wrap(err, "s3ngfs: could not set name attribute") + return errors.Wrap(err, "Decomposedfs: could not set name attribute") } // TODO inefficient because we might update several nodes twice, only propagate unchanged nodes? 
@@ -226,11 +228,11 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) err = t.Propagate(ctx, oldNode) if err != nil { - return errors.Wrap(err, "s3ngfs: Move: could not propagate old node") + return errors.Wrap(err, "Decomposedfs: Move: could not propagate old node") } err = t.Propagate(ctx, newNode) if err != nil { - return errors.Wrap(err, "s3ngfs: Move: could not propagate new node") + return errors.Wrap(err, "Decomposedfs: Move: could not propagate new node") } return nil } @@ -337,7 +339,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { p, err := n.Parent() if err != nil { - return errors.Wrap(err, "s3ngfs: error getting parent "+n.ParentID) + return errors.Wrap(err, "Decomposedfs: error getting parent "+n.ParentID) } return t.Propagate(ctx, p) } @@ -393,15 +395,17 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, key string) (*node.Node } fn := func() error { - if err := os.Remove(deletedNodePath); err != nil { + if err := os.RemoveAll(deletedNodePath); err != nil { log.Error().Err(err).Str("deletedNodePath", deletedNodePath).Msg("error deleting trash node") return err } // delete blob from blobstore - if err = t.DeleteBlob(rn.ID); err != nil { - log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item blob") - return err + if rn.BlobID != "" { + if err = t.DeleteBlob(rn.BlobID); err != nil { + log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item blob") + return err + } } // delete item link in trash @@ -418,12 +422,12 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, key string) (*node.Node // Propagate propagates changes to the root of the tree func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { + sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() if !t.treeTimeAccounting && !t.treeSizeAccounting { // no propagation enabled - log.Debug().Msg("propagation disabled") + 
sublog.Debug().Msg("propagation disabled") return } - log := appctx.GetLogger(ctx) // is propagation enabled for the parent node? @@ -435,16 +439,19 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { // use a sync time and don't rely on the mtime of the current node, as the stat might not change when a rename happened too quickly sTime := time.Now().UTC() + // we loop until we reach the root for err == nil && n.ID != root.ID { - log.Debug().Interface("node", n).Msg("propagating") + sublog.Debug().Msg("propagating") if n, err = n.Parent(); err != nil { break } + sublog = sublog.With().Interface("node", n).Logger() + // TODO none, sync and async? if !n.HasPropagation() { - log.Debug().Interface("node", n).Str("attr", xattrs.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating") + sublog.Debug().Str("attr", xattrs.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating") // if the attribute is not set treat it as false / none / no propagation return nil } @@ -458,20 +465,17 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { switch { case err != nil: // missing attribute, or invalid format, overwrite - log.Debug().Err(err). - Interface("node", n). + sublog.Debug().Err(err). Msg("could not read tmtime attribute, overwriting") updateSyncTime = true case tmTime.Before(sTime): - log.Debug(). - Interface("node", n). + sublog.Debug(). Time("tmtime", tmTime). Time("stime", sTime). Msg("parent tmtime is older than node mtime, updating") updateSyncTime = true default: - log.Debug(). - Interface("node", n). + sublog.Debug(). Time("tmtime", tmTime). Time("stime", sTime). Dur("delta", sTime.Sub(tmTime)). 
@@ -481,28 +485,113 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { if updateSyncTime { // update the tree time of the parent node if err = n.SetTMTime(sTime); err != nil { - log.Error().Err(err).Interface("node", n).Time("tmtime", sTime).Msg("could not update tmtime of parent node") - return + sublog.Error().Err(err).Time("tmtime", sTime).Msg("could not update tmtime of parent node") + } else { + sublog.Debug().Time("tmtime", sTime).Msg("updated tmtime of parent node") } - log.Debug().Interface("node", n).Time("tmtime", sTime).Msg("updated tmtime of parent node") } if err := n.UnsetTempEtag(); err != nil { - log.Error().Err(err).Interface("node", n).Msg("could not remove temporary etag attribute") + sublog.Error().Err(err).Msg("could not remove temporary etag attribute") } - } - // TODO size accounting + // size accounting + if t.treeSizeAccounting { + // update the treesize if it differs from the current size + updateTreeSize := false + + var treeSize, calculatedTreeSize uint64 + calculatedTreeSize, err = calculateTreeSize(ctx, n.InternalPath()) + if err != nil { + continue + } + treeSize, err = n.GetTreeSize() + switch { + case err != nil: + // missing attribute, or invalid format, overwrite + sublog.Debug().Err(err).Msg("could not read treesize attribute, overwriting") + updateTreeSize = true + case treeSize != calculatedTreeSize: + sublog.Debug(). + Uint64("treesize", treeSize). + Uint64("calculatedTreeSize", calculatedTreeSize). + Msg("parent treesize is different then calculated treesize, updating") + updateTreeSize = true + default: + sublog.Debug(). + Uint64("treesize", treeSize). + Uint64("calculatedTreeSize", calculatedTreeSize). 
+ Msg("parent size matches calculated size, not updating") + } + + if updateTreeSize { + // update the tree size of the parent node + if err = n.SetTreeSize(calculatedTreeSize); err != nil { + sublog.Error().Err(err).Uint64("calculatedTreeSize", calculatedTreeSize).Msg("could not update treesize of parent node") + } else { + sublog.Debug().Uint64("calculatedTreeSize", calculatedTreeSize).Msg("updated treesize of parent node") + } + } + } } if err != nil { - log.Error().Err(err).Interface("node", n).Msg("error propagating") + sublog.Error().Err(err).Msg("error propagating") return } return } +func calculateTreeSize(ctx context.Context, nodePath string) (uint64, error) { + var size uint64 + + f, err := os.Open(nodePath) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not open dir") + return 0, err + } + defer f.Close() + + names, err := f.Readdirnames(0) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read dirnames") + return 0, err + } + for i := range names { + cPath := filepath.Join(nodePath, names[i]) + info, err := os.Stat(cPath) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not stat child entry") + continue // continue after an error + } + if !info.IsDir() { + blobSize, err := node.ReadBlobSizeAttr(cPath) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read blobSize xattr") + continue // continue after an error + } + size += uint64(blobSize) + } else { + // read from attr + var b []byte + // xattr.Get will follow the symlink + if b, err = xattr.Get(cPath, xattrs.TreesizeAttr); err != nil { + // TODO recursively descend and recalculate treesize + continue // continue after an error + } + csize, err := strconv.ParseUint(string(b), 10, 64) + if err != nil { + // TODO recursively descend and recalculate treesize + continue // continue after an error + } + size += csize + 
} + } + return size, err + +} + // WriteBlob writes a blob to the blobstore func (t *Tree) WriteBlob(key string, reader io.Reader) error { return t.blobstore.Upload(key, reader) @@ -515,6 +604,10 @@ func (t *Tree) ReadBlob(key string) (io.ReadCloser, error) { // DeleteBlob deletes a blob from the blobstore func (t *Tree) DeleteBlob(key string) error { + if key == "" { + return fmt.Errorf("could not delete blob, empty key was given") + } + return t.blobstore.Delete(key) } @@ -523,7 +616,7 @@ func (t *Tree) createNode(n *node.Node, owner *userpb.UserId) (err error) { // create a directory node nodePath := n.InternalPath() if err = os.MkdirAll(nodePath, 0700); err != nil { - return errors.Wrap(err, "s3ngfs: error creating node") + return errors.Wrap(err, "Decomposedfs: error creating node") } return n.WriteMetadata(owner) @@ -571,7 +664,14 @@ func (t *Tree) readRecycleItem(ctx context.Context, key string) (n *node.Node, t return } - n = node.New(parts[0], "", "", 0, owner, t.lookup) + n = node.New(parts[0], "", "", 0, "", owner, t.lookup) + // lookup blobID in extended attributes + if attrBytes, err = xattr.Get(deletedNodePath, xattrs.BlobIDAttr); err == nil { + n.BlobID = string(attrBytes) + } else { + return + } + // lookup parent id in extended attributes if attrBytes, err = xattr.Get(deletedNodePath, xattrs.ParentidAttr); err == nil { n.ParentID = string(attrBytes) diff --git a/pkg/storage/fs/s3ng/tree/tree_suite_test.go b/pkg/storage/utils/decomposedfs/tree/tree_suite_test.go similarity index 100% rename from pkg/storage/fs/s3ng/tree/tree_suite_test.go rename to pkg/storage/utils/decomposedfs/tree/tree_suite_test.go diff --git a/pkg/storage/utils/decomposedfs/tree/tree_test.go b/pkg/storage/utils/decomposedfs/tree/tree_test.go new file mode 100644 index 0000000000..1fc1ac835b --- /dev/null +++ b/pkg/storage/utils/decomposedfs/tree/tree_test.go @@ -0,0 +1,281 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package tree_test + +import ( + "os" + "path" + + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" + helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" + "github.com/pkg/xattr" + "github.com/stretchr/testify/mock" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Tree", func() { + var ( + env *helpers.TestEnv + + t *tree.Tree + ) + + JustBeforeEach(func() { + var err error + env, err = helpers.NewTestEnv() + Expect(err).ToNot(HaveOccurred()) + t = env.Tree + }) + + AfterEach(func() { + if env != nil { + env.Cleanup() + } + }) + + Context("with an existingfile", func() { + var ( + n *node.Node + ) + + JustBeforeEach(func() { + var err error + n, err = env.Lookup.NodeFromPath(env.Ctx, "dir1/file1") + Expect(err).ToNot(HaveOccurred()) + }) + + Describe("Delete", func() { + JustBeforeEach(func() { + _, err := os.Stat(n.InternalPath()) + Expect(err).ToNot(HaveOccurred()) + + Expect(t.Delete(env.Ctx, n)).To(Succeed()) + + _, err = os.Stat(n.InternalPath()) + Expect(err).To(HaveOccurred()) + }) + + It("moves the file to the trash", func() { + trashPath := path.Join(env.Root, "trash", env.Owner.Id.OpaqueId, n.ID) + _, err := os.Stat(trashPath) + Expect(err).ToNot(HaveOccurred()) + }) + + It("removes the file from its original location", func() { + _, err := os.Stat(n.InternalPath()) + Expect(err).To(HaveOccurred()) + }) + + It("sets the trash origin xattr", func() { + trashPath := path.Join(env.Root, "trash", env.Owner.Id.OpaqueId, n.ID) + attr, err := xattr.Get(trashPath, xattrs.TrashOriginAttr) + Expect(err).ToNot(HaveOccurred()) + Expect(string(attr)).To(Equal("dir1/file1")) + }) + + It("does not delete the blob from the blobstore", func() { + env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) + }) + }) + + Context("that was deleted", func() { + var ( + trashPath string + ) + + JustBeforeEach(func() { + env.Blobstore.On("Delete", n.BlobID).Return(nil) + trashPath = path.Join(env.Root, "trash", env.Owner.Id.OpaqueId, n.ID) + Expect(t.Delete(env.Ctx, n)).To(Succeed()) + }) + + Describe("PurgeRecycleItemFunc", func() { + JustBeforeEach(func() { + _, err := os.Stat(trashPath) + Expect(err).ToNot(HaveOccurred()) + + _, purgeFunc, err := 
t.PurgeRecycleItemFunc(env.Ctx, env.Owner.Id.OpaqueId+":"+n.ID) + Expect(err).ToNot(HaveOccurred()) + Expect(purgeFunc()).To(Succeed()) + }) + + It("removes the file from the trash", func() { + _, err := os.Stat(trashPath) + Expect(err).To(HaveOccurred()) + }) + + It("deletes the blob from the blobstore", func() { + env.Blobstore.AssertCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) + }) + }) + + Describe("RestoreRecycleItemFunc", func() { + JustBeforeEach(func() { + _, err := os.Stat(trashPath) + Expect(err).ToNot(HaveOccurred()) + _, err = os.Stat(n.InternalPath()) + Expect(err).To(HaveOccurred()) + + _, restoreFunc, err := t.RestoreRecycleItemFunc(env.Ctx, env.Owner.Id.OpaqueId+":"+n.ID) + Expect(err).ToNot(HaveOccurred()) + Expect(restoreFunc()).To(Succeed()) + }) + + It("restores the file to its original location", func() { + _, err := os.Stat(n.InternalPath()) + Expect(err).ToNot(HaveOccurred()) + }) + It("removes the file from the trash", func() { + _, err := os.Stat(trashPath) + Expect(err).To(HaveOccurred()) + }) + }) + }) + }) + + Context("with an empty directory", func() { + var ( + n *node.Node + ) + + JustBeforeEach(func() { + var err error + n, err = env.Lookup.NodeFromPath(env.Ctx, "emptydir") + Expect(err).ToNot(HaveOccurred()) + }) + + Context("that was deleted", func() { + var ( + trashPath string + ) + + JustBeforeEach(func() { + trashPath = path.Join(env.Root, "trash", env.Owner.Id.OpaqueId, n.ID) + Expect(t.Delete(env.Ctx, n)).To(Succeed()) + }) + + Describe("PurgeRecycleItemFunc", func() { + JustBeforeEach(func() { + _, err := os.Stat(trashPath) + Expect(err).ToNot(HaveOccurred()) + + _, purgeFunc, err := t.PurgeRecycleItemFunc(env.Ctx, env.Owner.Id.OpaqueId+":"+n.ID) + Expect(err).ToNot(HaveOccurred()) + Expect(purgeFunc()).To(Succeed()) + }) + + It("removes the file from the trash", func() { + _, err := os.Stat(trashPath) + Expect(err).To(HaveOccurred()) + }) + + It("does not try to delete a blob from the blobstore", func() { + 
env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) + }) + }) + }) + }) + + Describe("Propagate", func() { + var dir *node.Node + + JustBeforeEach(func() { + env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) + + // Create test dir + var err error + dir, err = env.CreateTestDir("testdir") + Expect(err).ToNot(HaveOccurred()) + }) + + Describe("with TreeSizeAccounting enabled", func() { + It("calculates the size", func() { + file, err := env.CreateTestFile("file1", "", 1, dir.ID) + Expect(err).ToNot(HaveOccurred()) + + err = env.Tree.Propagate(env.Ctx, file) + Expect(err).ToNot(HaveOccurred()) + size, err := dir.GetTreeSize() + Expect(err).ToNot(HaveOccurred()) + Expect(size).To(Equal(uint64(1))) + }) + + It("considers all files", func() { + _, err := env.CreateTestFile("file1", "", 1, dir.ID) + Expect(err).ToNot(HaveOccurred()) + file2, err := env.CreateTestFile("file2", "", 100, dir.ID) + Expect(err).ToNot(HaveOccurred()) + + err = env.Tree.Propagate(env.Ctx, file2) + Expect(err).ToNot(HaveOccurred()) + size, err := dir.GetTreeSize() + Expect(err).ToNot(HaveOccurred()) + Expect(size).To(Equal(uint64(101))) + }) + + It("adds the size of child directories", func() { + subdir, err := env.CreateTestDir("testdir/200bytes") + Expect(err).ToNot(HaveOccurred()) + err = subdir.SetTreeSize(uint64(200)) + Expect(err).ToNot(HaveOccurred()) + + file, err := env.CreateTestFile("file1", "", 1, dir.ID) + Expect(err).ToNot(HaveOccurred()) + + err = env.Tree.Propagate(env.Ctx, file) + Expect(err).ToNot(HaveOccurred()) + size, err := dir.GetTreeSize() + Expect(err).ToNot(HaveOccurred()) + Expect(size).To(Equal(uint64(201))) + }) + + It("stops at nodes with no propagation flag", func() { + subdir, err := env.CreateTestDir("testdir/200bytes") + Expect(err).ToNot(HaveOccurred()) + err = subdir.SetTreeSize(uint64(200)) + Expect(err).ToNot(HaveOccurred()) + + err = env.Tree.Propagate(env.Ctx, subdir) + 
Expect(err).ToNot(HaveOccurred()) + size, err := dir.GetTreeSize() + Expect(size).To(Equal(uint64(200))) + Expect(err).ToNot(HaveOccurred()) + + stopdir, err := env.CreateTestDir("testdir/stophere") + Expect(err).ToNot(HaveOccurred()) + err = xattr.Set(stopdir.InternalPath(), xattrs.PropagationAttr, []byte("0")) + Expect(err).ToNot(HaveOccurred()) + otherdir, err := env.CreateTestDir("testdir/stophere/lotsofbytes") + Expect(err).ToNot(HaveOccurred()) + err = otherdir.SetTreeSize(uint64(100000)) + Expect(err).ToNot(HaveOccurred()) + err = env.Tree.Propagate(env.Ctx, otherdir) + Expect(err).ToNot(HaveOccurred()) + + size, err = dir.GetTreeSize() + Expect(err).ToNot(HaveOccurred()) + Expect(size).To(Equal(uint64(200))) + }) + }) + }) +}) diff --git a/pkg/storage/fs/s3ng/upload.go b/pkg/storage/utils/decomposedfs/upload.go similarity index 84% rename from pkg/storage/fs/s3ng/upload.go rename to pkg/storage/utils/decomposedfs/upload.go index f34b90526e..9b74613a39 100644 --- a/pkg/storage/fs/s3ng/upload.go +++ b/pkg/storage/utils/decomposedfs/upload.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -package s3ng +package decomposedfs import ( "context" @@ -39,8 +39,8 @@ import ( "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/node" "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/pkg/user" "github.com/google/uuid" "github.com/pkg/errors" @@ -50,9 +50,10 @@ import ( var defaultFilePerm = os.FileMode(0664) +// Upload uploads data to the given resource // TODO Upload (and InitiateUpload) needs a way to receive the expected checksum. // Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? 
-func (fs *s3ngfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) (err error) { +func (fs *Decomposedfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) (err error) { upload, err := fs.GetUpload(ctx, ref.GetPath()) if err != nil { // Upload corresponding to this ID was not found. @@ -65,7 +66,7 @@ func (fs *s3ngfs) Upload(ctx context.Context, ref *provider.Reference, r io.Read return err } if upload, err = fs.GetUpload(ctx, uploadIDs["simple"]); err != nil { - return errors.Wrap(err, "s3ngfs: error retrieving upload") + return errors.Wrap(err, "Decomposedfs: error retrieving upload") } } @@ -74,7 +75,7 @@ func (fs *s3ngfs) Upload(ctx context.Context, ref *provider.Reference, r io.Read p := uploadInfo.info.Storage["NodeName"] ok, err := chunking.IsChunked(p) // check chunking v1 if err != nil { - return errors.Wrap(err, "s3ngfs: error checking path") + return errors.Wrap(err, "Decomposedfs: error checking path") } if ok { var assembledFile string @@ -91,7 +92,7 @@ func (fs *s3ngfs) Upload(ctx context.Context, ref *provider.Reference, r io.Read uploadInfo.info.Storage["NodeName"] = p fd, err := os.Open(assembledFile) if err != nil { - return errors.Wrap(err, "s3ngfs: error opening assembled file") + return errors.Wrap(err, "Decomposedfs: error opening assembled file") } defer fd.Close() defer os.RemoveAll(assembledFile) @@ -99,7 +100,7 @@ func (fs *s3ngfs) Upload(ctx context.Context, ref *provider.Reference, r io.Read } if _, err := uploadInfo.WriteChunk(ctx, 0, r); err != nil { - return errors.Wrap(err, "s3ngfs: error writing to binary file") + return errors.Wrap(err, "Decomposedfs: error writing to binary file") } return uploadInfo.FinishUpload(ctx) @@ -108,7 +109,7 @@ func (fs *s3ngfs) Upload(ctx context.Context, ref *provider.Reference, r io.Read // InitiateUpload returns upload ids corresponding to different protocols it supports // TODO read optional content for small files in this request // TODO InitiateUpload (and 
Upload) needs a way to receive the expected checksum. Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? -func (fs *s3ngfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { +func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { log := appctx.GetLogger(ctx) @@ -155,7 +156,7 @@ func (fs *s3ngfs) InitiateUpload(ctx context.Context, ref *provider.Reference, u } } - log.Debug().Interface("info", info).Interface("node", n).Interface("metadata", metadata).Msg("s3ngfs: resolved filename") + log.Debug().Interface("info", info).Interface("node", n).Interface("metadata", metadata).Msg("Decomposedfs: resolved filename") upload, err := fs.NewUpload(ctx, info) if err != nil { @@ -171,7 +172,7 @@ func (fs *s3ngfs) InitiateUpload(ctx context.Context, ref *provider.Reference, u } // UseIn tells the tus upload middleware which extensions it supports. 
-func (fs *s3ngfs) UseIn(composer *tusd.StoreComposer) { +func (fs *Decomposedfs) UseIn(composer *tusd.StoreComposer) { composer.UseCore(fs) composer.UseTerminater(fs) composer.UseConcater(fs) @@ -182,34 +183,35 @@ func (fs *s3ngfs) UseIn(composer *tusd.StoreComposer) { // - the storage needs to implement NewUpload and GetUpload // - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload -func (fs *s3ngfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { +// NewUpload returns a new tus Upload instance +func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { log := appctx.GetLogger(ctx) - log.Debug().Interface("info", info).Msg("s3ngfs: NewUpload") + log.Debug().Interface("info", info).Msg("Decomposedfs: NewUpload") fn := info.MetaData["filename"] if fn == "" { - return nil, errors.New("s3ngfs: missing filename in metadata") + return nil, errors.New("Decomposedfs: missing filename in metadata") } info.MetaData["filename"] = filepath.Clean(info.MetaData["filename"]) dir := info.MetaData["dir"] if dir == "" { - return nil, errors.New("s3ngfs: missing dir in metadata") + return nil, errors.New("Decomposedfs: missing dir in metadata") } info.MetaData["dir"] = filepath.Clean(info.MetaData["dir"]) n, err := fs.lu.NodeFromPath(ctx, filepath.Join(info.MetaData["dir"], info.MetaData["filename"])) if err != nil { - return nil, errors.Wrap(err, "s3ngfs: error wrapping filename") + return nil, errors.Wrap(err, "Decomposedfs: error wrapping filename") } - log.Debug().Interface("info", info).Interface("node", n).Msg("s3ngfs: resolved filename") + log.Debug().Interface("info", info).Interface("node", n).Msg("Decomposedfs: resolved filename") // the parent owner will become the new owner p, perr := n.Parent() if perr != nil { - return nil, errors.Wrap(perr, "s3ngfs: error getting parent "+n.ParentID) + return nil, errors.Wrap(perr, 
"Decomposedfs: error getting parent "+n.ParentID) } // check permissions @@ -236,13 +238,13 @@ func (fs *s3ngfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tus binPath, err := fs.getUploadPath(ctx, info.ID) if err != nil { - return nil, errors.Wrap(err, "s3ngfs: error resolving upload path") + return nil, errors.Wrap(err, "Decomposedfs: error resolving upload path") } usr := user.ContextMustGetUser(ctx) owner, err := p.Owner() if err != nil { - return nil, errors.Wrap(err, "s3ngfs: error determining owner") + return nil, errors.Wrap(err, "Decomposedfs: error determining owner") } info.Storage = map[string]string{ @@ -263,7 +265,7 @@ func (fs *s3ngfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tus "LogLevel": log.GetLevel().String(), } // Create binary file in the upload folder with no content - log.Debug().Interface("info", info).Msg("s3ngfs: built storage info") + log.Debug().Interface("info", info).Msg("Decomposedfs: built storage info") file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) if err != nil { return nil, err @@ -279,7 +281,7 @@ func (fs *s3ngfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tus } if !info.SizeIsDeferred && info.Size == 0 { - log.Debug().Interface("info", info).Msg("s3ngfs: finishing upload for empty file") + log.Debug().Interface("info", info).Msg("Decomposedfs: finishing upload for empty file") // no need to create info file and finish directly err := u.FinishUpload(ctx) if err != nil { @@ -297,12 +299,12 @@ func (fs *s3ngfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tus return u, nil } -func (fs *s3ngfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { +func (fs *Decomposedfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { return filepath.Join(fs.o.Root, "uploads", uploadID), nil } // GetUpload returns the Upload for the given upload id -func (fs *s3ngfs) GetUpload(ctx context.Context, id string) 
(tusd.Upload, error) { +func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { infoPath := filepath.Join(fs.o.Root, "uploads", id+".info") info := tusd.FileInfo{} @@ -358,7 +360,7 @@ type fileUpload struct { // binPath is the path to the binary file (which has no extension) binPath string // only fs knows how to handle metadata and versions - fs *s3ngfs + fs *Decomposedfs // a context with a user // TODO add logger as well? ctx context.Context @@ -417,14 +419,16 @@ func (upload *fileUpload) writeInfo() error { func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { fi, err := os.Stat(upload.binPath) if err != nil { - appctx.GetLogger(upload.ctx).Err(err).Msg("s3ngfs: could not stat uploaded file") + appctx.GetLogger(upload.ctx).Err(err).Msg("Decomposedfs: could not stat uploaded file") return } + n := node.New( upload.info.Storage["NodeId"], upload.info.Storage["NodeParentId"], upload.info.Storage["NodeName"], fi.Size(), + "", nil, upload.fs.lu, ) @@ -450,7 +454,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { { f, err := os.Open(upload.binPath) if err != nil { - sublog.Err(err).Msg("s3ngfs: could not open file for checksumming") + sublog.Err(err).Msg("Decomposedfs: could not open file for checksumming") // we can continue if no oc checksum header is set } defer f.Close() @@ -459,7 +463,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { r2 := io.TeeReader(r1, md5h) if _, err := io.Copy(adler32h, r2); err != nil { - sublog.Err(err).Msg("s3ngfs: could not copy bytes for checksumming") + sublog.Err(err).Msg("Decomposedfs: could not copy bytes for checksumming") } } // compare if they match the sent checksum @@ -483,6 +487,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { return err } } + n.BlobID = upload.info.ID // This can be changed to a content hash in the future when reference counting for the blobs was added // defer writing 
the checksums until the node is in place @@ -495,7 +500,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { sublog.Err(err). Str("binPath", upload.binPath). Str("versionsPath", versionsPath). - Msg("s3ngfs: could not create version") + Msg("Decomposedfs: could not create version") return } } @@ -506,7 +511,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { return err } defer file.Close() - err = upload.fs.tp.WriteBlob(n.ID, file) + err = upload.fs.tp.WriteBlob(n.BlobID, file) if err != nil { return errors.Wrap(err, "failed to upload file to blostore") } @@ -516,12 +521,12 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { // TODO trigger a workflow as the final rename might eg involve antivirus scanning if err = os.Truncate(upload.binPath, 0); err != nil { sublog.Err(err). - Msg("s3ngfs: could not truncate") + Msg("Decomposedfs: could not truncate") return } if err = os.Rename(upload.binPath, targetPath); err != nil { sublog.Err(err). - Msg("s3ngfs: could not rename") + Msg("Decomposedfs: could not rename") return } @@ -536,7 +541,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { OpaqueId: upload.info.Storage["OwnerId"], }) if err != nil { - return errors.Wrap(err, "s3ngfs: could not write metadata") + return errors.Wrap(err, "Decomposedfs: could not write metadata") } // link child name to parent if it is new @@ -548,22 +553,22 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { Interface("node", n). Str("childNameLink", childNameLink). Str("link", link). 
- Msg("s3ngfs: child name link has wrong target id, repairing") + Msg("Decomposedfs: child name link has wrong target id, repairing") if err = os.Remove(childNameLink); err != nil { - return errors.Wrap(err, "s3ngfs: could not remove symlink child entry") + return errors.Wrap(err, "Decomposedfs: could not remove symlink child entry") } } if os.IsNotExist(err) || link != "../"+n.ID { if err = os.Symlink("../"+n.ID, childNameLink); err != nil { - return errors.Wrap(err, "s3ngfs: could not symlink child entry") + return errors.Wrap(err, "Decomposedfs: could not symlink child entry") } } // only delete the upload if it was successfully written to the storage if err = os.Remove(upload.infoPath); err != nil { if !os.IsNotExist(err) { - sublog.Err(err).Msg("s3ngfs: could not delete upload info") + sublog.Err(err).Msg("Decomposedfs: could not delete upload info") return } } @@ -571,7 +576,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { /*if upload.info.MetaData["mtime"] != "" { err := upload.fs.SetMtime(ctx, np, upload.info.MetaData["mtime"]) if err != nil { - log.Err(err).Interface("info", upload.info).Msg("s3ngfs: could not set mtime metadata") + log.Err(err).Interface("info", upload.info).Msg("Decomposedfs: could not set mtime metadata") return err } }*/ @@ -593,7 +598,7 @@ func tryWritingChecksum(log *zerolog.Logger, n *node.Node, algo string, h hash.H log.Err(err). Str("csType", algo). Bytes("hash", h.Sum(nil)). 
- Msg("s3ngfs: could not write checksum") + Msg("Decomposedfs: could not write checksum") // this is not critical, the bytes are there so we will continue } } @@ -601,7 +606,7 @@ func tryWritingChecksum(log *zerolog.Logger, n *node.Node, algo string, h hash.H func (upload *fileUpload) discardChunk() { if err := os.Remove(upload.binPath); err != nil { if !os.IsNotExist(err) { - appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("s3ngfs: could not discard chunk") + appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk") return } } @@ -612,7 +617,7 @@ func (upload *fileUpload) discardChunk() { // - the upload needs to implement Terminate // AsTerminatableUpload returns a TerminatableUpload -func (fs *s3ngfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { +func (fs *Decomposedfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { return upload.(*fileUpload) } @@ -636,7 +641,7 @@ func (upload *fileUpload) Terminate(ctx context.Context) error { // - the upload needs to implement DeclareLength // AsLengthDeclarableUpload returns a LengthDeclarableUpload -func (fs *s3ngfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { +func (fs *Decomposedfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { return upload.(*fileUpload) } @@ -652,7 +657,7 @@ func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error // - the upload needs to implement ConcatUploads // AsConcatableUpload returns a ConcatableUpload -func (fs *s3ngfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { +func (fs *Decomposedfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { return upload.(*fileUpload) } diff --git a/pkg/storage/fs/s3ng/upload_test.go 
b/pkg/storage/utils/decomposedfs/upload_test.go similarity index 83% rename from pkg/storage/fs/s3ng/upload_test.go rename to pkg/storage/utils/decomposedfs/upload_test.go index 24109aa667..89b1040c12 100644 --- a/pkg/storage/fs/s3ng/upload_test.go +++ b/pkg/storage/utils/decomposedfs/upload_test.go @@ -16,7 +16,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -package s3ng_test +package decomposedfs_test import ( "bytes" @@ -31,10 +31,11 @@ import ( "github.com/stretchr/testify/mock" "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/fs/s3ng" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/mocks" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/tree" - treemocks "github.com/cs3org/reva/pkg/storage/fs/s3ng/tree/mocks" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/mocks" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" + "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" + treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks" ruser "github.com/cs3org/reva/pkg/user" . 
"github.com/onsi/ginkgo" @@ -48,8 +49,8 @@ var _ = Describe("File uploads", func() { user *userpb.User ctx context.Context - options map[string]interface{} - lookup *s3ng.Lookup + o *options.Options + lookup *decomposedfs.Lookup permissions *mocks.PermissionsChecker bs *treemocks.Blobstore ) @@ -72,21 +73,17 @@ var _ = Describe("File uploads", func() { tmpRoot, err := ioutil.TempDir("", "reva-unit-tests-*-root") Expect(err).ToNot(HaveOccurred()) - options = map[string]interface{}{ - "root": tmpRoot, - "s3.endpoint": "http://1.2.3.4:5000", - "s3.region": "default", - "s3.bucket": "the-bucket", - "s3.access_key": "foo", - "s3.secret_key": "bar", - } - lookup = &s3ng.Lookup{} + o, err = options.New(map[string]interface{}{ + "root": tmpRoot, + }) + Expect(err).ToNot(HaveOccurred()) + lookup = &decomposedfs.Lookup{Options: o} permissions = &mocks.PermissionsChecker{} bs = &treemocks.Blobstore{} }) AfterEach(func() { - root := options["root"].(string) + root := o.Root if strings.HasPrefix(root, os.TempDir()) { os.RemoveAll(root) } @@ -94,8 +91,8 @@ var _ = Describe("File uploads", func() { JustBeforeEach(func() { var err error - tree := tree.New(options["root"].(string), true, true, lookup, bs) - fs, err = s3ng.New(options, lookup, permissions, tree) + tree := tree.New(o.Root, true, true, lookup, bs) + fs, err = decomposedfs.New(o, lookup, permissions, tree) Expect(err).ToNot(HaveOccurred()) }) diff --git a/pkg/storage/fs/s3ng/xattrs/xattrs.go b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go similarity index 88% rename from pkg/storage/fs/s3ng/xattrs/xattrs.go rename to pkg/storage/utils/decomposedfs/xattrs/xattrs.go index b8e3f38639..5f2c3f4678 100644 --- a/pkg/storage/fs/s3ng/xattrs/xattrs.go +++ b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go @@ -34,6 +34,7 @@ const ( // the base name of the node // updated when the file is renamed or moved NameAttr string = OcisPrefix + "name" + BlobIDAttr string = OcisPrefix + "blobid" BlobsizeAttr string = OcisPrefix + 
"blobsize" // grantPrefix is the prefix for sharing related extended attributes @@ -50,6 +51,8 @@ const ( TrashOriginAttr string = OcisPrefix + "trash.origin" // trash origin // we use a single attribute to enable or disable propagation of both: synctime and treesize + // The propagation attribute is set to '1' at the top of the (sub)tree. Propagation will stop at + // that node. PropagationAttr string = OcisPrefix + "propagation" // the tree modification time of the tree below this node, @@ -61,7 +64,11 @@ const ( // the size of the tree below this node, // propagated when treesize_accounting is true and // user.ocis.propagation=1 is set - //treesizeAttr string = OcisPrefix + "treesize" + // stored as uint64, little endian + TreesizeAttr string = OcisPrefix + "treesize" + + // the quota for the storage space / tree, regardless who accesses it + QuotaAttr string = OcisPrefix + "quota" UserAcePrefix string = "u:" GroupAcePrefix string = "g:" diff --git a/tests/oc-integration-tests/local/storage-publiclink.toml b/tests/oc-integration-tests/local/storage-publiclink.toml index 892c49567a..7914b773c6 100644 --- a/tests/oc-integration-tests/local/storage-publiclink.toml +++ b/tests/oc-integration-tests/local/storage-publiclink.toml @@ -9,14 +9,5 @@ address = "0.0.0.0:13000" # This is a storage provider that grants direct access to the wrapped storage # we have a locally running dataprovider [grpc.services.publicstorageprovider] -driver = "owncloud" mount_path = "/public/" -mount_id = "e1a73ede-549b-4226-abdf-40e69ca8230d" -expose_data_server = true -data_server_url = "http://revad-services:13001/data" gateway_addr = "0.0.0.0:19000" -enable_home_creation = true - -[grpc.services.publicstorageprovider.drivers.owncloud] -datadirectory = "/var/tmp/reva/data" -enable_home = true