Layer refactor #2029

Merged · 5 commits · Mar 26, 2024
27 changes: 2 additions & 25 deletions cmd/containerd-shim-runhcs-v1/delete.go
@@ -4,7 +4,6 @@ package main

 import (
 	"context"
-	"encoding/json"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -18,10 +17,8 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"

"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/layers"
"github.com/Microsoft/hcsshim/internal/memory"
"github.com/Microsoft/hcsshim/internal/oc"
cimlayer "github.com/Microsoft/hcsshim/internal/wclayer/cim"
"github.com/Microsoft/hcsshim/internal/winapi"
)

@@ -126,28 +123,8 @@ The delete command will be executed in the container's bundle as its cwd.
fmt.Fprintf(os.Stderr, "failed to delete user %q: %v", username, err)
}

// cleanup the layers mounted for the container. We currently only handle cleanup of CimFS
// layers here. First n-1 values should be the image layerFolders (topmost layer being at
// index 0) and the last entry should be the scratch layer
var layerFolders []string
f, err := os.Open(filepath.Join(bundleFlag, layersFile))
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
fmt.Fprintf(os.Stderr, "open layers file: %s", err)
}
} else {
defer f.Close()
if err = json.NewDecoder(f).Decode(&layerFolders); err != nil {
fmt.Fprintf(os.Stderr, "decode layers json: %s", err)
}
}
if err == nil && cimlayer.IsCimLayer(layerFolders[0]) {
scratchLayerFolderPath := layerFolders[len(layerFolders)-1]
err = layers.ReleaseCimFSHostLayers(ctx, scratchLayerFolderPath, idFlag)
if err != nil {
fmt.Fprintf(os.Stderr, "cleanup container %q mounts: %s", idFlag, err)
}
}
// TODO(ambarve):
+	// correctly handle cleanup of cimfs layers in case of shim process crash here.
Review thread on lines +126 to +127:

@kevpar (Member), Mar 13, 2024:
What's the current thinking here? Are we still evaluating with containerd whether the new deletion behavior should be changed?

@ambarve (Contributor, Author):
The old container behavior was actually due to a bug; that means from 2.0 onwards we need to persist layer data for each container started within a shim and properly clean it up during the shim delete call. However, that's a medium/large change, so it needs to be handled in a separate PR.


 	if data, err := proto.Marshal(&task.DeleteResponse{
 		ExitedAt: timestamppb.New(time.Now()),
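For context, the cleanup logic removed above worked together with the layersFile constant deleted from main.go and the layers-file writer deleted from service_internal.go further down. A minimal consolidated sketch of that deleted flow, reconstructed from the removed lines (illustrative only; this is the behavior the TODO will eventually replace):

```go
// Sketch: the shim-delete cleanup this PR removes. At create time the shim
// wrote the container's layer folders to <bundle>/layers.json; at delete time
// it read them back and, for CimFS layers, released the host-side mounts.
// The first n-1 entries are image layer folders (topmost at index 0); the
// last entry is the scratch layer.
package main

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/Microsoft/hcsshim/internal/layers"
	cimlayer "github.com/Microsoft/hcsshim/internal/wclayer/cim"
)

func cleanupCimFSLayers(ctx context.Context, bundle, containerID string) error {
	f, err := os.Open(filepath.Join(bundle, "layers.json"))
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil // no layers file was written, nothing to clean up
		}
		return fmt.Errorf("open layers file: %w", err)
	}
	defer f.Close()

	var layerFolders []string
	if err := json.NewDecoder(f).Decode(&layerFolders); err != nil {
		return fmt.Errorf("decode layers json: %w", err)
	}
	if len(layerFolders) == 0 || !cimlayer.IsCimLayer(layerFolders[0]) {
		return nil // only CimFS layers were cleaned up here
	}
	scratch := layerFolders[len(layerFolders)-1]
	return layers.ReleaseCimFSHostLayers(ctx, scratch, containerID)
}
```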
1 change: 0 additions & 1 deletion cmd/containerd-shim-runhcs-v1/main.go
@@ -29,7 +29,6 @@ import (

 const usage = ``
 const ttrpcAddressEnv = "TTRPC_ADDRESS"
-const layersFile = "layers.json"

 // Add a manifest to get proper Windows version detection.
 //go:generate go run github.com/josephspurrier/goversioninfo/cmd/goversioninfo -platform-specific
18 changes: 6 additions & 12 deletions cmd/containerd-shim-runhcs-v1/pod.go
@@ -10,6 +10,7 @@ import (
"strings"
"sync"

"github.com/Microsoft/hcsshim/internal/layers"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/oci"
"github.com/Microsoft/hcsshim/internal/uvm"
@@ -122,22 +123,15 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskRequest
 			return nil, err
 		}
 	case *uvm.OptionsWCOW:
+		var layerFolders []string
+		if s.Windows != nil {
+			layerFolders = s.Windows.LayerFolders
+		}
 		wopts := (opts).(*uvm.OptionsWCOW)

-		// In order for the UVM sandbox.vhdx not to collide with the actual
-		// nested Argon sandbox.vhdx we append the \vm folder to the last
-		// entry in the list.
-		layersLen := len(s.Windows.LayerFolders)
-		layers := make([]string, layersLen)
-		copy(layers, s.Windows.LayerFolders)
-
-		vmPath := filepath.Join(layers[layersLen-1], "vm")
-		err := os.MkdirAll(vmPath, 0)
+		wopts.BootFiles, err = layers.GetWCOWUVMBootFilesFromLayers(ctx, req.Rootfs, layerFolders)
 		if err != nil {
 			return nil, err
 		}
-		layers[layersLen-1] = vmPath
-		wopts.LayerFolders = layers
-
 		parent, err = uvm.CreateWCOW(ctx, wopts)
 		if err != nil {
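Both UVM call sites (createPod above and newHcsStandaloneTask in task_hcs.go below) used to prepare the UVM boot layers by hand; the new layers.GetWCOWUVMBootFilesFromLayers helper now takes over that job. A minimal sketch of the old convention, reconstructed from the removed lines (the helper's actual implementation is not shown in this diff):

```go
import (
	"errors"
	"os"
	"path/filepath"
)

// uvmBootLayerFolders reproduces the removed convention: to keep the UVM's
// sandbox.vhdx from colliding with the nested Argon container's sandbox.vhdx,
// the scratch (last) entry was redirected into a "vm" subdirectory before
// being handed to uvm.CreateWCOW via OptionsWCOW.LayerFolders.
func uvmBootLayerFolders(layerFolders []string) ([]string, error) {
	if len(layerFolders) == 0 {
		return nil, errors.New("no layer folders provided")
	}
	uvmLayers := make([]string, len(layerFolders))
	copy(uvmLayers, layerFolders)

	// Permission bits are largely ignored on Windows, hence the 0 mode.
	vmPath := filepath.Join(uvmLayers[len(uvmLayers)-1], "vm")
	if err := os.MkdirAll(vmPath, 0); err != nil {
		return nil, err
	}
	uvmLayers[len(uvmLayers)-1] = vmPath
	return uvmLayers, nil
}
```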
144 changes: 0 additions & 144 deletions cmd/containerd-shim-runhcs-v1/rootfs.go

This file was deleted.

54 changes: 0 additions & 54 deletions cmd/containerd-shim-runhcs-v1/service_internal.go
@@ -27,13 +27,6 @@ import (

 var empty = &emptypb.Empty{}

-// TODO(ambarve): Once we can vendor containerd 2.0 in hcsshim, we should directly reference these types from
-// containerd module
-const (
-	LegacyMountType string = "windows-layer"
-	CimFSMountType  string = "CimFS"
-)
-
 // getPod returns the pod this shim is tracking or else returns `nil`. It is the
 // callers responsibility to verify that `s.isSandbox == true` before calling
 // this method.
@@ -123,53 +116,6 @@ func (s *service) createInternal(ctx context.Context, req *task.CreateTaskRequest
 		}
 	}

-	var layerFolders []string
-	if spec.Windows != nil {
-		layerFolders = spec.Windows.LayerFolders
-	}
-	if err := validateRootfsAndLayers(req.Rootfs, layerFolders); err != nil {
-		return nil, err
-	}
-
-	// Only work with Windows here.
-	// Parsing of the rootfs mount for Linux containers occurs later.
-	if spec.Linux == nil && len(req.Rootfs) > 0 {
-		// For Windows containers, we work with LayerFolders throughout
-		// much of the creation logic in the shim. If we were given a
-		// rootfs mount, convert it to LayerFolders here.
-		m := req.Rootfs[0]
-		if m.Type != LegacyMountType && m.Type != CimFSMountType {
-			return nil, fmt.Errorf("unsupported Windows mount type: %s", m.Type)
-		} else if m.Type == CimFSMountType && (shimOpts.SandboxIsolation == runhcsopts.Options_HYPERVISOR) {
-			// For CIMFS layers only process isolation is supported right now.
-			return nil, fmt.Errorf("cimfs doesn't support hyperv isolation")
-		}
-
-		source, parentLayerPaths, err := parseLegacyRootfsMount(m)
-		if err != nil {
-			return nil, err
-		}
-
-		// Append the parents
-		spec.Windows.LayerFolders = append(spec.Windows.LayerFolders, parentLayerPaths...)
-		// Append the scratch
-		spec.Windows.LayerFolders = append(spec.Windows.LayerFolders, source)
-
-		if m.Type == CimFSMountType {
-			// write the layers to a file so that it can be used for proper cleanup during shim
-			// delete. We can't write to the config.json as it is read-only for shim.
-			f, err = os.Create(filepath.Join(req.Bundle, layersFile))
-			if err != nil {
-				return nil, err
-			}
-			if err := json.NewEncoder(f).Encode(spec.Windows.LayerFolders); err != nil {
-				f.Close()
-				return nil, err
-			}
-			f.Close()
-		}
-	}
-
 	// This is a Windows Argon make sure that we have a Root filled in.
 	if spec.Windows.HyperV == nil {
 		if spec.Root == nil {
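The deleted block above also encoded the shim's layer-ordering convention: parent layers first, topmost at index 0, scratch layer last. A minimal sketch of that conversion, assuming the mounts are containerd api/types.Mount values as their usage here implies; parseLegacyRootfsMount and the two mount-type constants come from the deleted code:

```go
import (
	"fmt"

	"github.com/containerd/containerd/api/types"
)

// rootfsToLayerFolders sketches the removed conversion from a containerd
// rootfs mount to the LayerFolders list the shim used throughout container
// creation (per the deleted code above).
func rootfsToLayerFolders(m *types.Mount) ([]string, error) {
	// Only the two Windows layer mount types were accepted.
	if m.Type != LegacyMountType && m.Type != CimFSMountType {
		return nil, fmt.Errorf("unsupported Windows mount type: %s", m.Type)
	}
	// source is the scratch layer; parentLayerPaths are the read-only image
	// layers, topmost first.
	source, parentLayerPaths, err := parseLegacyRootfsMount(m)
	if err != nil {
		return nil, err
	}
	layerFolders := append([]string{}, parentLayerPaths...) // parents first
	return append(layerFolders, source), nil                // scratch last
}
```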
51 changes: 26 additions & 25 deletions cmd/containerd-shim-runhcs-v1/task_hcs.go
@@ -35,6 +35,7 @@ import (
 	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
 	"github.com/Microsoft/hcsshim/internal/hcsoci"
 	"github.com/Microsoft/hcsshim/internal/jobcontainers"
+	"github.com/Microsoft/hcsshim/internal/layers"
 	"github.com/Microsoft/hcsshim/internal/log"
 	"github.com/Microsoft/hcsshim/internal/memory"
 	"github.com/Microsoft/hcsshim/internal/oc"
@@ -82,23 +83,15 @@ func newHcsStandaloneTask(ctx context.Context, events publisher, req *task.CreateTaskRequest
 			return nil, err
 		}
 	case *uvm.OptionsWCOW:
+		var layerFolders []string
+		if s.Windows != nil {
+			layerFolders = s.Windows.LayerFolders
+		}
 		wopts := (opts).(*uvm.OptionsWCOW)

-		// In order for the UVM sandbox.vhdx not to collide with the actual
-		// nested Argon sandbox.vhdx we append the \vm folder to the last
-		// entry in the list.
-		layersLen := len(s.Windows.LayerFolders)
-		layers := make([]string, layersLen)
-		copy(layers, s.Windows.LayerFolders)
-
-		vmPath := filepath.Join(layers[layersLen-1], "vm")
-		err := os.MkdirAll(vmPath, 0)
+		wopts.BootFiles, err = layers.GetWCOWUVMBootFilesFromLayers(ctx, req.Rootfs, layerFolders)
 		if err != nil {
 			return nil, err
 		}
-		layers[layersLen-1] = vmPath
-		wopts.LayerFolders = layers
-
 		parent, err = uvm.CreateWCOW(ctx, wopts)
 		if err != nil {
 			return nil, err
@@ -140,8 +133,24 @@ func createContainer(
 		resources *resources.Resources
 	)

+	var wcowLayers layers.WCOWLayers
+	var lcowLayers *layers.LCOWLayers
+	var layerFolders []string
+	if s.Windows != nil {
+		layerFolders = s.Windows.LayerFolders
+	}
+	if s.Linux != nil {
+		lcowLayers, err = layers.ParseLCOWLayers(rootfs, layerFolders)
+	} else {
+		wcowLayers, err = layers.ParseWCOWLayers(rootfs, layerFolders)
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+
 	if oci.IsJobContainer(s) {
-		container, resources, err = jobcontainers.Create(ctx, id, s)
+		opts := jobcontainers.CreateOptions{WCOWLayers: wcowLayers}
+		container, resources, err = jobcontainers.Create(ctx, id, s, opts)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -152,18 +161,10 @@
 		Spec:             s,
 		HostingSystem:    parent,
 		NetworkNamespace: netNS,
+		LCOWLayers:       lcowLayers,
+		WCOWLayers:       wcowLayers,
 	}
-	if s.Linux != nil {
-		var layerFolders []string
-		if s.Windows != nil {
-			layerFolders = s.Windows.LayerFolders
-		}
-		lcowLayers, err := getLCOWLayers(rootfs, layerFolders)
-		if err != nil {
-			return nil, nil, err
-		}
-		opts.LCOWLayers = lcowLayers
-	}

 	if shimOpts != nil {
 		opts.ScaleCPULimitsToSandbox = shimOpts.ScaleCpuLimitsToSandbox
 	}