Skip to content

Commit 61e1b43

Browse files
committed
Skip unmount layers for sandbox container.
* Skip unmounting the layers for the sandbox container as we know the UVM gets torn down shortly afterwards. Signed-off-by: Daniel Canter <[email protected]>
1 parent e7d50a7 commit 61e1b43

4 files changed

Lines changed: 83 additions & 36 deletions

File tree

internal/hcsoci/create.go

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ func initializeCreateOptions(ctx context.Context, createOptions *CreateOptions)
220220

221221
// configureSandboxNetwork creates a new network namespace for the pod (sandbox)
222222
// if required and then adds that namespace to the pod.
223-
func configureSandboxNetwork(ctx context.Context, coi *createOptionsInternal, r *resources.Resources) error {
223+
func configureSandboxNetwork(ctx context.Context, coi *createOptionsInternal, r *resources.Resources, ct oci.KubernetesContainerType) error {
224224
if coi.NetworkNamespace != "" {
225225
r.SetNetNS(coi.NetworkNamespace)
226226
} else {
@@ -232,15 +232,11 @@ func configureSandboxNetwork(ctx context.Context, coi *createOptionsInternal, r
232232
coi.actualNetworkNamespace = r.NetNS()
233233

234234
if coi.HostingSystem != nil {
235-
ct, _, err := oci.GetSandboxTypeAndID(coi.Spec.Annotations)
236-
if err != nil {
237-
return err
238-
}
239235
// Only add the network namespace to a standalone or sandbox
240236
// container but not a workload container in a sandbox that inherits
241237
// the namespace.
242238
if ct == oci.KubernetesContainerTypeNone || ct == oci.KubernetesContainerTypeSandbox {
243-
if err = SetupNetworkNamespace(ctx, coi.HostingSystem, coi.actualNetworkNamespace); err != nil {
239+
if err := SetupNetworkNamespace(ctx, coi.HostingSystem, coi.actualNetworkNamespace); err != nil {
244240
return err
245241
}
246242
r.SetAddedNetNSToVM(true)
@@ -284,14 +280,19 @@ func CreateContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.C
284280
}
285281
}
286282

283+
ct, _, err := oci.GetSandboxTypeAndID(coi.Spec.Annotations)
284+
if err != nil {
285+
return nil, r, err
286+
}
287+
isSandbox := ct == oci.KubernetesContainerTypeSandbox
288+
287289
// Create a network namespace if necessary.
288290
if coi.Spec.Windows != nil &&
289291
coi.Spec.Windows.Network != nil &&
290292
schemaversion.IsV21(coi.actualSchemaVersion) {
291-
err = configureSandboxNetwork(ctx, coi, r)
293+
err = configureSandboxNetwork(ctx, coi, r, ct)
292294
if err != nil {
293295
return nil, r, fmt.Errorf("failure while creating namespace for container: %s", err)
294-
295296
}
296297
}
297298

@@ -302,7 +303,7 @@ func CreateContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.C
302303
return nil, r, errors.New("LCOW v1 not supported")
303304
}
304305
log.G(ctx).Debug("hcsshim::CreateContainer allocateLinuxResources")
305-
err = allocateLinuxResources(ctx, coi, r)
306+
err = allocateLinuxResources(ctx, coi, r, isSandbox)
306307
if err != nil {
307308
log.G(ctx).WithError(err).Debug("failed to allocateLinuxResources")
308309
return nil, r, err
@@ -313,7 +314,7 @@ func CreateContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.C
313314
return nil, r, err
314315
}
315316
} else {
316-
err = allocateWindowsResources(ctx, coi, r)
317+
err = allocateWindowsResources(ctx, coi, r, isSandbox)
317318
if err != nil {
318319
log.G(ctx).WithError(err).Debug("failed to allocateWindowsResources")
319320
return nil, r, err

internal/hcsoci/resources_lcow.go

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ func getGPUVHDPath(coi *createOptionsInternal) (string, error) {
3535
return gpuVHDPath, nil
3636
}
3737

38-
func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *resources.Resources) error {
38+
func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *resources.Resources, isSandbox bool) error {
3939
if coi.Spec.Root == nil {
4040
coi.Spec.Root = &specs.Root{}
4141
}
@@ -44,10 +44,10 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *
4444
log.G(ctx).Debug("hcsshim::allocateLinuxResources mounting storage")
4545
rootPath, err := layers.MountContainerLayers(ctx, coi.Spec.Windows.LayerFolders, containerRootInUVM, coi.HostingSystem)
4646
if err != nil {
47-
return fmt.Errorf("failed to mount container storage: %s", err)
47+
return errors.Wrap(err, "failed to mount container storage")
4848
}
4949
coi.Spec.Root.Path = rootPath
50-
layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders)
50+
layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, isSandbox)
5151
r.SetLayers(layers)
5252
} else if coi.Spec.Root.Path != "" {
5353
// This is the "Plan 9" root filesystem.
@@ -56,7 +56,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *
5656
uvmPathForContainersFileSystem := path.Join(r.ContainerRootInUVM(), uvm.RootfsPath)
5757
share, err := coi.HostingSystem.AddPlan9(ctx, hostPath, uvmPathForContainersFileSystem, coi.Spec.Root.Readonly, false, nil)
5858
if err != nil {
59-
return fmt.Errorf("adding plan9 root: %s", err)
59+
return errors.Wrap(err, "adding plan9 root")
6060
}
6161
coi.Spec.Root.Path = uvmPathForContainersFileSystem
6262
r.Add(share)
@@ -95,7 +95,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *
9595
uvmPathForShare = fmt.Sprintf(uvm.LCOWGlobalMountPrefix, coi.HostingSystem.UVMMountCounter())
9696
scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, hostPath, uvmPathForShare, readOnly)
9797
if err != nil {
98-
return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
98+
return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount)
9999
}
100100

101101
uvmPathForFile = scsiMount.UVMPath
@@ -110,7 +110,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *
110110
// that is where it was previously mounted in UVM
111111
scsiMount, err := coi.HostingSystem.AddSCSI(ctx, hostPath, uvmPathForShare, readOnly, uvm.VMAccessTypeIndividual)
112112
if err != nil {
113-
return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
113+
return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount)
114114
}
115115

116116
uvmPathForFile = scsiMount.UVMPath
@@ -124,7 +124,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *
124124
} else {
125125
st, err := os.Stat(hostPath)
126126
if err != nil {
127-
return fmt.Errorf("could not open bind mount target: %s", err)
127+
return errors.Wrap(err, "could not open bind mount target")
128128
}
129129
restrictAccess := false
130130
var allowedNames []string
@@ -140,7 +140,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *
140140
l.Debug("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount")
141141
share, err := coi.HostingSystem.AddPlan9(ctx, hostPath, uvmPathForShare, readOnly, restrictAccess, allowedNames)
142142
if err != nil {
143-
return fmt.Errorf("adding plan9 mount %+v: %s", mount, err)
143+
return errors.Wrapf(err, "adding plan9 mount %+v", mount)
144144
}
145145
r.Add(share)
146146
}

internal/hcsoci/resources_wcow.go

Lines changed: 48 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,12 @@ import (
1919
"github.com/Microsoft/hcsshim/internal/uvm"
2020
"github.com/Microsoft/hcsshim/internal/wclayer"
2121
specs "github.com/opencontainers/runtime-spec/specs-go"
22+
"github.com/pkg/errors"
2223
)
2324

24-
func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r *resources.Resources) error {
25+
func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r *resources.Resources, isSandbox bool) error {
2526
if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.LayerFolders == nil {
26-
return fmt.Errorf("field 'Spec.Windows.Layerfolders' is not populated")
27+
return errors.New("field 'Spec.Windows.Layerfolders' is not populated")
2728
}
2829

2930
scratchFolder := coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
@@ -32,15 +33,15 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r
3233
// Create the directory for the RW scratch layer if it doesn't exist
3334
if _, err := os.Stat(scratchFolder); os.IsNotExist(err) {
3435
if err := os.MkdirAll(scratchFolder, 0777); err != nil {
35-
return fmt.Errorf("failed to auto-create container scratch folder %s: %s", scratchFolder, err)
36+
return errors.Wrapf(err, "failed to auto-create container scratch folder %s", scratchFolder)
3637
}
3738
}
3839

3940
// Create sandbox.vhdx if it doesn't exist in the scratch folder. It's called sandbox.vhdx
4041
// rather than scratch.vhdx as in the v1 schema, it's hard-coded in HCS.
4142
if _, err := os.Stat(filepath.Join(scratchFolder, "sandbox.vhdx")); os.IsNotExist(err) {
4243
if err := wclayer.CreateScratchLayer(ctx, scratchFolder, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]); err != nil {
43-
return fmt.Errorf("failed to CreateSandboxLayer %s", err)
44+
return errors.Wrap(err, "failed to CreateSandboxLayer")
4445
}
4546
}
4647

@@ -53,10 +54,10 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r
5354
containerRootInUVM := r.ContainerRootInUVM()
5455
containerRootPath, err := layers.MountContainerLayers(ctx, coi.Spec.Windows.LayerFolders, containerRootInUVM, coi.HostingSystem)
5556
if err != nil {
56-
return fmt.Errorf("failed to mount container storage: %s", err)
57+
return errors.Wrap(err, "failed to mount container storage")
5758
}
5859
coi.Spec.Root.Path = containerRootPath
59-
layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders)
60+
layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, isSandbox)
6061
r.SetLayers(layers)
6162
}
6263

@@ -136,37 +137,74 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R
136137
l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount")
137138
scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, mount.Source, uvmPath, readOnly)
138139
if err != nil {
139-
return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
140+
return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount)
140141
}
141142
coi.Spec.Mounts[i].Type = ""
142143
r.Add(scsiMount)
143144
} else if mount.Type == "virtual-disk" {
144145
l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount")
145146
scsiMount, err := coi.HostingSystem.AddSCSI(ctx, mount.Source, uvmPath, readOnly, uvm.VMAccessTypeIndividual)
146147
if err != nil {
147-
return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
148+
return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount)
148149
}
149150
coi.Spec.Mounts[i].Type = ""
150151
r.Add(scsiMount)
151152
} else {
152153
if uvm.IsPipe(mount.Source) {
153154
pipe, err := coi.HostingSystem.AddPipe(ctx, mount.Source)
154155
if err != nil {
155-
return fmt.Errorf("failed to add named pipe to UVM: %s", err)
156+
return errors.Wrap(err, "failed to add named pipe to UVM")
156157
}
157158
r.Add(pipe)
158159
} else {
159160
l.Debug("hcsshim::allocateWindowsResources Hot-adding VSMB share for OCI mount")
160161
options := coi.HostingSystem.DefaultVSMBOptions(readOnly)
161162
share, err := coi.HostingSystem.AddVSMB(ctx, mount.Source, options)
162163
if err != nil {
163-
return fmt.Errorf("failed to add VSMB share to utility VM for mount %+v: %s", mount, err)
164+
return errors.Wrapf(err, "failed to add VSMB share to utility VM for mount %+v", mount)
164165
}
165166
r.Add(share)
166167
}
167168
}
168169
}
169170
}
170171

172+
if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok {
173+
// Only need to create a CCG instance for v2 containers
174+
if schemaversion.IsV21(coi.actualSchemaVersion) {
175+
hypervisorIsolated := coi.HostingSystem != nil
176+
ccgInstance, ccgResource, err := credentials.CreateCredentialGuard(ctx, coi.actualID, cs, hypervisorIsolated)
177+
if err != nil {
178+
return err
179+
}
180+
coi.ccgState = ccgInstance.CredentialGuard
181+
r.Add(ccgResource)
182+
if hypervisorIsolated {
183+
// If hypervisor isolated we need to add an hvsocket service table entry
184+
// By default HVSocket won't allow something inside the VM to connect
185+
// back to a process on the host. We need to update the HVSocket service table
186+
// to allow a connection to CCG.exe on the host, so that GMSA can function.
187+
// We need to hot add this here because at UVM creation time we don't know what containers
188+
// will be launched in the UVM, nonetheless if they will ask for GMSA. This is a workaround
189+
// for the previous design requirement for CCG V2 where the service entry
190+
// must be present in the UVM'S HCS document before being sent over as hot adding
191+
// an HvSocket service was not possible.
192+
hvSockConfig := ccgInstance.HvSocketConfig
193+
if err := coi.HostingSystem.UpdateHvSocketService(ctx, hvSockConfig.ServiceId, hvSockConfig.ServiceConfig); err != nil {
194+
return errors.Wrap(err, "failed to update hvsocket service")
195+
}
196+
}
197+
}
198+
}
199+
200+
if coi.HostingSystem != nil && coi.hasWindowsAssignedDevices() {
201+
windowsDevices, closers, err := handleAssignedDevicesWindows(ctx, coi.HostingSystem, coi.Spec.Annotations, coi.Spec.Windows.Devices)
202+
if err != nil {
203+
return err
204+
}
205+
r.Add(closers...)
206+
coi.Spec.Windows.Devices = windowsDevices
207+
}
208+
171209
return nil
172210
}

internal/layers/layers.go

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -23,18 +23,26 @@ type ImageLayers struct {
2323
vm *uvm.UtilityVM
2424
containerRootInUVM string
2525
layers []string
26+
// In some instances we may want to avoid cleaning up the image layers, such as when tearing
27+
// down a sandbox container since the UVM will be torn down shortly after and the resources
28+
// can be cleaned up on the host.
29+
skipCleanup bool
2630
}
2731

28-
func NewImageLayers(vm *uvm.UtilityVM, containerRootInUVM string, layers []string) *ImageLayers {
32+
func NewImageLayers(vm *uvm.UtilityVM, containerRootInUVM string, layers []string, skipCleanup bool) *ImageLayers {
2933
return &ImageLayers{
3034
vm: vm,
3135
containerRootInUVM: containerRootInUVM,
3236
layers: layers,
37+
skipCleanup: skipCleanup,
3338
}
3439
}
3540

3641
// Release unmounts all of the layers located in the layers array.
3742
func (layers *ImageLayers) Release(ctx context.Context, all bool) error {
43+
if layers.skipCleanup && layers.vm != nil {
44+
return nil
45+
}
3846
op := UnmountOperationSCSI
3947
if layers.vm == nil || all {
4048
op = UnmountOperationAll
@@ -244,9 +252,9 @@ func removeLCOWLayer(ctx context.Context, uvm *uvmpkg.UtilityVM, layerPath strin
244252
}).Debug("Removed LCOW layer")
245253
return nil
246254
}
247-
return fmt.Errorf("failed to remove SCSI layer: %s", err)
255+
return errors.Wrap(err, "failed to remove SCSI layer")
248256
}
249-
return fmt.Errorf("failed to remove VPMEM layer: %s", err)
257+
return errors.Wrap(err, "failed to remove VPMEM layer")
250258
}
251259

252260
// UnmountOperation is used when calling Unmount() to determine what type of unmount is
@@ -270,10 +278,10 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
270278
if uvm == nil {
271279
// Must be an argon - folders are mounted on the host
272280
if op != UnmountOperationAll {
273-
return fmt.Errorf("only operation supported for host-mounted folders is unmountOperationAll")
281+
return errors.New("only operation supported for host-mounted folders is unmountOperationAll")
274282
}
275283
if len(layerFolders) < 1 {
276-
return fmt.Errorf("need at least one layer for Unmount")
284+
return errors.New("need at least one layer for Unmount")
277285
}
278286
path := layerFolders[len(layerFolders)-1]
279287
if err := wclayer.UnprepareLayer(ctx, path); err != nil {
@@ -286,7 +294,7 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
286294

287295
// Base+Scratch as a minimum. This is different to v1 which only requires the scratch
288296
if len(layerFolders) < 2 {
289-
return fmt.Errorf("at least two layers are required for unmount")
297+
return errors.New("at least two layers are required for unmount")
290298
}
291299

292300
var retError error
@@ -302,7 +310,7 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, containe
302310
if (op & UnmountOperationSCSI) == UnmountOperationSCSI {
303311
hostScratchFile, err := getScratchVHDPath(layerFolders)
304312
if err != nil {
305-
return fmt.Errorf("failed to get scratch VHD path in layer folders: %s", err)
313+
return errors.Wrap(err, "failed to get scratch VHD path in layer folders")
306314
}
307315
if err := uvm.RemoveSCSI(ctx, hostScratchFile); err != nil {
308316
log.G(ctx).WithError(err).Warn("failed to remove scratch")
@@ -383,7 +391,7 @@ func getScratchVHDPath(layerFolders []string) (string, error) {
383391
// Evaluate the symlink here (if there is one).
384392
hostPath, err := filepath.EvalSymlinks(hostPath)
385393
if err != nil {
386-
return "", fmt.Errorf("failed to eval symlinks: %s", err)
394+
return "", errors.Wrap(err, "failed to eval symlinks")
387395
}
388396
return hostPath, nil
389397
}

0 commit comments

Comments (0)