
Commit 0d8fb0f

chore: return early success on vmss detach disk (#9818)
Co-authored-by: Lana Andreasyan <[email protected]>
1 parent: 2e3b704 · commit: 0d8fb0f

2 files changed, +70 -27 lines changed

pkg/provider/azure_controller_vmss.go

Lines changed: 20 additions & 13 deletions
@@ -172,19 +172,26 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis
 	}
 
 	if !bFoundDisk {
-		// only log here, next action is to update VM status with original meta data
-		klog.Warningf("detach azure disk on node(%s): disk list(%s) not found", nodeName, diskMap)
-	} else {
-		if ss.IsStackCloud() {
-			// Azure stack does not support ToBeDetached flag, use original way to detach disk
-			var newDisks []*armcompute.DataDisk
-			for _, disk := range disks {
-				if !ptr.Deref(disk.ToBeDetached, false) {
-					newDisks = append(newDisks, disk)
-				}
+		// No matching disks found to detach - skip the VM update.
+		// This can occur when:
+		// 1. The VM has no data disks attached
+		// 2. The requested disk(s) to detach are not present on the VM
+		// Skipping the update avoids unnecessary API calls and prevents generic updates to the VM
+		// that can stay pending during critical failures (e.g., zone outages).
+		// This update can block the client from seeing the successful detach of the disk(s).
+		klog.Warningf("azureDisk - detach disk: VM update skipped as no disks to detach on node(%s) with diskMap(%v)", nodeName, diskMap)
+		return nil
+	}
+
+	if ss.IsStackCloud() {
+		// Azure stack does not support ToBeDetached flag, use original way to detach disk
+		var newDisks []*armcompute.DataDisk
+		for _, disk := range disks {
+			if !ptr.Deref(disk.ToBeDetached, false) {
+				newDisks = append(newDisks, disk)
 			}
-			disks = newDisks
 		}
+		disks = newDisks
 	}
 
 	newVM := &armcompute.VirtualMachineScaleSetVM{
@@ -207,13 +214,13 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis
 		}
 	}
 
-	logger.V(2).Info("azureDisk - update: vm - detach disk returned with error", "resourceGroup", nodeResourceGroup, "nodeName", nodeName, "diskMap", diskMap, "error", err)
-
 	if rerr == nil && result != nil && result.Properties != nil {
+		logger.V(2).Info("azureDisk - update: vm - detach disk returned successfully", "resourceGroup", nodeResourceGroup, "nodeName", nodeName, "diskMap", diskMap)
 		if err := ss.updateCache(ctx, vmName, nodeResourceGroup, vm.VMSSName, vm.InstanceID, result); err != nil {
			logger.Error(err, "updateCache failed", "vmName", vmName, "resourceGroup", nodeResourceGroup, "vmssName", vm.VMSSName, "instanceID", vm.InstanceID)
 		}
 	} else {
+		logger.V(2).Info("azureDisk - update: vm - detach disk returned with error", "resourceGroup", nodeResourceGroup, "nodeName", nodeName, "diskMap", diskMap, "error", rerr)
 		_ = ss.DeleteCacheForNode(ctx, vmName)
 	}
 	return rerr
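
For readers skimming the change: after this commit, DetachDisk returns success as soon as it determines that none of the requested disks are attached, instead of issuing a VM update with the original disk list. The sketch below is a minimal, self-contained illustration of that control flow, not the provider code itself; detachDisks, updateVM, and their string-based parameters are hypothetical stand-ins for the real DetachDisk method and the VMSS VM update call.

```go
package main

import (
	"fmt"
	"log"
)

// updateVM stands in for the real VMSS VM update call that the commit now skips
// when there is nothing to detach.
func updateVM(node string, disks []string) error {
	fmt.Printf("updating VM %s, remaining disks: %v\n", node, disks)
	return nil
}

// detachDisks mirrors the new DetachDisk control flow: if none of the requested
// disks are attached, log a warning and return success without updating the VM.
func detachDisks(node string, attached []string, diskMap map[string]bool) error {
	found := false
	var remaining []string
	for _, d := range attached {
		if diskMap[d] {
			found = true
			continue // drop the disk being detached
		}
		remaining = append(remaining, d)
	}
	if !found {
		// Early success: no matching disks, so the VM update is skipped entirely.
		log.Printf("detach disk: VM update skipped as no disks to detach on node(%s) with diskMap(%v)", node, diskMap)
		return nil
	}
	return updateVM(node, remaining)
}

func main() {
	// Requested disk is not on the VM: returns nil without calling updateVM.
	_ = detachDisks("vmss00-vm-000000", []string{"disk2", "disk3"}, map[string]bool{"disk-name-err": true})
	// Requested disk is attached: updateVM runs with the disk removed.
	_ = detachDisks("vmss00-vm-000000", []string{"disk1", "disk2"}, map[string]bool{"disk1": true})
}
```

The property mirrored from the diff is that the update call is never reached on the no-op path, so a VMSS update left pending (for example during a zone outage) can no longer block an already-successful detach from being reported.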

pkg/provider/azure_controller_vmss_test.go

Lines changed: 50 additions & 14 deletions
@@ -174,6 +174,8 @@ func TestDetachDiskWithVMSS(t *testing.T) {
 		vmssvmName     types.NodeName
 		disks          []string
 		forceDetach    bool
+		noDisksOnVM    bool
+		skipUpdate     bool
 		expectedErr    bool
 		expectedErrMsg string
 	}{
@@ -221,11 +223,31 @@ func TestDetachDiskWithVMSS(t *testing.T) {
 			expectedErrMsg: "instance not found",
 		},
 		{
-			desc: "no error shall be returned if everything is good and the attaching disk does not match data disk",
+			desc: "no error shall be returned if VM has disks but the disk to detach does not match any data disk",
 			vmssVMList: []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
 			vmssName: "vmss00",
 			vmssvmName: "vmss00-vm-000000",
 			disks: []string{"disk-name-err"},
+			skipUpdate: true,
+			expectedErr: false,
+		},
+		{
+			desc: "no error shall be returned if VM has disks but multiple disks to detach do not match any data disk",
+			vmssVMList: []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
+			vmssName: "vmss00",
+			vmssvmName: "vmss00-vm-000000",
+			disks: []string{"disk-name-err", "another-nonexistent-disk"},
+			skipUpdate: true,
+			expectedErr: false,
+		},
+		{
+			desc: "no error shall be returned if no disks are found on the VM and update is skipped",
+			vmssVMList: []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
+			vmssName: "vmss00",
+			vmssvmName: "vmss00-vm-000000",
+			disks: []string{diskName},
+			noDisksOnVM: true,
+			skipUpdate: true,
 			expectedErr: false,
 		},
 	}
@@ -245,17 +267,13 @@ func TestDetachDiskWithVMSS(t *testing.T) {
 		expectedVMSSVMs, _, _ := buildTestVirtualMachineEnv(testCloud, scaleSetName, "", 0, test.vmssVMList, "succeeded", false)
 		var updatedVMSSVM *armcompute.VirtualMachineScaleSetVM
 		for itr, vmssvm := range expectedVMSSVMs {
-			vmssvm.Properties.StorageProfile = &armcompute.StorageProfile{
-				OSDisk: &armcompute.OSDisk{
-					Name: ptr.To("OSDisk1"),
-					ManagedDisk: &armcompute.ManagedDiskParameters{
-						ID: ptr.To("ManagedID"),
-						DiskEncryptionSet: &armcompute.DiskEncryptionSetParameters{
-							ID: ptr.To("DiskEncryptionSetID"),
-						},
-					},
-				},
-				DataDisks: []*armcompute.DataDisk{
+			var dataDisks []*armcompute.DataDisk
+			if test.noDisksOnVM {
+				// No disks on the VM
+				dataDisks = []*armcompute.DataDisk{}
+			} else {
+				// Default: VM has disks
+				dataDisks = []*armcompute.DataDisk{
 					{
 						Lun: ptr.To(int32(0)),
 						Name: ptr.To(diskName),
@@ -268,7 +286,19 @@ func TestDetachDiskWithVMSS(t *testing.T) {
 						Lun: ptr.To(int32(2)),
 						Name: ptr.To("disk3"),
 					},
+				}
+			}
+			vmssvm.Properties.StorageProfile = &armcompute.StorageProfile{
+				OSDisk: &armcompute.OSDisk{
+					Name: ptr.To("OSDisk1"),
+					ManagedDisk: &armcompute.ManagedDiskParameters{
+						ID: ptr.To("ManagedID"),
+						DiskEncryptionSet: &armcompute.DiskEncryptionSetParameters{
+							ID: ptr.To("DiskEncryptionSetID"),
+						},
+					},
 				},
+				DataDisks: dataDisks,
 			}
 
 			if string(test.vmssvmName) == *vmssvm.Properties.OSProfile.ComputerName {
@@ -279,9 +309,11 @@ func TestDetachDiskWithVMSS(t *testing.T) {
 		mockVMSSVMClient.EXPECT().ListVMInstanceView(gomock.Any(), testCloud.ResourceGroup, scaleSetName).Return(expectedVMSSVMs, nil).AnyTimes()
 		if scaleSetName == strings.ToLower(string(fakeStatusNotFoundVMSSName)) {
 			mockVMSSVMClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any(), gomock.Any()).Return(updatedVMSSVM, &azcore.ResponseError{StatusCode: http.StatusNotFound, ErrorCode: cloudprovider.InstanceNotFound.Error()}).AnyTimes()
-		} else {
+		} else if !test.skipUpdate {
+			// Update should only be called if there are matching disks to detach
			mockVMSSVMClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any(), gomock.Any()).Return(updatedVMSSVM, nil).AnyTimes()
 		}
+		// If skipUpdate is true, we explicitly expect Update NOT to be called (no mock expectation set)
 
 		mockVMClient := testCloud.ComputeClientFactory.GetVirtualMachineClient().(*mock_virtualmachineclient.MockInterface)
 		mockVMClient.EXPECT().List(gomock.Any(), ss.ResourceGroup).Return([]*armcompute.VirtualMachine{}, nil).AnyTimes()
@@ -300,7 +332,11 @@ func TestDetachDiskWithVMSS(t *testing.T) {
 
 		if !test.expectedErr {
 			dataDisks, _, err := ss.GetDataDisks(context.TODO(), test.vmssvmName, azcache.CacheReadTypeDefault)
-			assert.Equal(t, true, len(dataDisks) == 3, "TestCase[%d]: %s, actual data disk num: %d, err: %v", i, test.desc, len(dataDisks), err)
+			if test.noDisksOnVM {
+				assert.Equal(t, true, len(dataDisks) == 0, "TestCase[%d]: %s, actual data disk num: %d, err: %v", i, test.desc, len(dataDisks), err)
+			} else {
+				assert.Equal(t, true, len(dataDisks) == 3, "TestCase[%d]: %s, actual data disk num: %d, err: %v", i, test.desc, len(dataDisks), err)
+			}
 		}
 	}
 }
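
The mock setup above relies on a gomock property: when test.skipUpdate is true, no expectation is registered for Update, so any call to Update would fail the test. The same idea can be shown with a hand-rolled fake instead of generated mocks; the sketch below is illustrative only, and fakeVMSSVMClient, detachIfPresent, and the table entries are hypothetical names, not part of this repository.

```go
package detach

import "testing"

// fakeVMSSVMClient is a hand-rolled stand-in for the generated VMSS VM mock:
// it counts Update calls so the test can assert the call never happened.
type fakeVMSSVMClient struct {
	updateCalls int
}

func (c *fakeVMSSVMClient) Update(node string) { c.updateCalls++ }

// detachIfPresent mirrors the early-return behavior under test: the client is
// only updated when the requested disk is actually attached.
func detachIfPresent(c *fakeVMSSVMClient, node string, attached []string, disk string) error {
	for _, d := range attached {
		if d == disk {
			c.Update(node)
			return nil
		}
	}
	return nil // early success, no update issued
}

func TestDetachSkipsUpdateWhenNoDiskMatches(t *testing.T) {
	tests := []struct {
		desc        string
		attached    []string
		disk        string
		wantUpdates int
	}{
		{desc: "disk not on VM", attached: []string{"disk1"}, disk: "disk-name-err", wantUpdates: 0},
		{desc: "no disks on VM", attached: nil, disk: "disk1", wantUpdates: 0},
		{desc: "disk on VM", attached: []string{"disk1"}, disk: "disk1", wantUpdates: 1},
	}
	for i, tt := range tests {
		c := &fakeVMSSVMClient{}
		if err := detachIfPresent(c, "vmss00-vm-000000", tt.attached, tt.disk); err != nil {
			t.Fatalf("TestCase[%d]: %s, unexpected error: %v", i, tt.desc, err)
		}
		if c.updateCalls != tt.wantUpdates {
			t.Errorf("TestCase[%d]: %s, Update called %d times, want %d", i, tt.desc, c.updateCalls, tt.wantUpdates)
		}
	}
}
```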
