Skip to content

Commit 29c4f3f

Browse files
committed
feat: migrate vm disk attach/detach to aaz
1 parent d135371 commit 29c4f3f

File tree

2 files changed

+231
-122
lines changed

2 files changed

+231
-122
lines changed

src/azure-cli/azure/cli/command_modules/vm/_vm_utils.py

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -780,3 +780,31 @@ class IdentityType(Enum):
780780
USER_ASSIGNED = 'UserAssigned'
781781
SYSTEM_ASSIGNED_USER_ASSIGNED = 'SystemAssigned, UserAssigned'
782782
NONE = 'None'
783+
784+
785+
def safe_get(d: dict, path: str, default=None):
    """
    Walk a nested dict/list structure along a dotted *path* and return the value found.

    Dict segments are looked up by key; list segments are interpreted as integer
    indices (e.g. 'storageProfile.dataDisks.0.managedDisk.id'). Whenever a segment
    is absent, a list index is out of range or non-numeric, or the current node is
    neither a dict nor a list, *default* is returned instead of raising.
    """
    node = d
    for segment in path.split('.'):
        if isinstance(node, dict):
            # plain key lookup; missing key short-circuits to the default
            if segment not in node:
                return default
            node = node[segment]
        elif isinstance(node, list):
            # list nodes only accept integer segments within range
            try:
                node = node[int(segment)]
            except (ValueError, IndexError):
                return default
        else:
            # scalar (or None) reached before the path was exhausted
            return default
    return node

src/azure-cli/azure/cli/command_modules/vm/custom.py

Lines changed: 203 additions & 122 deletions
Original file line numberDiff line numberDiff line change
@@ -2280,120 +2280,169 @@ def show_default_diagnostics_configuration(is_windows_os=False):
22802280

22812281

22822282
# region VirtualMachines Disks (Managed)
2283-
def attach_managed_data_disk(cmd, resource_group_name, vm_name,
                             disk=None, ids=None, disks=None, new=False, sku=None,
                             size_gb=None, lun=None, caching=None,
                             enable_write_accelerator=False, disk_ids=None,
                             source_snapshots_or_disks=None,
                             source_disk_restore_point=None,
                             new_names_of_source_snapshots_or_disks=None,
                             new_names_of_source_disk_restore_point=None,
                             no_wait=False):
    """
    Attach managed data disks to a VM.

    Two code paths:
      * Fast path — plain attach of existing disks (no --new/--sku/--size-gb):
        uses the dedicated AttachDetachDataDisk AAZ operation, which avoids a
        full VM PUT.
    * Slow path — creating empty disks, copying from snapshots/disks, or
        restoring from disk restore points: appends entries to the VM's
        storageProfile.dataDisks via a VMCreate (PUT) with a customized
        pre_instance_create hook.

    LUN assignment: when `lun` is given, disks are placed at the first free LUN
    at or above it, continuing upward for subsequent disks; otherwise the first
    free LUN starting from 0 is used for each disk.

    :raises: service errors are propagated from the underlying AAZ operations.
    """
    # The service rejects a missing caching value on the attach payload, so
    # normalize None to the explicit 'None' caching mode.
    if caching is None:
        caching = 'None'

    # ---- fast path: attach already-existing managed disks -------------------
    if not new and not sku and not size_gb and disk_ids is not None:
        from .operations.vm import VMShow
        vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
            'resource_group': resource_group_name,
            'vm_name': vm_name
        })
        # AAZ may hand back a dict or a result wrapper — normalize to a dict.
        vm_dict = vm if isinstance(vm, dict) else getattr(vm, 'result', vm)
        data_disks = vm_dict.get('storageProfile', {}).get('dataDisks', []) or []
        used_luns = {d.get('lun') for d in data_disks if isinstance(d, dict) and d.get('lun') is not None}

        def _next_lun(start=0):
            # First free LUN >= start; marks it used so repeated calls never
            # hand out the same LUN twice.
            i = start
            while i in used_luns:
                i += 1
            used_luns.add(i)
            return i

        attach_payload = []
        current_lun = lun
        for disk_id in disk_ids:
            if current_lun is not None:
                # honor the requested starting LUN and keep counting upward
                disk_lun = _next_lun(start=current_lun)
                current_lun = disk_lun + 1
            else:
                disk_lun = _next_lun()

            payload = {
                'diskId': disk_id,
                'lun': disk_lun,
                'caching': caching,
            }
            if enable_write_accelerator:
                payload['writeAcceleratorEnabled'] = enable_write_accelerator

            attach_payload.append(payload)

        return AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={
            'vm_name': vm_name,
            'resource_group': resource_group_name,
            'data_disks_to_attach': attach_payload,
            'no_wait': no_wait
        })

    # ---- slow path: new / copy / restore disks via VM PUT -------------------
    from azure.mgmt.core.tools import parse_resource_id
    from .operations.vm import VMCreate as _VMCreate

    class VMCreate(_VMCreate):
        def pre_instance_create(self, instance):
            # Runs against the freshly fetched VM instance just before the PUT;
            # all CLI arguments are captured from the enclosing scope.
            storage_profile = instance.properties.storage_profile
            data_disks = storage_profile.data_disks

            data_disks_list = data_disks.to_serialized_data() if hasattr(data_disks, 'to_serialized_data') else data_disks
            used_luns = set()
            if isinstance(data_disks_list, list):
                for d in data_disks_list:
                    if isinstance(d, dict) and 'lun' in d and d['lun'] is not None:
                        used_luns.add(d['lun'])

            def _next_lun(start=0):
                # Same free-LUN allocator as the fast path.
                i = start
                while i in used_luns:
                    i += 1
                used_luns.add(i)
                return i

            default_size_gb = 1023  # service default cap for an unspecified size
            disks_to_process = disk_ids if disk_ids is not None else disks

            # attach existing disks / create new empty disks
            if disks_to_process:
                # BUGFIX: previously, when `lun` was supplied, only the first
                # disk searched from `lun`; later disks called _next_lun() with
                # no start and could be assigned LUNs *below* the requested one.
                # Track current_lun exactly like the fast path instead.
                current_lun = lun
                for disk_item in disks_to_process:
                    if current_lun is not None:
                        disk_lun = _next_lun(start=current_lun)
                        current_lun = disk_lun + 1
                    else:
                        disk_lun = _next_lun()

                    if new:
                        disk_name = parse_resource_id(disk_item)['name']
                        disk_obj = {
                            'name': disk_name,
                            'lun': disk_lun,
                            'createOption': 'Empty',
                            'diskSizeGb': size_gb if size_gb else default_size_gb,
                            'caching': caching
                        }
                        if sku:
                            disk_obj['managedDisk'] = {'storageAccountType': sku}
                    else:
                        disk_obj = {
                            'lun': disk_lun,
                            'createOption': 'Attach',
                            'caching': caching,
                            'managedDisk': {'id': disk_item}
                        }
                        if sku:
                            disk_obj['managedDisk']['storageAccountType'] = sku

                    if enable_write_accelerator:
                        disk_obj['writeAcceleratorEnabled'] = True

                    data_disks.append(disk_obj)

            # copy from source snapshots or disks
            if source_snapshots_or_disks:
                _new_names = new_names_of_source_snapshots_or_disks or [None] * len(source_snapshots_or_disks)
                for src_id, name in zip(source_snapshots_or_disks, _new_names):
                    disk_lun = _next_lun()
                    disk_obj = {
                        'name': name,
                        'lun': disk_lun,
                        'createOption': 'Copy',
                        'sourceResource': {'id': src_id},
                        'caching': caching,
                        'writeAcceleratorEnabled': enable_write_accelerator
                    }
                    if size_gb is not None:
                        disk_obj['diskSizeGb'] = size_gb
                    if sku is not None:
                        disk_obj['managedDisk'] = {'storageAccountType': sku}
                    data_disks.append(disk_obj)

            # restore from disk restore points
            if source_disk_restore_point:
                _new_names_rp = new_names_of_source_disk_restore_point or [None] * len(source_disk_restore_point)
                for src_id, name in zip(source_disk_restore_point, _new_names_rp):
                    disk_lun = _next_lun()
                    disk_obj = {
                        'name': name,
                        'lun': disk_lun,
                        'createOption': 'Restore',
                        'sourceResource': {'id': src_id},
                        'caching': caching,
                        'writeAcceleratorEnabled': enable_write_accelerator
                    }
                    if size_gb is not None:
                        disk_obj['diskSizeGb'] = size_gb
                    if sku is not None:
                        disk_obj['managedDisk'] = {'storageAccountType': sku}
                    data_disks.append(disk_obj)

    args = {
        'resource_group': resource_group_name,
        'vm_name': vm_name,
        'no_wait': no_wait
    }
    return VMCreate(cli_ctx=cmd.cli_ctx)(command_args=args)
23972446

23982447

23992448
def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name):
@@ -2408,7 +2457,9 @@ def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name):
24082457
# endregion
24092458

24102459

2411-
def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None, force_detach=None, disk_ids=None):
2460+
def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None,
2461+
force_detach=None, disk_ids=None,
2462+
no_wait=False):
24122463
if disk_ids is not None:
24132464
data_disks = []
24142465
for disk_item in disk_ids:
@@ -2422,27 +2473,57 @@ def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None,
24222473
return result
24232474
else:
24242475
# here we handle managed disk
2425-
vm = get_vm_to_update(cmd, resource_group_name, vm_name)
2426-
if not force_detach:
2427-
# pylint: disable=no-member
2428-
leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk_name.lower()]
2429-
if len(vm.storage_profile.data_disks) == len(leftovers):
2430-
raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name))
2431-
else:
2432-
DiskDetachOptionTypes = cmd.get_models('DiskDetachOptionTypes', resource_type=ResourceType.MGMT_COMPUTE,
2433-
operation_group='virtual_machines')
2434-
leftovers = vm.storage_profile.data_disks
2435-
is_contains = False
2436-
for d in leftovers:
2437-
if d.name.lower() == disk_name.lower():
2438-
d.to_be_detached = True
2439-
d.detach_option = DiskDetachOptionTypes.FORCE_DETACH
2440-
is_contains = True
2441-
break
2442-
if not is_contains:
2443-
raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name))
2444-
vm.storage_profile.data_disks = leftovers
2445-
set_vm(cmd, vm)
2476+
from .operations.vm import VMShow
2477+
from ._vm_utils import safe_get
2478+
2479+
vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
2480+
'resource_group': resource_group_name,
2481+
'vm_name': vm_name
2482+
})
2483+
2484+
# work on a local copy of the VM dict to avoid mutating the original object.
2485+
vm_result = vm if isinstance(vm, dict) else getattr(vm, 'result', vm)
2486+
vm_dict = json.loads(json.dumps(vm_result))
2487+
2488+
# to avoid unnecessary permission check of image
2489+
storage_profile = vm_dict.get('storageProfile', {})
2490+
storage_profile["imageReference"] = None
2491+
2492+
target_disk = None
2493+
data_disks = safe_get(vm_dict, 'storageProfile.dataDisks', default=[]) or []
2494+
for d in data_disks:
2495+
# Use dict-style access; AAZ returns dicts.
2496+
name = (d.get('name') or '').lower()
2497+
if name == (disk_name or '').lower():
2498+
target_disk = d
2499+
break
2500+
2501+
if not target_disk:
2502+
attached_names = [d.get('name') for d in (safe_get(vm_dict, 'storageProfile.dataDisks', []) or [])]
2503+
raise ResourceNotFoundError(
2504+
"No disk with the name '{}' was found. Attached: {}".format(disk_name, attached_names)
2505+
)
2506+
2507+
disk_id = safe_get(target_disk, 'managedDisk.id')
2508+
if not disk_id:
2509+
raise CLIError(
2510+
"Disk '{}' is not a managed disk (no managedDisk.id). Only managed disks are supported for this "
2511+
"operation."
2512+
.format(disk_name)
2513+
)
2514+
2515+
args = {
2516+
'vm_name': vm_name,
2517+
'resource_group': resource_group_name,
2518+
'data_disks_to_detach': [{
2519+
'diskId': disk_id,
2520+
'detachOption': 'ForceDetach' if force_detach else None
2521+
}],
2522+
'no_wait': no_wait
2523+
}
2524+
2525+
result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args=args)
2526+
return result
24462527
# endregion
24472528

24482529

0 commit comments

Comments
 (0)