Coverage for drivers/LVMSR.py: 48%
1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# LVMSR: VHD and QCOW2 on LVM storage repository
19#
21from sm_typing import Dict, List, override
23import SR
24from SR import deviceCheck
25import VDI
26import SRCommand
27import util
28import lvutil
29import lvmcache
30import scsiutil
31import lock
32import os
33import sys
34import time
35import errno
36import xs_errors
37import cleanup
38import blktap2
39from journaler import Journaler
40from refcounter import RefCounter
41from ipc import IPCFlag
42from constants import NS_PREFIX_LVM, VG_LOCATION, VG_PREFIX, CBT_BLOCK_SIZE
43from cowutil import CowUtil, getCowUtil, getImageStringFromVdiType, getVdiTypeFromImageFormat
44from lvmcowutil import LV_PREFIX, LvmCowUtil
45from lvmanager import LVActivator
46from vditype import VdiType
47import XenAPI # pylint: disable=import-error
48import re
49from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
50 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
51 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
52 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
53 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
54from metadata import retrieveXMLfromFile, _parseXML
55from xmlrpc.client import DateTime
56import glob
57from constants import CBTLOG_TAG
58from fairlock import Fairlock
59DEV_MAPPER_ROOT = os.path.join('/dev/mapper', VG_PREFIX)
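# DEV_MAPPER_ROOT + '*' is used as a glob below, so it matches the device-mapper
# nodes of every VG_XenStorage-* VG on the host; callers filter with
# util.extractSRFromDevMapper() to keep only this SR's entries. Device-mapper
# escapes each '-' inside a VG or LV name as '--', so (illustrative names)
# /dev/VG_XenStorage-<sr_uuid>/VHD-<vdi_uuid> shows up as
# /dev/mapper/VG_XenStorage--<sr_uuid>-VHD--<vdi_uuid>.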
61geneology: Dict[str, List[str]] = {}
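# geneology maps a parent VDI uuid to the list of its child uuids; it is filled
# in by _loadvdis() and used there to skip hidden leaf nodes that the GC would
# reclaim anyway.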
62CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
63 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
64 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
65 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
66 "VDI_ACTIVATE", "VDI_DEACTIVATE"]
68CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]
70DRIVER_INFO = {
71 'name': 'Local VHD and QCOW2 on LVM',
72 'description': 'SR plugin which represents disks as VHD and QCOW2 disks on ' + \
73 'Logical Volumes within a locally-attached Volume Group',
74 'vendor': 'XenSource Inc',
75 'copyright': '(C) 2008 XenSource Inc',
76 'driver_version': '1.0',
77 'required_api_version': '1.0',
78 'capabilities': CAPABILITIES,
79 'configuration': CONFIGURATION
80 }
82CREATE_PARAM_TYPES = {
83 "raw": VdiType.RAW,
84 "vhd": VdiType.VHD,
85 "qcow2": VdiType.QCOW2
86}
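# The values accepted for the "image-format" (or legacy "type") key of
# vdi_sm_config at VDI create time; LVMVDI.load() maps them to the internal
# VdiType (see below).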
88OPS_EXCLUSIVE = [
89 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
90 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
91 "vdi_clone"]
93# Log if snapshot pauses VM for more than this many seconds
94LONG_SNAPTIME = 60
96class LVMSR(SR.SR):
97 DRIVER_TYPE = 'lvhd'
99 PROVISIONING_TYPES = ["thin", "thick"]
100 PROVISIONING_DEFAULT = "thick"
101 THIN_PLUGIN = "lvhd-thin"
103 PLUGIN_ON_SLAVE = "on-slave"
105 FLAG_USE_VHD = "use_vhd"
106 MDVOLUME_NAME = "MGT"
108 ALLOCATION_QUANTUM = "allocation_quantum"
109 INITIAL_ALLOCATION = "initial_allocation"
111 LOCK_RETRY_INTERVAL = 3
112 LOCK_RETRY_ATTEMPTS = 10
114 TEST_MODE_KEY = "testmode"
115 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
116 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
117 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
118 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
119 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
120 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
121 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
123 ENV_VAR_VHD_TEST = {
124 TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
125 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
126 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
127 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
128 TEST_MODE_VHD_FAIL_REPARENT_END:
129 "VHD_UTIL_TEST_FAIL_REPARENT_END",
130 TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
131 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
132 TEST_MODE_VHD_FAIL_RESIZE_DATA:
133 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
134 TEST_MODE_VHD_FAIL_RESIZE_METADATA:
135 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
136 TEST_MODE_VHD_FAIL_RESIZE_END:
137 "VHD_UTIL_TEST_FAIL_RESIZE_END"
138 }
139 testMode = ""
141 legacyMode = True
143 @override
144 @staticmethod
145 def handles(type) -> bool:
146 """Returns True if this SR class understands the given dconf string"""
147 # we can pose as LVMSR or EXTSR for compatibility purposes
148 if __name__ == '__main__':
149 name = sys.argv[0]
150 else:
151 name = __name__
152 if name.endswith("LVMSR"):
153 return type == "lvm"
154 elif name.endswith("EXTSR"):
155 return type == "ext"
156 return type == LVMSR.DRIVER_TYPE
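# For example, invoked through a symlink named EXTSR this driver answers to
# type "ext", through one named LVMSR to type "lvm", and otherwise only to its
# native "lvhd" type.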
158 def __init__(self, srcmd, sr_uuid):
159 SR.SR.__init__(self, srcmd, sr_uuid)
160 self._init_preferred_image_formats()
162 @override
163 def load(self, sr_uuid) -> None:
164 self.ops_exclusive = OPS_EXCLUSIVE
166 self.isMaster = False
167 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
168 self.isMaster = True
170 self.lock = lock.Lock(lock.LOCK_TYPE_SR, self.uuid)
171 self.sr_vditype = SR.DEFAULT_TAP
172 self.uuid = sr_uuid
173 self.vgname = VG_PREFIX + self.uuid
174 self.path = os.path.join(VG_LOCATION, self.vgname)
175 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
176 self.provision = self.PROVISIONING_DEFAULT
178 has_sr_ref = self.srcmd.params.get("sr_ref")
179 if has_sr_ref:
180 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
181 else:
182 self.other_conf = None
184 self.lvm_conf = None
185 if self.other_conf:
186 self.lvm_conf = self.other_conf.get('lvm-conf')
188 try:
189 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf)
190 except:
191 raise xs_errors.XenError('SRUnavailable', \
192 opterr='Failed to initialise the LVMCache')
193 self.lvActivator = LVActivator(self.uuid, self.lvmCache)
194 self.journaler = Journaler(self.lvmCache)
195 if not has_sr_ref:
196 return # must be a probe call
197 # Test for thick vs thin provisioning conf parameter
198 if 'allocation' in self.dconf: 198 ↛ 199 (line 198 didn't jump to line 199, because the condition on line 198 was never true)
199 if self.dconf['allocation'] in self.PROVISIONING_TYPES:
200 self.provision = self.dconf['allocation']
201 else:
202 raise xs_errors.XenError('InvalidArg', \
203 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)
205 if self.other_conf.get(self.TEST_MODE_KEY): 205 ↛ 209 (line 205 didn't jump to line 209, because the condition on line 205 was never false)
206 self.testMode = self.other_conf[self.TEST_MODE_KEY]
207 self._prepareTestMode()
209 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
210 # sm_config flag overrides PBD, if any
211 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
212 self.provision = self.sm_config.get('allocation')
214 if self.sm_config.get(self.FLAG_USE_VHD) == "true":
215 self.legacyMode = False
217 if lvutil._checkVG(self.vgname):
218 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach", 218 ↛ 221 (line 218 didn't jump to line 221, because the condition on line 218 was never false)
219 "vdi_activate", "vdi_deactivate"]:
220 self._undoAllJournals()
221 if not self.cmd in ["sr_attach", "sr_probe"]:
222 self._checkMetadataVolume()
224 self.mdexists = False
226 # get a VDI -> TYPE map from the storage
227 contains_uuid_regex = \
228 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
229 self.storageVDIs = {}
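# An LV whose name embeds a uuid is mapped back to (vdi_uuid, vdi_type) by
# stripping its type prefix; e.g., assuming the usual LV_PREFIX naming, an LV
# named "VHD-<uuid>" records vdi_type VHD for VDI <uuid>.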
231 for key in self.lvmCache.lvs.keys(): 231 ↛ 233 (line 231 didn't jump to line 233, because the loop on line 231 never started)
232 # if the lvname has a uuid in it
233 type = None
234 vdi = None
235 if contains_uuid_regex.search(key) is not None:
236 for vdi_type, prefix in LV_PREFIX.items():
237 if key.startswith(prefix):
238 vdi = key[len(prefix):]
239 self.storageVDIs[vdi] = vdi_type
240 break
242 # check if metadata volume exists
243 try:
244 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
245 except:
246 pass
248 @override
249 def cleanup(self) -> None:
250 # we don't need to hold the lock to dec refcounts of activated LVs
251 if not self.lvActivator.deactivateAll(): 251 ↛ 252 (line 251 didn't jump to line 252, because the condition on line 251 was never true)
252 raise util.SMException("failed to deactivate LVs")
254 def updateSRMetadata(self, allocation):
255 try:
256 # Add SR specific SR metadata
257 sr_info = \
258 {ALLOCATION_TAG: allocation,
259 UUID_TAG: self.uuid,
260 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
261 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
262 }
264 vdi_info = {}
265 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
266 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
268 vdi_type = self.session.xenapi.VDI.get_sm_config(vdi).get('vdi_type')
269 if not vdi_type:
270 raise xs_errors.XenError('MetadataError', opterr=f"Missing `vdi_type` for VDI {vdi_uuid}")
272 # Create the VDI entry in the SR metadata
273 vdi_info[vdi_uuid] = \
274 {
275 UUID_TAG: vdi_uuid,
276 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
277 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
278 IS_A_SNAPSHOT_TAG: \
279 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
280 SNAPSHOT_OF_TAG: \
281 self.session.xenapi.VDI.get_snapshot_of(vdi),
282 SNAPSHOT_TIME_TAG: \
283 self.session.xenapi.VDI.get_snapshot_time(vdi),
284 TYPE_TAG: \
285 self.session.xenapi.VDI.get_type(vdi),
286 VDI_TYPE_TAG: \
287 vdi_type,
288 READ_ONLY_TAG: \
289 int(self.session.xenapi.VDI.get_read_only(vdi)),
290 METADATA_OF_POOL_TAG: \
291 self.session.xenapi.VDI.get_metadata_of_pool(vdi),
292 MANAGED_TAG: \
293 int(self.session.xenapi.VDI.get_managed(vdi))
294 }
295 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
297 except Exception as e:
298 raise xs_errors.XenError('MetadataError', \
299 opterr='Error upgrading SR Metadata: %s' % str(e))
301 def syncMetadataAndStorage(self):
302 try:
303 # if a VDI is present in the metadata but not in the storage
304 # then delete it from the metadata
305 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
306 for vdi in list(vdi_info.keys()):
307 update_map = {}
308 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 308 ↛ 315 (line 308 didn't jump to line 315, because the condition on line 308 was never false)
309 # delete this from metadata
310 LVMMetadataHandler(self.mdpath). \
311 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
312 else:
313 # search for this in the metadata, compare types
314 # self.storageVDIs is a map of vdi_uuid to vdi_type
315 if vdi_info[vdi][VDI_TYPE_TAG] != \
316 self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
317 # storage type takes authority
318 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
319 = METADATA_OBJECT_TYPE_VDI
320 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
321 update_map[VDI_TYPE_TAG] = \
322 self.storageVDIs[vdi_info[vdi][UUID_TAG]]
323 LVMMetadataHandler(self.mdpath) \
324 .updateMetadata(update_map)
325 else:
326 # This should never happen
327 pass
329 except Exception as e:
330 raise xs_errors.XenError('MetadataError', \
331 opterr='Error synching SR Metadata and storage: %s' % str(e))
333 def syncMetadataAndXapi(self):
334 try:
335 # get metadata
336 (sr_info, vdi_info) = \
337 LVMMetadataHandler(self.mdpath, False).getMetadata()
339 # First synch SR parameters
340 self.update(self.uuid)
342 # Now update the VDI information in the metadata if required
343 for vdi_offset in vdi_info.keys():
344 try:
345 vdi_ref = \
346 self.session.xenapi.VDI.get_by_uuid( \
347 vdi_info[vdi_offset][UUID_TAG])
348 except:
349 # maybe the VDI is not in XAPI yet, don't bother
350 continue
352 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
353 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))
355 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
356 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
357 new_name_description:
358 update_map = {}
359 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
360 METADATA_OBJECT_TYPE_VDI
361 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
362 update_map[NAME_LABEL_TAG] = new_name_label
363 update_map[NAME_DESCRIPTION_TAG] = new_name_description
364 LVMMetadataHandler(self.mdpath) \
365 .updateMetadata(update_map)
366 except Exception as e:
367 raise xs_errors.XenError('MetadataError', \
368 opterr='Error synching SR Metadata and XAPI: %s' % str(e))
370 def _checkMetadataVolume(self):
371 util.SMlog("Entering _checkMetadataVolume")
372 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
373 if self.isMaster: 373 ↛ 389 (line 373 didn't jump to line 389, because the condition on line 373 was never false)
374 if self.mdexists and self.cmd == "sr_attach":
375 try:
376 # activate the management volume
377 # will be deactivated at detach time
378 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
379 self._synchSmConfigWithMetaData()
380 util.SMlog("Sync SR metadata and the state on the storage.")
381 self.syncMetadataAndStorage()
382 self.syncMetadataAndXapi()
383 except Exception as e:
384 util.SMlog("Exception in _checkMetadataVolume, " \
385 "Error: %s." % str(e))
386 elif not self.mdexists and not self.legacyMode: 386 ↛ 389 (line 386 didn't jump to line 389, because the condition on line 386 was never false)
387 self._introduceMetaDataVolume()
389 if self.mdexists:
390 self.legacyMode = False
392 def _synchSmConfigWithMetaData(self):
393 util.SMlog("Synching sm-config with metadata volume")
395 try:
396 # get SR info from metadata
397 sr_info = {}
398 map = {}
399 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]
401 if sr_info == {}: 401 ↛ 402 (line 401 didn't jump to line 402, because the condition on line 401 was never true)
402 raise Exception("Failed to get SR information from metadata.")
404 if "allocation" in sr_info: 404 ↛ 408 (line 404 didn't jump to line 408, because the condition on line 404 was never false)
405 self.provision = sr_info.get("allocation")
406 map['allocation'] = sr_info.get("allocation")
407 else:
408 raise Exception("Allocation key not found in SR metadata. "
409 "SR info found: %s" % sr_info)
411 except Exception as e:
412 raise xs_errors.XenError(
413 'MetadataError',
414 opterr='Error reading SR params from '
415 'metadata Volume: %s' % str(e))
416 try:
417 map[self.FLAG_USE_VHD] = 'true'
418 self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
419 except:
420 raise xs_errors.XenError(
421 'MetadataError',
422 opterr='Error updating sm_config key')
424 def _introduceMetaDataVolume(self):
425 util.SMlog("Creating Metadata volume")
426 try:
427 config = {}
428 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)
430 # activate the management volume, will be deactivated at detach time
431 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
433 name_label = util.to_plain_string( \
434 self.session.xenapi.SR.get_name_label(self.sr_ref))
435 name_description = util.to_plain_string( \
436 self.session.xenapi.SR.get_name_description(self.sr_ref))
437 config[self.FLAG_USE_VHD] = "true"
438 config['allocation'] = self.provision
439 self.session.xenapi.SR.set_sm_config(self.sr_ref, config)
441 # Add the SR metadata
442 self.updateSRMetadata(self.provision)
443 except Exception as e:
444 raise xs_errors.XenError('MetadataError', \
445 opterr='Error introducing Metadata Volume: %s' % str(e))
447 def _removeMetadataVolume(self):
448 if self.mdexists:
449 try:
450 self.lvmCache.remove(self.MDVOLUME_NAME)
451 except:
452 raise xs_errors.XenError('MetadataError', \
453 opterr='Failed to delete MGT Volume')
455 def _refresh_size(self):
456 """
457 Refreshes the size of the backing device.
458 Returns True if all paths/devices agree on the same size.
459 """
460 if hasattr(self, 'SCSIid'): 460 ↛ 462 (line 460 didn't jump to line 462, because the condition on line 460 was never true)
461 # LVMoHBASR, LVMoISCSISR
462 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
463 else:
464 # LVMSR
465 devices = self.dconf['device'].split(',')
466 scsiutil.refreshdev(devices)
467 return True
469 def _expand_size(self):
470 """
471 Expands the size of the SR by growing into additional available
472 space, if extra space is available on the backing device.
473 Needs to be called after a successful call to _refresh_size.
474 """
475 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
476 # We are comparing PV- with VG-sizes that are aligned. Need a threshold
477 resizethreshold = 100 * 1024 * 1024 # 100MB
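# Worked example: a 100 GiB device backing a VG whose physical_size reports
# 99.95 GiB differs by ~51 MiB, below the threshold, so no PV resize happens.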
478 devices = self.dconf['device'].split(',')
479 totaldevicesize = 0
480 for device in devices:
481 totaldevicesize = totaldevicesize + scsiutil.getsize(device)
482 if totaldevicesize >= (currentvgsize + resizethreshold):
483 try:
484 if hasattr(self, 'SCSIid'): 484 ↛ 486 (line 484 didn't jump to line 486, because the condition on line 484 was never true)
485 # LVMoHBASR, LVMoISCSISR might have slaves
486 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
487 getattr(self, 'SCSIid'))
488 util.SMlog("LVMSR._expand_size for %s will resize the pv." %
489 self.uuid)
490 for pv in lvutil.get_pv_for_vg(self.vgname):
491 lvutil.resizePV(pv)
492 except:
493 util.logException("LVMSR._expand_size for %s failed to resize"
494 " the PV" % self.uuid)
496 @override
497 @deviceCheck
498 def create(self, uuid, size) -> None:
499 util.SMlog("LVMSR.create for %s" % self.uuid)
500 if not self.isMaster:
501 util.SMlog('sr_create blocked for non-master')
502 raise xs_errors.XenError('LVMMaster')
504 if lvutil._checkVG(self.vgname):
505 raise xs_errors.XenError('SRExists')
507 # Check that none of the devices are already in use by other PBDs
508 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
509 raise xs_errors.XenError('SRInUse')
511 # Check serial number entry in SR records
512 for dev in self.dconf['device'].split(','):
513 if util.test_scsiserial(self.session, dev):
514 raise xs_errors.XenError('SRInUse')
516 lvutil.createVG(self.dconf['device'], self.vgname)
518 # Update serial number string
519 scsiutil.add_serial_record(self.session, self.sr_ref, \
520 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
522 # since this is an SR.create, turn off legacy mode
523 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
524 self.FLAG_USE_VHD, 'true')
526 @override
527 def delete(self, uuid) -> None:
528 util.SMlog("LVMSR.delete for %s" % self.uuid)
529 if not self.isMaster:
530 raise xs_errors.XenError('LVMMaster')
531 cleanup.gc_force(self.session, self.uuid)
533 success = True
534 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
535 if util.extractSRFromDevMapper(fileName) != self.uuid:
536 continue
538 if util.doesFileHaveOpenHandles(fileName):
539 util.SMlog("LVMSR.delete: The dev mapper entry %s has open " \
540 "handles" % fileName)
541 success = False
542 continue
544 # Now attempt to remove the dev mapper entry
545 if not lvutil.removeDevMapperEntry(fileName, False):
546 success = False
547 continue
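# Undo the device-mapper name flattening: mapping '-' to '/' and then '//'
# back to '-' reverses the '--' escaping, leaving a single '/' between the VG
# and LV parts so basename() yields the LV name.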
549 try:
550 lvname = os.path.basename(fileName.replace('-', '/'). \
551 replace('//', '-'))
552 lpath = os.path.join(self.path, lvname)
553 os.unlink(lpath)
554 except OSError as e:
555 if e.errno != errno.ENOENT:
556 util.SMlog("LVMSR.delete: failed to remove the symlink for " \
557 "file %s. Error: %s" % (fileName, str(e)))
558 success = False
560 if success:
561 try:
562 if util.pathexists(self.path):
563 os.rmdir(self.path)
564 except Exception as e:
565 util.SMlog("LVMSR.delete: failed to remove the symlink " \
566 "directory %s. Error: %s" % (self.path, str(e)))
567 success = False
569 self._removeMetadataVolume()
570 self.lvmCache.refresh()
571 if LvmCowUtil.getVolumeInfo(self.lvmCache):
572 raise xs_errors.XenError('SRNotEmpty')
574 if not success:
575 raise Exception("LVMSR delete failed, please refer to the log " \
576 "for details.")
578 lvutil.removeVG(self.dconf['device'], self.vgname)
579 self._cleanup()
581 @override
582 def attach(self, uuid) -> None:
583 util.SMlog("LVMSR.attach for %s" % self.uuid)
585 self._cleanup(True) # in case of host crashes, if detach wasn't called
587 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): 587 ↛ 588 (line 587 didn't jump to line 588, because the condition on line 587 was never true)
588 raise xs_errors.XenError('SRUnavailable', \
589 opterr='no such volume group: %s' % self.vgname)
591 # Refresh the metadata status
592 self._checkMetadataVolume()
594 refreshsizeok = self._refresh_size()
596 if self.isMaster: 596 ↛ 607 (line 596 didn't jump to line 607, because the condition on line 596 was never false)
597 if refreshsizeok: 597 ↛ 601 (line 597 didn't jump to line 601, because the condition on line 597 was never false)
598 self._expand_size()
600 # Update SCSIid string
601 util.SMlog("Calling devlist_to_serial")
602 scsiutil.add_serial_record(
603 self.session, self.sr_ref,
604 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
606 # Test Legacy Mode Flag and update if COW volumes exist
607 if self.isMaster and self.legacyMode: 607 ↛ 608 (line 607 didn't jump to line 608, because the condition on line 607 was never true)
608 vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
609 for uuid, info in vdiInfo.items():
610 if VdiType.isCowImage(info.vdiType):
611 self.legacyMode = False
612 map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
613 self._introduceMetaDataVolume()
614 break
616 # Set the block scheduler
617 for dev in self.dconf['device'].split(','):
618 self.block_setscheduler(dev)
620 @override
621 def detach(self, uuid) -> None:
622 util.SMlog("LVMSR.detach for %s" % self.uuid)
623 cleanup.abort(self.uuid)
625 # Do a best effort cleanup of the dev mapper entries
626 # go through all devmapper entries for this VG
627 success = True
628 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
629 if util.extractSRFromDevMapper(fileName) != self.uuid: 629 ↛ 630 (line 629 didn't jump to line 630, because the condition on line 629 was never true)
630 continue
632 with Fairlock('devicemapper'):
633 # check if any file has open handles
634 if util.doesFileHaveOpenHandles(fileName):
635 # if yes, log this and signal failure
636 util.SMlog(
637 f"LVMSR.detach: The dev mapper entry {fileName} has "
638 "open handles")
639 success = False
640 continue
642 # Now attempt to remove the dev mapper entry
643 if not lvutil.removeDevMapperEntry(fileName, False): 643 ↛ 644 (line 643 didn't jump to line 644, because the condition on line 643 was never true)
644 success = False
645 continue
647 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
648 try:
649 lvname = os.path.basename(fileName.replace('-', '/'). \
650 replace('//', '-'))
651 lvname = os.path.join(self.path, lvname)
652 util.force_unlink(lvname)
653 except Exception as e:
654 util.SMlog("LVMSR.detach: failed to remove the symlink for " \
655 "file %s. Error: %s" % (fileName, str(e)))
656 success = False
658 # now remove the directory where the symlinks are
659 # this should pass as the directory should be empty by now
660 if success:
661 try:
662 if util.pathexists(self.path): 662 ↛ 663 (line 662 didn't jump to line 663, because the condition on line 662 was never true)
663 os.rmdir(self.path)
664 except Exception as e:
665 util.SMlog("LVMSR.detach: failed to remove the symlink " \
666 "directory %s. Error: %s" % (self.path, str(e)))
667 success = False
669 if not success:
670 raise Exception("SR detach failed, please refer to the log " \
671 "for details.")
673 # Don't delete lock files on the master as it will break the locking
674 # between SM and any GC thread that survives through SR.detach.
675 # However, we should still delete lock files on slaves as it is the
676 # only place to do so.
677 self._cleanup(self.isMaster)
679 @override
680 def forget_vdi(self, uuid) -> None:
681 if not self.legacyMode:
682 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
683 super(LVMSR, self).forget_vdi(uuid)
685 @override
686 def scan(self, uuid) -> None:
687 activated_lvs = set()
688 try:
689 util.SMlog("LVMSR.scan for %s" % self.uuid)
690 if not self.isMaster: 690 ↛ 691 (line 690 didn't jump to line 691, because the condition on line 690 was never true)
691 util.SMlog('sr_scan blocked for non-master')
692 raise xs_errors.XenError('LVMMaster')
694 if self._refresh_size(): 694 ↛ 696 (line 694 didn't jump to line 696, because the condition on line 694 was never false)
695 self._expand_size()
696 self.lvmCache.refresh()
697 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
698 self._loadvdis()
699 stats = lvutil._getVGstats(self.vgname)
700 self.physical_size = stats['physical_size']
701 self.physical_utilisation = stats['physical_utilisation']
703 # Now check if there are any VDIs in the metadata, which are not in
704 # XAPI
705 if self.mdexists: 705 ↛ 816 (line 705 didn't jump to line 816, because the condition on line 705 was never false)
706 vdiToSnaps: Dict[str, List[str]] = {}
707 # get VDIs from XAPI
708 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
709 vdi_uuids = set([])
710 for vdi in vdis:
711 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
713 info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
715 for vdi in list(info.keys()):
716 vdi_uuid = info[vdi][UUID_TAG]
717 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 717 ↛ 718 (line 717 didn't jump to line 718, because the condition on line 717 was never true)
718 if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
719 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
720 else:
721 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
723 if vdi_uuid not in vdi_uuids:
724 util.SMlog("Introduce VDI %s as it is present in " \
725 "metadata and not in XAPI." % vdi_uuid)
726 vdi_type = info[vdi][VDI_TYPE_TAG]
727 sm_config = {}
728 sm_config['vdi_type'] = vdi_type
729 lvname = "%s%s" % (LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
730 self.lvActivator.activate(
731 vdi_uuid, lvname, LVActivator.NORMAL)
732 activated_lvs.add(vdi_uuid)
733 lvPath = os.path.join(self.path, lvname)
735 if not VdiType.isCowImage(vdi_type): 735 ↛ 736 (line 735 didn't jump to line 736, because the condition on line 735 was never true)
736 size = self.lvmCache.getSize(LV_PREFIX[vdi_type] + vdi_uuid)
737 utilisation = \
738 util.roundup(lvutil.LVM_SIZE_INCREMENT,
739 int(size))
740 else:
741 cowutil = getCowUtil(vdi_type)
742 lvmcowutil = LvmCowUtil(cowutil)
744 parent = cowutil.getParentNoCheck(lvPath)
746 if parent is not None: 746 ↛ 747 (line 746 didn't jump to line 747, because the condition on line 746 was never true)
747 sm_config['vhd-parent'] = parent[parent.find('-') + 1:]
748 size = cowutil.getSizeVirt(lvPath)
749 if self.provision == "thin": 749 ↛ 750 (line 749 didn't jump to line 750, because the condition on line 749 was never true)
750 utilisation = util.roundup(
751 lvutil.LVM_SIZE_INCREMENT,
752 cowutil.calcOverheadEmpty(max(size, cowutil.getDefaultPreallocationSizeVirt()))
753 )
754 else:
755 utilisation = lvmcowutil.calcVolumeSize(int(size))
757 vdi_ref = self.session.xenapi.VDI.db_introduce(
758 vdi_uuid,
759 info[vdi][NAME_LABEL_TAG],
760 info[vdi][NAME_DESCRIPTION_TAG],
761 self.sr_ref,
762 info[vdi][TYPE_TAG],
763 False,
764 bool(int(info[vdi][READ_ONLY_TAG])),
765 {},
766 vdi_uuid,
767 {},
768 sm_config)
770 self.session.xenapi.VDI.set_managed(vdi_ref,
771 bool(int(info[vdi][MANAGED_TAG])))
772 self.session.xenapi.VDI.set_virtual_size(vdi_ref,
773 str(size))
774 self.session.xenapi.VDI.set_physical_utilisation( \
775 vdi_ref, str(utilisation))
776 self.session.xenapi.VDI.set_is_a_snapshot( \
777 vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG])))
778 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])): 778 ↛ 779 (line 778 didn't jump to line 779, because the condition on line 778 was never true)
779 self.session.xenapi.VDI.set_snapshot_time( \
780 vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG]))
781 if info[vdi][TYPE_TAG] == 'metadata': 781 ↛ 782 (line 781 didn't jump to line 782, because the condition on line 781 was never true)
782 self.session.xenapi.VDI.set_metadata_of_pool( \
783 vdi_ref, info[vdi][METADATA_OF_POOL_TAG])
785 # Update CBT status of disks either just added
786 # or already in XAPI
787 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
788 if cbt_logname in cbt_vdis: 788 ↛ 789 (line 788 didn't jump to line 789, because the condition on line 788 was never true)
789 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
790 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
791 # For existing VDIs, update local state too
792 # the base-class SR.scan then updates existing VDIs
793 # again based on this local state
794 if vdi_uuid in self.vdis:
795 self.vdis[vdi_uuid].cbt_enabled = True
796 cbt_vdis.remove(cbt_logname)
798 # Now set the snapshot statuses correctly in XAPI
799 for srcvdi in vdiToSnaps.keys(): 799 ↛ 800 (line 799 didn't jump to line 800, because the loop on line 799 never started)
800 try:
801 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
802 except:
803 # the source VDI no longer exists, continue
804 continue
806 for snapvdi in vdiToSnaps[srcvdi]:
807 try:
808 # this might fail in cases where it's already set
809 snapref = \
810 self.session.xenapi.VDI.get_by_uuid(snapvdi)
811 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
812 except Exception as e:
813 util.SMlog("Setting snapshot failed. " \
814 "Error: %s" % str(e))
816 if cbt_vdis: 816 ↛ 827 (line 816 didn't jump to line 827, because the condition on line 816 was never false)
817 # If we have items remaining in this list,
818 # they are cbt_metadata VDIs that XAPI doesn't know about
819 # Add them to self.vdis and they'll get added to the DB
820 for cbt_vdi in cbt_vdis: 820 ↛ 821 (line 820 didn't jump to line 821, because the loop on line 820 never started)
821 cbt_uuid = cbt_vdi.split(".")[0]
822 new_vdi = self.vdi(cbt_uuid)
823 new_vdi.ty = "cbt_metadata"
824 new_vdi.cbt_enabled = True
825 self.vdis[cbt_uuid] = new_vdi
827 super(LVMSR, self).scan(uuid)
828 self._kickGC()
830 finally:
831 for vdi in activated_lvs:
832 self.lvActivator.deactivate(
833 vdi, LVActivator.NORMAL, False)
835 @override
836 def update(self, uuid) -> None:
837 if not lvutil._checkVG(self.vgname): 837 ↛ 838 (line 837 didn't jump to line 838, because the condition on line 837 was never true)
838 return
839 self._updateStats(uuid, 0)
841 if self.legacyMode: 841 ↛ 842 (line 841 didn't jump to line 842, because the condition on line 841 was never true)
842 return
844 # synch name_label in metadata with XAPI
845 update_map = {}
846 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
847 METADATA_OBJECT_TYPE_SR,
848 NAME_LABEL_TAG: util.to_plain_string( \
849 self.session.xenapi.SR.get_name_label(self.sr_ref)),
850 NAME_DESCRIPTION_TAG: util.to_plain_string( \
851 self.session.xenapi.SR.get_name_description(self.sr_ref))
852 }
853 LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
855 def _updateStats(self, uuid, virtAllocDelta):
856 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
857 self.virtual_allocation = valloc + virtAllocDelta
858 util.SMlog("Setting virtual_allocation of SR %s to %d" %
859 (uuid, self.virtual_allocation))
860 stats = lvutil._getVGstats(self.vgname)
861 self.physical_size = stats['physical_size']
862 self.physical_utilisation = stats['physical_utilisation']
863 self._db_update()
865 @override
866 @deviceCheck
867 def probe(self) -> str:
868 return lvutil.srlist_toxml(
869 lvutil.scan_srlist(VG_PREFIX, self.dconf['device']),
870 VG_PREFIX,
871 ('metadata' in self.srcmd.params['sr_sm_config'] and \
872 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
874 @override
875 def vdi(self, uuid) -> VDI.VDI:
876 return LVMVDI(self, uuid)
878 def _loadvdis(self):
879 self.virtual_allocation = 0
880 self.vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
881 self.allVDIs = {}
883 for uuid, info in self.vdiInfo.items():
884 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 884 ↛ 885 (line 884 didn't jump to line 885, because the condition on line 884 was never true)
885 continue
886 if info.scanError: 886 ↛ 887 (line 886 didn't jump to line 887, because the condition on line 886 was never true)
887 raise xs_errors.XenError('VDIUnavailable', \
888 opterr='Error scanning VDI %s' % uuid)
889 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
890 if not self.vdis[uuid].hidden: 890 ↛ 883 (line 890 didn't jump to line 883, because the condition on line 890 was never false)
891 self.virtual_allocation += self.vdis[uuid].utilisation
893 for uuid, vdi in self.vdis.items():
894 if vdi.parent: 894 ↛ 895 (line 894 didn't jump to line 895, because the condition on line 894 was never true)
895 if vdi.parent in self.vdis:
896 self.vdis[vdi.parent].read_only = True
897 if vdi.parent in geneology:
898 geneology[vdi.parent].append(uuid)
899 else:
900 geneology[vdi.parent] = [uuid]
902 # Now remove all hidden leaf nodes to avoid introducing records that
903 # will be GC'ed
904 for uuid in list(self.vdis.keys()):
905 if uuid not in geneology and self.vdis[uuid].hidden: 905 ↛ 906 (line 905 didn't jump to line 906, because the condition on line 905 was never true)
906 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
907 del self.vdis[uuid]
909 def _ensureSpaceAvailable(self, amount_needed):
910 space_available = lvutil._getVGstats(self.vgname)['freespace']
911 if (space_available < amount_needed):
912 util.SMlog("Not enough space! free space: %d, need: %d" % \
913 (space_available, amount_needed))
914 raise xs_errors.XenError('SRNoSpace')
916 def _handleInterruptedCloneOps(self):
917 entries = self.journaler.getAll(LVMVDI.JRN_CLONE)
918 for uuid, val in entries.items(): 918 ↛ 919 (line 918 didn't jump to line 919, because the loop on line 918 never started)
919 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
920 self._handleInterruptedCloneOp(uuid, val)
921 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
922 self.journaler.remove(LVMVDI.JRN_CLONE, uuid)
924 def _handleInterruptedCoalesceLeaf(self):
925 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
926 if len(entries) > 0: 926 ↛ 927 (line 926 didn't jump to line 927, because the condition on line 926 was never true)
927 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
928 cleanup.gc_force(self.session, self.uuid)
929 self.lvmCache.refresh()
931 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
932 """Either roll back or finalize the interrupted snapshot/clone
933 operation. Rolling back is unsafe if the leaf images have already been
934 in use and written to. However, it is always safe to roll back while
935 we're still in the context of the failed snapshot operation since the
936 VBD is paused for the duration of the operation"""
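# The journal value packs both new nodes as "<baseUuid>_<clonUuid>"; an empty
# clonUuid (checked throughout below) presumably means the clone leaf had not
# been created yet.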
937 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
938 lvs = LvmCowUtil.getVolumeInfo(self.lvmCache)
939 baseUuid, clonUuid = jval.split("_")
941 # is there a "base copy" VDI?
942 if not lvs.get(baseUuid):
943 # no base copy: make sure the original is there
944 if lvs.get(origUuid):
945 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
946 return
947 raise util.SMException("base copy %s not present, " \
948 "but no original %s found" % (baseUuid, origUuid))
950 vdis = LvmCowUtil.getVDIInfo(self.lvmCache)
951 base = vdis[baseUuid]
952 cowutil = getCowUtil(base.vdiType)
954 if forceUndo:
955 util.SMlog("Explicit revert")
956 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
957 return
959 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
960 util.SMlog("One or both leaves missing => revert")
961 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
962 return
964 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
965 util.SMlog("One or both leaves invalid => revert")
966 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
967 return
969 orig = vdis[origUuid]
970 self.lvActivator.activate(baseUuid, base.lvName, False)
971 self.lvActivator.activate(origUuid, orig.lvName, False)
972 if orig.parentUuid != baseUuid:
973 parent = vdis[orig.parentUuid]
974 self.lvActivator.activate(parent.uuid, parent.lvName, False)
975 origPath = os.path.join(self.path, orig.lvName)
977 if cowutil.check(origPath) != CowUtil.CheckResult.Success:
978 util.SMlog("Orig image invalid => revert")
979 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
980 return
982 if clonUuid:
983 clon = vdis[clonUuid]
984 clonPath = os.path.join(self.path, clon.lvName)
985 self.lvActivator.activate(clonUuid, clon.lvName, False)
986 if cowutil.check(clonPath) != CowUtil.CheckResult.Success:
987 util.SMlog("Clon image invalid => revert")
988 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
989 return
991 util.SMlog("Snapshot appears valid, will not roll back")
992 self._completeCloneOp(cowutil, vdis, origUuid, baseUuid, clonUuid)
994 def _undoCloneOp(self, cowutil, lvs, origUuid, baseUuid, clonUuid):
995 base = lvs[baseUuid]
996 basePath = os.path.join(self.path, base.name)
998 # make the parent RW
999 if base.readonly:
1000 self.lvmCache.setReadonly(base.name, False)
1002 ns = NS_PREFIX_LVM + self.uuid
1003 origRefcountBinary = RefCounter.check(origUuid, ns)[1]
1004 origRefcountNormal = 0
1006 # un-hide the parent
1007 if VdiType.isCowImage(base.vdiType):
1008 self.lvActivator.activate(baseUuid, base.name, False)
1009 origRefcountNormal = 1
1010 imageInfo = cowutil.getInfo(basePath, LvmCowUtil.extractUuid, False)
1011 if imageInfo.hidden:
1012 cowutil.setHidden(basePath, False)
1013 elif base.hidden:
1014 self.lvmCache.setHidden(base.name, False)
1016 # remove the child nodes
1017 if clonUuid and lvs.get(clonUuid):
1018 if not VdiType.isCowImage(lvs[clonUuid].vdiType):
1019 raise util.SMException("clone %s not a COW image" % clonUuid)
1020 self.lvmCache.remove(lvs[clonUuid].name)
1021 if self.lvActivator.get(clonUuid, False):
1022 self.lvActivator.remove(clonUuid, False)
1023 if lvs.get(origUuid):
1024 self.lvmCache.remove(lvs[origUuid].name)
1026 # inflate the parent to fully-allocated size
1027 if VdiType.isCowImage(base.vdiType):
1028 lvmcowutil = LvmCowUtil(cowutil)
1029 fullSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
1030 lvmcowutil.inflate(self.journaler, self.uuid, baseUuid, base.vdiType, fullSize)
1032 # rename back
1033 origLV = LV_PREFIX[base.vdiType] + origUuid
1034 self.lvmCache.rename(base.name, origLV)
1035 RefCounter.reset(baseUuid, ns)
1036 if self.lvActivator.get(baseUuid, False):
1037 self.lvActivator.replace(baseUuid, origUuid, origLV, False)
1038 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
1040 # At this stage, tapdisk and SM vdi will be in paused state. Remove
1041 # flag to facilitate vm deactivate
1042 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
1043 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
1045 # update LVM metadata on slaves
1046 slaves = util.get_slaves_attached_on(self.session, [origUuid])
1047 LvmCowUtil.refreshVolumeOnSlaves(self.session, self.uuid, self.vgname,
1048 origLV, origUuid, slaves)
1050 util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
1052 def _completeCloneOp(self, cowutil, vdis, origUuid, baseUuid, clonUuid):
1053 """Finalize the interrupted snapshot/clone operation. This must not be
1054 called from the live snapshot op context because we attempt to pause/
1055 unpause the VBD here (the VBD is already paused during snapshot, so it
1056 would cause a deadlock)"""
1057 base = vdis[baseUuid]
1058 clon = None
1059 if clonUuid:
1060 clon = vdis[clonUuid]
1062 cleanup.abort(self.uuid)
1064 # make sure the parent is hidden and read-only
1065 if not base.hidden:
1066 if not VdiType.isCowImage(base.vdiType):
1067 self.lvmCache.setHidden(base.lvName)
1068 else:
1069 basePath = os.path.join(self.path, base.lvName)
1070 cowutil.setHidden(basePath)
1071 if not base.lvReadonly:
1072 self.lvmCache.setReadonly(base.lvName, True)
1074 # NB: since this snapshot-preserving call is only invoked outside the
1075 # snapshot op context, we assume the LVM metadata on the involved slave
1076 # has by now been refreshed and do not attempt to do it here
1078 # Update the original record
1079 try:
1080 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
1081 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
1082 type = self.session.xenapi.VDI.get_type(vdi_ref)
1083 sm_config["vdi_type"] = vdis[origUuid].vdiType
1084 sm_config['vhd-parent'] = baseUuid
1085 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
1086 except XenAPI.Failure:
1087 util.SMlog("ERROR updating the orig record")
1089 # introduce the new VDI records
1090 if clonUuid:
1091 try:
1092 clon_vdi = VDI.VDI(self, clonUuid)
1093 clon_vdi.read_only = False
1094 clon_vdi.location = clonUuid
1095 clon_vdi.utilisation = clon.sizeLV
1096 clon_vdi.sm_config = {
1097 "vdi_type": clon.vdiType,
1098 "vhd-parent": baseUuid}
1100 if not self.legacyMode:
1101 LVMMetadataHandler(self.mdpath). \
1102 ensureSpaceIsAvailableForVdis(1)
1104 clon_vdi_ref = clon_vdi._db_introduce()
1105 util.SMlog("introduced clon VDI: %s (%s)" % \
1106 (clon_vdi_ref, clonUuid))
1108 vdi_info = {UUID_TAG: clonUuid,
1109 NAME_LABEL_TAG: clon_vdi.label,
1110 NAME_DESCRIPTION_TAG: clon_vdi.description,
1111 IS_A_SNAPSHOT_TAG: 0,
1112 SNAPSHOT_OF_TAG: '',
1113 SNAPSHOT_TIME_TAG: '',
1114 TYPE_TAG: type,
1115 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
1116 READ_ONLY_TAG: int(clon_vdi.read_only),
1117 MANAGED_TAG: int(clon_vdi.managed),
1118 METADATA_OF_POOL_TAG: ''
1119 }
1121 if not self.legacyMode:
1122 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1124 except XenAPI.Failure:
1125 util.SMlog("ERROR introducing the clon record")
1127 try:
1128 base_vdi = VDI.VDI(self, baseUuid) # readonly parent
1129 base_vdi.label = "base copy"
1130 base_vdi.read_only = True
1131 base_vdi.location = baseUuid
1132 base_vdi.size = base.sizeVirt
1133 base_vdi.utilisation = base.sizeLV
1134 base_vdi.managed = False
1135 base_vdi.sm_config = {
1136 "vdi_type": base.vdiType,
1137 "vhd-parent": baseUuid}
1139 if not self.legacyMode:
1140 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
1142 base_vdi_ref = base_vdi._db_introduce()
1143 util.SMlog("introduced base VDI: %s (%s)" % \
1144 (base_vdi_ref, baseUuid))
1146 vdi_info = {UUID_TAG: baseUuid,
1147 NAME_LABEL_TAG: base_vdi.label,
1148 NAME_DESCRIPTION_TAG: base_vdi.description,
1149 IS_A_SNAPSHOT_TAG: 0,
1150 SNAPSHOT_OF_TAG: '',
1151 SNAPSHOT_TIME_TAG: '',
1152 TYPE_TAG: type,
1153 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
1154 READ_ONLY_TAG: int(base_vdi.read_only),
1155 MANAGED_TAG: int(base_vdi.managed),
1156 METADATA_OF_POOL_TAG: ''
1157 }
1159 if not self.legacyMode:
1160 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1161 except XenAPI.Failure:
1162 util.SMlog("ERROR introducing the base record")
1164 util.SMlog("*** INTERRUPTED CLONE OP: complete")
1166 def _undoAllJournals(self):
1167 """Undo all COW image & SM interrupted journaled operations. This call must
1168 be serialized with respect to all operations that create journals"""
1169 # undoing interrupted inflates must be done first, since undoing COW image
1170 # ops might require inflations
1171 self.lock.acquire()
1172 try:
1173 self._undoAllInflateJournals()
1174 self._undoAllCowJournals()
1175 self._handleInterruptedCloneOps()
1176 self._handleInterruptedCoalesceLeaf()
1177 finally:
1178 self.lock.release()
1179 self.cleanup()
1181 def _undoAllInflateJournals(self):
1182 entries = self.journaler.getAll(LvmCowUtil.JOURNAL_INFLATE)
1183 if len(entries) == 0:
1184 return
1185 self._loadvdis()
1186 for uuid, val in entries.items():
1187 vdi = self.vdis.get(uuid)
1188 if vdi: 1188 ↛ 1208 (line 1188 didn't jump to line 1208, because the condition on line 1188 was never false)
1189 util.SMlog("Found inflate journal %s, deflating %s to %s" % \
1190 (uuid, vdi.path, val))
1191 if vdi.readonly: 1191 ↛ 1192 (line 1191 didn't jump to line 1192, because the condition on line 1191 was never true)
1192 self.lvmCache.setReadonly(vdi.lvname, False)
1193 self.lvActivator.activate(uuid, vdi.lvname, False)
1194 currSizeLV = self.lvmCache.getSize(vdi.lvname)
1196 cowutil = getCowUtil(vdi.vdi_type)
1197 lvmcowutil = LvmCowUtil(cowutil)
1199 footer_size = cowutil.getFooterSize()
1200 util.zeroOut(vdi.path, currSizeLV - footer_size, footer_size)
1201 lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(val))
1202 if vdi.readonly: 1202 ↛ 1203 (line 1202 didn't jump to line 1203, because the condition on line 1202 was never true)
1203 self.lvmCache.setReadonly(vdi.lvname, True)
1204 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1204 ↛ 1205 (line 1204 didn't jump to line 1205, because the condition on line 1204 was never true)
1205 LvmCowUtil.refreshVolumeOnAllSlaves(
1206 self.session, self.uuid, self.vgname, vdi.lvname, uuid
1207 )
1208 self.journaler.remove(LvmCowUtil.JOURNAL_INFLATE, uuid)
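# Drop the cached scan results: deflating changed LV sizes, so later users
# should rescan via _loadvdis() rather than trust vdiInfo/allVDIs.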
1209 delattr(self, "vdiInfo")
1210 delattr(self, "allVDIs")
1212 def _undoAllCowJournals(self):
1213 """
1214 Check if there are COW journals in existence and revert them.
1215 """
1216 journals = LvmCowUtil.getAllResizeJournals(self.lvmCache)
1217 if len(journals) == 0: 1217 ↛ 1219 (line 1217 didn't jump to line 1219, because the condition on line 1217 was never false)
1218 return
1219 self._loadvdis()
1221 for uuid, jlvName in journals:
1222 vdi = self.vdis[uuid]
1223 util.SMlog("Found COW journal %s, reverting %s" % (uuid, vdi.path))
1224 cowutil = getCowUtil(vdi.vdi_type)
1225 lvmcowutil = LvmCowUtil(cowutil)
1227 self.lvActivator.activate(uuid, vdi.lvname, False)
1228 self.lvmCache.activateNoRefcount(jlvName)
1229 fullSize = lvmcowutil.calcVolumeSize(vdi.size)
1230 lvmcowutil.inflate(self.journaler, self.uuid, vdi.uuid, vdi.vdi_type, fullSize)
1231 try:
1232 jFile = os.path.join(self.path, jlvName)
1233 cowutil.revert(vdi.path, jFile)
1234 except util.CommandException:
1235 util.logException("COW journal revert")
1236 cowutil.check(vdi.path)
1237 util.SMlog("COW image revert failed but COW image ok: removing journal")
1238 # Attempt to reclaim unused space
1241 imageInfo = cowutil.getInfo(vdi.path, LvmCowUtil.extractUuid, False)
1242 NewSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
1243 if NewSize < fullSize:
1244 lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
1245 LvmCowUtil.refreshVolumeOnAllSlaves(self.session, self.uuid, self.vgname, vdi.lvname, uuid)
1246 self.lvmCache.remove(jlvName)
1247 delattr(self, "vdiInfo")
1248 delattr(self, "allVDIs")
1250 def call_on_slave(self, args, host_refs, message: str):
1251 master_ref = util.get_this_host_ref(self.session)
1252 for hostRef in host_refs:
1253 if hostRef == master_ref: 1253 ↛ 1254 (line 1253 didn't jump to line 1254, because the condition on line 1253 was never true)
1254 continue
1255 util.SMlog(f"{message} on slave {hostRef}")
1256 rv = self.session.xenapi.host.call_plugin(
1257 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1258 util.SMlog("call-plugin returned: %s" % rv)
1259 if not rv: 1259 ↛ 1260 (line 1259 didn't jump to line 1260, because the condition on line 1259 was never true)
1260 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1262 def _updateSlavesPreClone(self, hostRefs, origOldLV):
1263 args = {"vgName": self.vgname,
1264 "action1": "deactivateNoRefcount",
1265 "lvName1": origOldLV}
1266 message = "Deactivate VDI"
1267 self.call_on_slave(args, hostRefs, message)
1269 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
1270 baseUuid, baseLV):
1271 """We need to reactivate the original LV on each slave (note that the
1272 name for the original LV might change), as well as init the refcount
1273 for the base LV"""
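# The "multi" plugin call takes numbered steps (action1/lvName1, action2/...);
# the on-slave plugin is expected to execute them in order (see call_on_slave).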
1274 args = {"vgName": self.vgname,
1275 "action1": "refresh",
1276 "lvName1": origLV,
1277 "action2": "activate",
1278 "ns2": NS_PREFIX_LVM + self.uuid,
1279 "lvName2": baseLV,
1280 "uuid2": baseUuid}
1282 message = f"Updating {origOldLV}, {origLV}, {baseLV}"
1283 self.call_on_slave(args, hostRefs, message)
1285 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
1286 """Reactivate and refresh CBT log file on slaves"""
1287 args = {"vgName": self.vgname,
1288 "action1": "deactivateNoRefcount",
1289 "lvName1": cbtlog,
1290 "action2": "refresh",
1291 "lvName2": cbtlog}
1293 message = f"Updating {cbtlog}"
1294 self.call_on_slave(args, hostRefs, message)
1296 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
1297 """Tell the slave we deleted the base image"""
1298 args = {"vgName": self.vgname,
1299 "action1": "cleanupLockAndRefcount",
1300 "uuid1": baseUuid,
1301 "ns1": NS_PREFIX_LVM + self.uuid}
1303 message = f"Cleaning locks for {baseLV}"
1304 self.call_on_slave(args, hostRefs, message)
1306 def _cleanup(self, skipLockCleanup=False):
1307 """delete stale refcounter, flag, and lock files"""
1308 RefCounter.resetAll(NS_PREFIX_LVM + self.uuid)
1309 IPCFlag(self.uuid).clearAll()
1310 if not skipLockCleanup: 1310 ↛ 1311 (line 1310 didn't jump to line 1311, because the condition on line 1310 was never true)
1311 lock.Lock.cleanupAll(self.uuid)
1312 lock.Lock.cleanupAll(NS_PREFIX_LVM + self.uuid)
1314 def _prepareTestMode(self):
1315 util.SMlog("Test mode: %s" % self.testMode)
1316 if self.ENV_VAR_VHD_TEST.get(self.testMode): 1316 ↛ 1317 (line 1316 didn't jump to line 1317, because the condition on line 1316 was never true)
1317 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
1318 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
1320 def _kickGC(self):
1321 util.SMlog("Kicking GC")
1322 cleanup.start_gc_service(self.uuid)
1324 def ensureCBTSpace(self, virtual_size=0):
1325 # Ensure we have space for at least one LV
1326 size = max(util.roundup(CBT_BLOCK_SIZE, virtual_size//CBT_BLOCK_SIZE), self.journaler.LV_SIZE)
1327 self._ensureSpaceAvailable(size)
1330class LVMVDI(VDI.VDI):
1332 JRN_CLONE = "clone" # journal entry type for the clone operation
1334 @override
1335 def load(self, vdi_uuid) -> None:
1336 self.lock = self.sr.lock
1337 self.lvActivator = self.sr.lvActivator
1338 self.loaded = False
1339 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"): 1339 ↛ 1341 (line 1339 didn't jump to line 1341, because the condition on line 1339 was never false)
1340 self._setType(VdiType.RAW)
1341 self.uuid = vdi_uuid
1342 self.location = self.uuid
1343 self.exists = True
1345 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
1346 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
1347 if self.parent: 1347 ↛ 1348 (line 1347 didn't jump to line 1348, because the condition on line 1347 was never true)
1348 self.sm_config_override['vhd-parent'] = self.parent
1349 else:
1350 self.sm_config_override['vhd-parent'] = None
1351 return
1353 # scan() didn't run: determine the type of the VDI manually
1354 if self._determineType(): 1354 ↛ 1358 (line 1354 didn't jump to line 1358, because the condition on line 1354 was never false)
1355 return
1357 # the VDI must be in the process of being created
1358 self.exists = False
1360 vdi_sm_config = self.sr.srcmd.params.get("vdi_sm_config")
1361 if vdi_sm_config:
1362 image_format = vdi_sm_config.get("image-format") or vdi_sm_config.get("type")
1363 if image_format:
1364 try:
1365 self._setType(CREATE_PARAM_TYPES[image_format])
1366 except:
1367 raise xs_errors.XenError('VDICreate', opterr='bad image format')
1368 if self.sr.legacyMode and self.sr.cmd == 'vdi_create' and VdiType.isCowImage(self.vdi_type):
1369 raise xs_errors.XenError('VDICreate', opterr='Cannot create COW type disk in legacy mode')
1371 if not self.vdi_type:
1372 self._setType(getVdiTypeFromImageFormat(self.sr.preferred_image_formats[0]))
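# Type resolution for a new VDI: an explicit "image-format"/"type" key in
# vdi_sm_config wins, else the SR's first preferred image format; legacy-mode
# SRs were forced to RAW above and may not create COW disks.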
1374 self.lvname = "%s%s" % (LV_PREFIX[self.vdi_type], vdi_uuid)
1375 self.path = os.path.join(self.sr.path, self.lvname)
1377 @override
1378 def create(self, sr_uuid, vdi_uuid, size) -> str:
1379 util.SMlog("LVMVDI.create for %s" % self.uuid)
1380 if not self.sr.isMaster:
1381 raise xs_errors.XenError('LVMMaster')
1382 if self.exists:
1383 raise xs_errors.XenError('VDIExists')
1385 size = self.cowutil.validateAndRoundImageSize(int(size))
1387 util.SMlog("LVMVDI.create: type = %s, %s (size=%s)" % \
1388 (self.vdi_type, self.path, size))
1389 lvSize = 0
1390 self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
1391 if not VdiType.isCowImage(self.vdi_type):
1392 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
1393 else:
1394 if self.sr.provision == "thin":
1395 lvSize = util.roundup(
1396 lvutil.LVM_SIZE_INCREMENT,
1397 self.cowutil.calcOverheadEmpty(max(size, self.cowutil.getDefaultPreallocationSizeVirt()))
1398 )
1399 elif self.sr.provision == "thick":
1400 lvSize = self.lvmcowutil.calcVolumeSize(int(size))
1402 self.sr._ensureSpaceAvailable(lvSize)
1404 try:
1405 self.sr.lvmCache.create(self.lvname, lvSize)
1406 if not VdiType.isCowImage(self.vdi_type):
1407 self.size = self.sr.lvmCache.getSize(self.lvname)
1408 else:
1409 self.cowutil.create(
1410 self.path, int(size), False, self.cowutil.getDefaultPreallocationSizeVirt()
1411 )
1412 self.size = self.cowutil.getSizeVirt(self.path)
1413 self.sr.lvmCache.deactivateNoRefcount(self.lvname)
1414 except util.CommandException as e:
1415 util.SMlog("Unable to create VDI")
1416 self.sr.lvmCache.remove(self.lvname)
1417 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
1419 self.utilisation = lvSize
1420 self.sm_config["vdi_type"] = self.vdi_type
1421 self.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)
1423 if not self.sr.legacyMode:
1424 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1426 self.ref = self._db_introduce()
1427 self.sr._updateStats(self.sr.uuid, self.size)
1429 vdi_info = {UUID_TAG: self.uuid,
1430 NAME_LABEL_TAG: util.to_plain_string(self.label),
1431 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1432 IS_A_SNAPSHOT_TAG: 0,
1433 SNAPSHOT_OF_TAG: '',
1434 SNAPSHOT_TIME_TAG: '',
1435 TYPE_TAG: self.ty,
1436 VDI_TYPE_TAG: self.vdi_type,
1437 READ_ONLY_TAG: int(self.read_only),
1438 MANAGED_TAG: int(self.managed),
1439 METADATA_OF_POOL_TAG: ''
1440 }
1442 if not self.sr.legacyMode:
1443 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1445 return VDI.VDI.get_params(self)
1447 @override
1448 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None:
1449 util.SMlog("LVMVDI.delete for %s" % self.uuid)
1450 try:
1451 self._loadThis()
1452 except xs_errors.SRException as e:
1453 # Catch 'VDI doesn't exist' exception
1454 if e.errno == 46:
1455 return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1456 raise
1458 vdi_ref = self.sr.srcmd.params['vdi_ref']
1459 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1460 raise xs_errors.XenError("VDIDelete", \
1461 opterr="Deleting non-leaf node not permitted")
1463 if not self.hidden:
1464 self._markHidden()
1466 if not data_only:
1467 # Remove from XAPI and delete from MGT
1468 self._db_forget()
1469 else:
1470 # If this is a data_destroy call, don't remove from XAPI db
1471 # Only delete from MGT
1472 if not self.sr.legacyMode:
1473 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
1475 # deactivate here because it might be too late to do it in the "final"
1476 # step: GC might have removed the LV by then
1477 if self.sr.lvActivator.get(self.uuid, False):
1478 self.sr.lvActivator.deactivate(self.uuid, False)
1480 try:
1481 self.sr.lvmCache.remove(self.lvname)
1482 self.sr.lock.cleanup(vdi_uuid, NS_PREFIX_LVM + sr_uuid)
1483 self.sr.lock.cleanupAll(vdi_uuid)
1484 except xs_errors.SRException as e:
1485 util.SMlog(
1486 "Failed to remove the volume (maybe it is leaf coalescing) "
1487 "for %s err:%d" % (self.uuid, e.errno))
1489 self.sr._updateStats(self.sr.uuid, -self.size)
1490 self.sr._kickGC()
1491 return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1493 @override
1494 def attach(self, sr_uuid, vdi_uuid) -> str:
1495 util.SMlog("LVMVDI.attach for %s" % self.uuid)
1496 if self.sr.journaler.hasJournals(self.uuid):
1497 raise xs_errors.XenError('VDIUnavailable',
1498 opterr='Interrupted operation detected on this VDI, '
1499 'scan SR first to trigger auto-repair')
1501 writable = ('args' not in self.sr.srcmd.params) or \
1502 (self.sr.srcmd.params['args'][0] == "true")
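# Thin-provisioned COW VDIs are kept deflated while offline: a writable
# attach must first inflate the LV to its fully-provisioned size.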
1503 needInflate = True
1504 if not VdiType.isCowImage(self.vdi_type) or not writable:
1505 needInflate = False
1506 else:
1507 self._loadThis()
1508 if self.utilisation >= self.lvmcowutil.calcVolumeSize(self.size):
1509 needInflate = False
1511 if needInflate:
1512 try:
1513 self._prepareThin(True, self.vdi_type)
1514 except:
1515 util.logException("attach")
1516 raise xs_errors.XenError('LVMProvisionAttach')
1518 try:
1519 return self._attach()
1520 finally:
1521 if not self.sr.lvActivator.deactivateAll():
1522 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
1524 @override
1525 def detach(self, sr_uuid, vdi_uuid) -> None:
1526 util.SMlog("LVMVDI.detach for %s" % self.uuid)
1527 self._loadThis()
1528 already_deflated = (self.utilisation < \
1529 self.lvmcowutil.calcVolumeSize(self.size))
1530 needDeflate = True
1531 if not VdiType.isCowImage(self.vdi_type) or already_deflated:
1532 needDeflate = False
1533 elif self.sr.provision == "thick":
1534 needDeflate = False
1535 # except for snapshots, which are always deflated
1536 if self.sr.srcmd.cmd != 'vdi_detach_from_config':
1537 vdi_ref = self.sr.srcmd.params['vdi_ref']
1538 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
1539 if snap:
1540 needDeflate = True
1542 if needDeflate:
1543 try:
1544 self._prepareThin(False, self.vdi_type)
1545 except:
1546 util.logException("_prepareThin")
1547 raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
1549 try:
1550 self._detach()
1551 finally:
1552 if not self.sr.lvActivator.deactivateAll():
1553 raise xs_errors.XenError("SMGeneral", opterr="deactivation")
1555 # We only support offline resize
1556 @override
1557 def resize(self, sr_uuid, vdi_uuid, size) -> str:
1558 util.SMlog("LVMVDI.resize for %s" % self.uuid)
1559 if not self.sr.isMaster:
1560 raise xs_errors.XenError('LVMMaster')
1562 self._loadThis()
1563 if self.hidden:
1564 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1566 if size < self.size:
1567 util.SMlog('vdi_resize: shrinking not supported: ' + \
1568 '(current size: %d, new size: %d)' % (self.size, size))
1569 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1571 size = self.cowutil.validateAndRoundImageSize(int(size))
1573 if size == self.size:
1574 return VDI.VDI.get_params(self)
1576 if not VdiType.isCowImage(self.vdi_type):
1577 lvSizeOld = self.size
1578 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
1579 else:
1580 lvSizeOld = self.utilisation
1581 lvSizeNew = self.lvmcowutil.calcVolumeSize(size)
1582 if self.sr.provision == "thin":
1583 # VDI is currently deflated, so keep it deflated
1584 lvSizeNew = lvSizeOld
1585 assert(lvSizeNew >= lvSizeOld)
1586 spaceNeeded = lvSizeNew - lvSizeOld
1587 self.sr._ensureSpaceAvailable(spaceNeeded)
1589 oldSize = self.size
1590 if not VdiType.isCowImage(self.vdi_type):
1591 self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
1592 self.size = self.sr.lvmCache.getSize(self.lvname)
1593 self.utilisation = self.size
1594 else:
1595 if lvSizeNew != lvSizeOld:
1596 self.lvmcowutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type, lvSizeNew)
1597 self.cowutil.setSizeVirtFast(self.path, size)
1598 self.size = self.cowutil.getSizeVirt(self.path)
1599 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
1601 vdi_ref = self.sr.srcmd.params['vdi_ref']
1602 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1603 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
1604 str(self.utilisation))
1605 self.sr._updateStats(self.sr.uuid, self.size - oldSize)
1606 super(LVMVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
1607 return VDI.VDI.get_params(self)
1609 @override
1610 def clone(self, sr_uuid, vdi_uuid) -> str:
1611 return self._do_snapshot(
1612 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
1614 @override
1615 def compose(self, sr_uuid, vdi1, vdi2) -> None:
1616 util.SMlog("LVMSR.compose for %s -> %s" % (vdi2, vdi1))
1617 if not VdiType.isCowImage(self.vdi_type):
1618 raise xs_errors.XenError('Unimplemented')
1620 parent_uuid = vdi1
1621 parent_lvname = LV_PREFIX[self.vdi_type] + parent_uuid
1622 assert(self.sr.lvmCache.checkLV(parent_lvname))
1623 parent_path = os.path.join(self.sr.path, parent_lvname)
1625 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1626 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
1628 self.cowutil.setParent(self.path, parent_path, False)
1629 self.cowutil.setHidden(parent_path)
1630 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
1632 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
1633 True):
1634 raise util.SMException("failed to refresh VDI %s" % self.uuid)
1636 util.SMlog("Compose done")
1638 def reset_leaf(self, sr_uuid, vdi_uuid):
1639 util.SMlog("LVMSR.reset_leaf for %s" % vdi_uuid)
1640 if not VdiType.isCowImage(self.vdi_type):
1641 raise xs_errors.XenError('Unimplemented')
1643 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1645 # safety check
1646 if not self.cowutil.hasParent(self.path):
1647 raise util.SMException("ERROR: VDI %s has no parent, "
1648 "will not reset contents" % self.uuid)
1650 self.cowutil.killData(self.path)
1652 def _attach(self):
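"""Activate the whole image chain, populate the xenstore SCSI data and hand over to the generic VDI attach."""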
1653 self._chainSetActive(True, True, True)
1654 if not util.pathexists(self.path):
1655 raise xs_errors.XenError('VDIUnavailable', \
1656 opterr='Could not find: %s' % self.path)
1658 if not hasattr(self, 'xenstore_data'):
1659 self.xenstore_data = {}
1661 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
1662 scsiutil.gen_synthetic_page_data(self.uuid)))
1664 self.xenstore_data['storage-type'] = 'lvm'
1665 self.xenstore_data['vdi-type'] = self.vdi_type
1667 self.attached = True
1668 self.sr.lvActivator.persist()
1669 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
1671 def _detach(self):
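"""Mark the VDI detached; the chain LVs are queued for deactivation in the final cleanup step."""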
1672 self._chainSetActive(False, True)
1673 self.attached = False
1675 @override
1676 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
1677 cloneOp=False, secondary=None, cbtlog=None, is_mirror_destination=False) -> str:
1678 # If CBT is enabled, save file consistency state
1679 if cbtlog is not None:
1680 if blktap2.VDI.tap_status(self.session, vdi_uuid):
1681 consistency_state = False
1682 else:
1683 consistency_state = True
1684 util.SMlog("Saving log consistency state of %s for vdi: %s" %
1685 (consistency_state, vdi_uuid))
1686 else:
1687 consistency_state = None
1689 pause_time = time.time()
1690 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
1691 raise util.SMException("failed to pause VDI %s" % vdi_uuid)
1693 snapResult = None
1694 try:
1695 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state, is_mirror_destination)
1696 except Exception as e1:
1697 try:
1698 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
1699 secondary=None)
1700 except Exception as e2:
1701 util.SMlog('WARNING: failed to clean up failed snapshot: '
1702 '%s (error ignored)' % e2)
1703 raise
1704 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary)
1705 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
1706 unpause_time = time.time()
1707 if (unpause_time - pause_time) > LONG_SNAPTIME:
1708 util.SMlog('WARNING: snapshot paused VM for %s seconds' %
1709 (unpause_time - pause_time))
1710 return snapResult
1712 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None, is_mirror_destination=False):
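"""Core snapshot logic: journal the operation, rename self into a
hidden read-only "base copy", deflate it, and create one (or, for
SNAPSHOT_DOUBLE, two) COW children on top of it."""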
1713 util.SMlog("LVMVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
1715 if not self.sr.isMaster:
1716 raise xs_errors.XenError('LVMMaster')
1717 if self.sr.legacyMode:
1718 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
1720 self._loadThis()
1721 if self.hidden:
1722 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
1724 snapVdiType = self.sr._get_snap_vdi_type(self.vdi_type, self.size)
1726 self.sm_config = self.session.xenapi.VDI.get_sm_config( \
1727 self.sr.srcmd.params['vdi_ref'])
1728 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1728 ↛ 1729line 1728 didn't jump to line 1729, because the condition on line 1728 was never true
1729 if not util.fistpoint.is_active("testsm_clone_allow_raw"):
1730 raise xs_errors.XenError('Unimplemented', \
1731 opterr='Raw VDI, snapshot or clone not permitted')
1733 # we must activate the entire image chain because the real parent could
1734 # theoretically be anywhere in the chain if all images under it are empty
1735 self._chainSetActive(True, False)
1736 if not util.pathexists(self.path):
1737 raise xs_errors.XenError('VDIUnavailable', \
1738 opterr='VDI unavailable: %s' % (self.path))
1740 if VdiType.isCowImage(self.vdi_type):
1741 depth = self.cowutil.getDepth(self.path)
1742 if depth == -1:
1743 raise xs_errors.XenError('VDIUnavailable', \
1744 opterr='failed to get COW depth')
1745 elif depth >= self.cowutil.getMaxChainLength():
1746 raise xs_errors.XenError('SnapshotChainTooLong')
1748 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
1749 self.sr.srcmd.params['vdi_ref'])
1751 fullpr = self.lvmcowutil.calcVolumeSize(self.size)
1752 thinpr = util.roundup(
1753 lvutil.LVM_SIZE_INCREMENT,
1754 self.cowutil.calcOverheadEmpty(max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()))
1755 )
1756 lvSizeOrig = thinpr
1757 lvSizeClon = thinpr
1759 hostRefs = []
1760 if self.sr.cmd == "vdi_snapshot":
1761 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
1762 if hostRefs:
1763 lvSizeOrig = fullpr
1764 if self.sr.provision == "thick":
1765 if not self.issnap:
1766 lvSizeOrig = fullpr
1767 if self.sr.cmd != "vdi_snapshot":
1768 lvSizeClon = fullpr
1770 if (snapType == VDI.SNAPSHOT_SINGLE or
1771 snapType == VDI.SNAPSHOT_INTERNAL):
1772 lvSizeClon = 0
1774 # the space required must include 2 journal LVs: a clone journal and an
1775 # inflate journal (for failure handling)
1776 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
1777 lvSizeBase = self.size
1778 if VdiType.isCowImage(self.vdi_type):
1779 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, self.cowutil.getSizePhys(self.path))
1780 size_req -= (self.utilisation - lvSizeBase)
1781 self.sr._ensureSpaceAvailable(size_req)
1783 if hostRefs:
1784 self.sr._updateSlavesPreClone(hostRefs, self.lvname)
1786 baseUuid = util.gen_uuid()
1787 origUuid = self.uuid
1788 clonUuid = ""
1789 if snapType == VDI.SNAPSHOT_DOUBLE:
1790 clonUuid = util.gen_uuid()
1791 jval = "%s_%s" % (baseUuid, clonUuid)
1792 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
1793 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
1795 try:
1796 # self becomes the "base vdi"
1797 origOldLV = self.lvname
1798 baseLV = LV_PREFIX[self.vdi_type] + baseUuid
1799 self.sr.lvmCache.rename(self.lvname, baseLV)
1800 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
1801 RefCounter.set(baseUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
1802 self.uuid = baseUuid
1803 self.lvname = baseLV
1804 self.path = os.path.join(self.sr.path, baseLV)
1805 self.label = "base copy"
1806 self.read_only = True
1807 self.location = self.uuid
1808 self.managed = False
1810 # shrink the base copy to the minimum - we do it before creating
1811 # the snapshot volumes to avoid requiring double the space
1812 if VdiType.isCowImage(self.vdi_type):
1813 self.lvmcowutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
1814 self.utilisation = lvSizeBase
1815 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
1817 snapVDI = self._createSnap(origUuid, snapVdiType, lvSizeOrig, False, is_mirror_destination)
1818 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
1819 snapVDI2 = None
1820 if snapType == VDI.SNAPSHOT_DOUBLE:
1821 snapVDI2 = self._createSnap(clonUuid, snapVdiType, lvSizeClon, True)
1822 # If we have CBT enabled on the VDI,
1823 # set CBT status for the new snapshot disk
1824 if cbtlog:
1825 snapVDI2.cbt_enabled = True
1826 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
1828 # note: it is important to mark the parent hidden only AFTER the
1829 # new image children have been created, which are referencing it;
1830 # otherwise we would introduce a race with GC that could reclaim
1831 # the parent before we snapshot it
1832 if not VdiType.isCowImage(self.vdi_type):
1833 self.sr.lvmCache.setHidden(self.lvname)
1834 else:
1835 self.cowutil.setHidden(self.path)
1836 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
1838 # set the base copy to ReadOnly
1839 self.sr.lvmCache.setReadonly(self.lvname, True)
1840 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
1842 if hostRefs:
1843 self.sr._updateSlavesOnClone(hostRefs, origOldLV,
1844 snapVDI.lvname, self.uuid, self.lvname)
1846 # Update CBT files if the user created a snapshot (SNAPSHOT_DOUBLE)
1847 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
1848 snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
1849 if hostRefs:
1850 cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
1851 try:
1852 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
1853 except:
1854 alert_name = "VDI_CBT_SNAPSHOT_FAILED"
1855 alert_str = ("Creating CBT snapshot for {} failed"
1856 .format(snapVDI.uuid))
1857 snapVDI._disable_cbt_on_error(alert_name, alert_str)
1858 pass
1860 except (util.SMException, XenAPI.Failure) as e:
1861 util.logException("LVMVDI._snapshot")
1862 self._failClone(origUuid, jval, str(e))
1863 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)
1865 self.sr.journaler.remove(self.JRN_CLONE, origUuid)
1867 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
1869 def _createSnap(self, snapUuid, snapVdiType, snapSizeLV, isNew, is_mirror_destination=False):
1870 """Snapshot self and return the snapshot VDI object"""
1872 snapLV = LV_PREFIX[snapVdiType] + snapUuid
1873 snapPath = os.path.join(self.sr.path, snapLV)
1874 self.sr.lvmCache.create(snapLV, int(snapSizeLV))
1875 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
1876 if isNew:
1877 RefCounter.set(snapUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
1878 self.sr.lvActivator.add(snapUuid, snapLV, False)
1879 parentRaw = (self.vdi_type == VdiType.RAW)
1880 self.cowutil.snapshot(
1881 snapPath, self.path, parentRaw, max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()), is_mirror_image=is_mirror_destination
1882 )
1883 snapParent = self.cowutil.getParent(snapPath, LvmCowUtil.extractUuid)
1885 snapVDI = LVMVDI(self.sr, snapUuid)
1886 snapVDI.read_only = False
1887 snapVDI.location = snapUuid
1888 snapVDI.size = self.size
1889 snapVDI.utilisation = snapSizeLV
1890 snapVDI.sm_config = dict()
1891 for key, val in self.sm_config.items():
1892 if key not in [
1893 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
1894 not key.startswith("host_"):
1895 snapVDI.sm_config[key] = val
1896 snapVDI.sm_config["vdi_type"] = snapVdiType
1897 snapVDI.sm_config["vhd-parent"] = snapParent
1898 # TODO: fix the raw snapshot case
1899 snapVDI.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)
1900 snapVDI.lvname = snapLV
1901 return snapVDI
1903 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
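"""Introduce the new VDI records into XAPI and the SR metadata, and
delete the base copy if neither child actually references it."""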
1904 if snapType != VDI.SNAPSHOT_INTERNAL:
1905 self.sr._updateStats(self.sr.uuid, self.size)
1906 basePresent = True
1908 # Verify parent locator field of both children and delete basePath if
1909 # unused
1910 snapParent = snapVDI.sm_config["vhd-parent"]
1911 snap2Parent = ""
1912 if snapVDI2:
1913 snap2Parent = snapVDI2.sm_config["vhd-parent"]
1914 if snapParent != self.uuid and \
1915 (not snapVDI2 or snap2Parent != self.uuid):
1916 util.SMlog("%s != %s != %s => deleting unused base %s" % \
1917 (snapParent, self.uuid, snap2Parent, self.lvname))
1918 RefCounter.put(self.uuid, False, NS_PREFIX_LVM + self.sr.uuid)
1919 self.sr.lvmCache.remove(self.lvname)
1920 self.sr.lvActivator.remove(self.uuid, False)
1921 if hostRefs:
1922 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
1923 basePresent = False
1924 else:
1925 # assign the _binary_ refcount of the original VDI to the new base
1926 # VDI (but as the normal refcount, since binary refcounts are only
1927 # for leaf nodes). The normal refcount of the child is not
1928 # transferred to the base VDI because normal refcounts are
1929 # incremented and decremented individually, and not based on the
1930 # image chain (i.e., the child's normal refcount will be decremented
1931 # independently of its parent situation). Add 1 for this clone op.
1932 # Note that we do not need to protect the refcount operations
1933 # below with per-VDI locking like we do in lvutil because at this
1934 # point we have exclusive access to the VDIs involved. Other SM
1935 # operations are serialized by the Agent or with the SR lock, and
1936 # any coalesce activations are serialized with the SR lock. (The
1937 # coalesce activates the coalesced VDI pair in the beginning, which
1938 # cannot affect the VDIs here because they cannot possibly be
1939 # involved in coalescing at this point, and at the relinkSkip step
1940 # that activates the children, which takes the SR lock.)
1941 ns = NS_PREFIX_LVM + self.sr.uuid
1942 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
1943 RefCounter.set(self.uuid, bcnt + 1, 0, ns)
1945 # the "paused" and "host_*" sm-config keys are special and must stay on
1946 # the leaf without being inherited by anyone else
1947 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]:
1948 snapVDI.sm_config[key] = self.sm_config[key]
1949 del self.sm_config[key]
1951 # Introduce any new VDI records & update the existing one
1952 type = self.session.xenapi.VDI.get_type( \
1953 self.sr.srcmd.params['vdi_ref'])
1954 if snapVDI2:
1955 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1956 vdiRef = snapVDI2._db_introduce()
1957 if cloneOp:
1958 vdi_info = {UUID_TAG: snapVDI2.uuid,
1959 NAME_LABEL_TAG: util.to_plain_string( \
1960 self.session.xenapi.VDI.get_name_label( \
1961 self.sr.srcmd.params['vdi_ref'])),
1962 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1963 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1964 IS_A_SNAPSHOT_TAG: 0,
1965 SNAPSHOT_OF_TAG: '',
1966 SNAPSHOT_TIME_TAG: '',
1967 TYPE_TAG: type,
1968 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1969 READ_ONLY_TAG: 0,
1970 MANAGED_TAG: int(snapVDI2.managed),
1971 METADATA_OF_POOL_TAG: ''
1972 }
1973 else:
1974 util.SMlog("snapshot VDI params: %s" % \
1975 self.session.xenapi.VDI.get_snapshot_time(vdiRef))
1976 vdi_info = {UUID_TAG: snapVDI2.uuid,
1977 NAME_LABEL_TAG: util.to_plain_string( \
1978 self.session.xenapi.VDI.get_name_label( \
1979 self.sr.srcmd.params['vdi_ref'])),
1980 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1981 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1982 IS_A_SNAPSHOT_TAG: 1,
1983 SNAPSHOT_OF_TAG: snapVDI.uuid,
1984 SNAPSHOT_TIME_TAG: '',
1985 TYPE_TAG: type,
1986 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1987 READ_ONLY_TAG: 0,
1988 MANAGED_TAG: int(snapVDI2.managed),
1989 METADATA_OF_POOL_TAG: ''
1990 }
1992 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1993 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
1994 (vdiRef, snapVDI2.uuid))
1996 if basePresent:
1997 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1998 vdiRef = self._db_introduce()
1999 vdi_info = {UUID_TAG: self.uuid,
2000 NAME_LABEL_TAG: self.label,
2001 NAME_DESCRIPTION_TAG: self.description,
2002 IS_A_SNAPSHOT_TAG: 0,
2003 SNAPSHOT_OF_TAG: '',
2004 SNAPSHOT_TIME_TAG: '',
2005 TYPE_TAG: type,
2006 VDI_TYPE_TAG: self.sm_config['vdi_type'],
2007 READ_ONLY_TAG: 1,
2008 MANAGED_TAG: 0,
2009 METADATA_OF_POOL_TAG: ''
2010 }
2012 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
2013 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
2014 (vdiRef, self.uuid))
2016 # Update the original record
2017 vdi_ref = self.sr.srcmd.params['vdi_ref']
2018 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
2019 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
2020 str(snapVDI.utilisation))
2022 # Return the info on the new snap VDI
2023 snap = snapVDI2
2024 if not snap:
2025 snap = self
2026 if not basePresent:
2027 # a single-snapshot of an empty VDI will be a noop, resulting
2028 # in no new VDIs, so return the existing one. The GC wouldn't
2029 # normally try to single-snapshot an empty image of course, but
2030 # if an external snapshot operation manages to sneak in right
2031 # before a snapshot-coalesce phase, we would get here
2032 snap = snapVDI
2033 return snap.get_params()
2035 def _setType(self, vdiType: str) -> None:
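"""Record the VDI type and bind the matching COW and LVM helper utilities."""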
2036 self.vdi_type = vdiType
2037 self.cowutil = getCowUtil(self.vdi_type)
2038 self.lvmcowutil = LvmCowUtil(self.cowutil)
2040 def _initFromVDIInfo(self, vdiInfo):
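"""Initialise this VDI from a pre-computed vdiInfo record, avoiding further LVM or image queries."""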
2041 self._setType(vdiInfo.vdiType)
2042 self.lvname = vdiInfo.lvName
2043 self.size = vdiInfo.sizeVirt
2044 self.utilisation = vdiInfo.sizeLV
2045 self.hidden = vdiInfo.hidden
2046 if self.hidden:
2047 self.managed = False
2048 self.active = vdiInfo.lvActive
2049 self.readonly = vdiInfo.lvReadonly
2050 self.parent = vdiInfo.parentUuid
2051 self.path = os.path.join(self.sr.path, self.lvname)
2052 if hasattr(self, "sm_config_override"):
2053 self.sm_config_override["vdi_type"] = self.vdi_type
2054 else:
2055 self.sm_config_override = {'vdi_type': self.vdi_type}
2056 self.loaded = True
2058 def _initFromLVInfo(self, lvInfo):
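"""Initialise this VDI from LV-level info alone; for COW images the
virtual size and parent are filled in later by _initFromImageInfo."""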
2059 self._setType(lvInfo.vdiType)
2060 self.lvname = lvInfo.name
2061 self.size = lvInfo.size
2062 self.utilisation = lvInfo.size
2063 self.hidden = lvInfo.hidden
2064 self.active = lvInfo.active
2065 self.readonly = lvInfo.readonly
2066 self.parent = ''
2067 self.path = os.path.join(self.sr.path, self.lvname)
2068 if hasattr(self, "sm_config_override"):
2069 self.sm_config_override["vdi_type"] = self.vdi_type
2070 else:
2071 self.sm_config_override = {'vdi_type': self.vdi_type}
2072 if 'vhd-parent' in self.sm_config_override:
2073 self.parent = self.sm_config_override['vhd-parent']
2074 if not VdiType.isCowImage(self.vdi_type):
2075 self.loaded = True
2077 def _initFromImageInfo(self, imageInfo):
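"""Overlay image-level info (virtual size, parent UUID, hidden flag) onto the LV-level fields."""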
2078 self.size = imageInfo.sizeVirt
2079 if self.parent == '' or (imageInfo.parentUuid != '' and imageInfo.parentUuid != self.parent):
2080 self.parent = imageInfo.parentUuid
2081 self.hidden = imageInfo.hidden
2082 self.loaded = True
2084 def _determineType(self):
2085 """
2086 Determine whether this is a RAW or a COW VDI.
2087 """
2088 if "vdi_ref" in self.sr.srcmd.params:
2089 vdi_ref = self.sr.srcmd.params["vdi_ref"]
2090 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2091 if sm_config.get("vdi_type"):
2092 self._setType(sm_config["vdi_type"])
2093 prefix = LV_PREFIX[self.vdi_type]
2094 self.lvname = "%s%s" % (prefix, self.uuid)
2095 self.path = os.path.join(self.sr.path, self.lvname)
2096 self.sm_config_override = sm_config
2097 return True
2099 # LVM commands can be costly, so check the file directly first in case
2100 # the LV is active
2101 found = False
2102 for vdi_type, prefix in LV_PREFIX.items():
2103 lvname = "%s%s" % (prefix, self.uuid)
2104 path = os.path.join(self.sr.path, lvname)
2105 if util.pathexists(path):
2106 if found:
2107 raise xs_errors.XenError('VDILoad',
2108 opterr="multiple VDI's: uuid %s" % self.uuid)
2109 found = True
2110 self._setType(vdi_type)
2111 self.lvname = lvname
2112 self.path = path
2113 if found:
2114 return True
2116 # now list all LVs
2117 if not lvutil._checkVG(self.sr.vgname):
2118 # when doing attach_from_config, the VG won't be there yet
2119 return False
2121 lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache)
2122 if lvs.get(self.uuid):
2123 self._initFromLVInfo(lvs[self.uuid])
2124 return True
2125 return False
2127 def _loadThis(self):
2128 """
2129 Load VDI info for this VDI and activate the LV if it's COW. We
2130 don't do it in VDI.load() because not all VDI operations need it.
2131 """
2132 if self.loaded:
2133 if VdiType.isCowImage(self.vdi_type):
2134 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2135 return
2136 try:
2137 lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache, self.lvname)
2138 except util.CommandException as e:
2139 raise xs_errors.XenError('VDIUnavailable',
2140 opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
2141 if not lvs.get(self.uuid):
2142 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
2143 self._initFromLVInfo(lvs[self.uuid])
2144 if VdiType.isCowImage(self.vdi_type):
2145 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2146 imageInfo = self.cowutil.getInfo(self.path, LvmCowUtil.extractUuid, False)
2147 if not imageInfo:
2148 raise xs_errors.XenError('VDIUnavailable', opterr='getInfo failed')
2149 self._initFromImageInfo(imageInfo)
2150 self.loaded = True
2152 def _chainSetActive(self, active, binary, persistent=False):
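"""Activate, or queue for deactivation, every LV in this VDI's image
chain; the binary refcount only applies to the leaf."""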
2153 if binary:
2154 (count, bcount) = RefCounter.checkLocked(self.uuid,
2155 NS_PREFIX_LVM + self.sr.uuid)
2156 if (active and bcount > 0) or (not active and bcount == 0):
2157 return # this is a redundant activation/deactivation call
2159 vdiList = {self.uuid: self.lvname}
2160 if VdiType.isCowImage(self.vdi_type):
2161 vdiList = self.cowutil.getParentChain(self.lvname, LvmCowUtil.extractUuid, self.sr.vgname)
2162 for uuid, lvName in vdiList.items():
2163 binaryParam = binary
2164 if uuid != self.uuid:
2165 binaryParam = False # binary param only applies to leaf nodes
2166 if active:
2167 self.sr.lvActivator.activate(uuid, lvName, binaryParam,
2168 persistent)
2169 else:
2170 # just add the LVs for deactivation in the final (cleanup)
2171 # step. The LVs must not have been activated during the current
2172 # operation
2173 self.sr.lvActivator.add(uuid, lvName, binaryParam)
2175 def _failClone(self, uuid, jval, msg):
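"""Roll back an interrupted clone via its journal entry and raise a VDIClone error."""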
2176 try:
2177 self.sr._handleInterruptedCloneOp(uuid, jval, True)
2178 self.sr.journaler.remove(self.JRN_CLONE, uuid)
2179 except Exception as e:
2180 util.SMlog('WARNING: failed to clean up failed snapshot: ' \
2181 '%s (error ignored)' % e)
2182 raise xs_errors.XenError('VDIClone', opterr=msg)
2184 def _markHidden(self):
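"""Hide the VDI at the LVM level (RAW) or inside the image (COW) and record the fact."""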
2185 if not VdiType.isCowImage(self.vdi_type):
2186 self.sr.lvmCache.setHidden(self.lvname)
2187 else:
2188 self.cowutil.setHidden(self.path)
2189 self.hidden = 1
2191 def _prepareThin(self, attach, vdiType):
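"""Inflate (attach=True) or deflate (attach=False) a thin COW VDI,
delegating to the pool master via the thin-provisioning plugin when
run on a slave."""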
2192 origUtilisation = self.sr.lvmCache.getSize(self.lvname)
2193 if self.sr.isMaster:
2194 # the master can prepare the VDI locally
2195 if attach:
2196 self.lvmcowutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type)
2197 else:
2198 self.lvmcowutil.detachThin(self.session, self.sr.lvmCache, self.sr.uuid, self.uuid, self.vdi_type)
2199 else:
2200 fn = "attach"
2201 if not attach:
2202 fn = "detach"
2203 pools = self.session.xenapi.pool.get_all()
2204 master = self.session.xenapi.pool.get_master(pools[0])
2205 rv = self.session.xenapi.host.call_plugin(
2206 master,
2207 self.sr.THIN_PLUGIN,
2208 fn,
2209 {
2210 "srUuid": self.sr.uuid,
2211 "vdiUuid": self.uuid,
2212 "vdiType": vdiType
2213 }
2214 )
2215 util.SMlog("call-plugin returned: %s" % rv)
2216 if not rv:
2217 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
2218 # refresh to pick up the size change on this slave
2219 self.sr.lvmCache.activateNoRefcount(self.lvname, True)
2221 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
2222 if origUtilisation != self.utilisation:
2223 vdi_ref = self.sr.srcmd.params['vdi_ref']
2224 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
2225 str(self.utilisation))
2226 stats = lvutil._getVGstats(self.sr.vgname)
2227 sr_utilisation = stats['physical_utilisation']
2228 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
2229 str(sr_utilisation))
2231 @override
2232 def update(self, sr_uuid, vdi_uuid) -> None:
2233 if self.sr.legacyMode:
2234 return
2236 # Sync the name_label of this VDI on storage with the name_label in XAPI
2237 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2238 update_map = {}
2239 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
2240 METADATA_OBJECT_TYPE_VDI
2241 update_map[UUID_TAG] = self.uuid
2242 update_map[NAME_LABEL_TAG] = util.to_plain_string( \
2243 self.session.xenapi.VDI.get_name_label(vdi_ref))
2244 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
2245 self.session.xenapi.VDI.get_name_description(vdi_ref))
2246 update_map[SNAPSHOT_TIME_TAG] = \
2247 self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
2248 update_map[METADATA_OF_POOL_TAG] = \
2249 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
2250 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
2252 @override
2253 def _ensure_cbt_space(self) -> None:
2254 # We pass virtual_size so the required CBT space can be computed correctly for bigger VDIs
2255 self.sr.ensureCBTSpace(self.size)
2257 @override
2258 def _create_cbt_log(self) -> str:
2259 logname = self._get_cbt_logname(self.uuid)
2260 logsize = max(util.roundup(CBT_BLOCK_SIZE, self.size//CBT_BLOCK_SIZE), self.sr.journaler.LV_SIZE)
2261 # We choose 4MiB as the minimum for the log size to maintain the old behavior and compute the correct amount
2262 # if we need a bigger LV for the CBT (can happen with big QCOW2)
2263 self.sr.lvmCache.create(logname, logsize, CBTLOG_TAG)
2264 logpath = super(LVMVDI, self)._create_cbt_log()
2265 self.sr.lvmCache.deactivateNoRefcount(logname)
2266 return logpath
2268 @override
2269 def _delete_cbt_log(self) -> None:
2270 logpath = self._get_cbt_logpath(self.uuid)
2271 if self._cbt_log_exists(logpath):
2272 logname = self._get_cbt_logname(self.uuid)
2273 self.sr.lvmCache.remove(logname)
2275 @override
2276 def _rename(self, oldpath, newpath) -> None:
2277 oldname = os.path.basename(oldpath)
2278 newname = os.path.basename(newpath)
2279 self.sr.lvmCache.rename(oldname, newname)
2281 @override
2282 def update_slaves_on_cbt_disable(self, cbtlog) -> None:
2283 args = {
2284 "vgName": self.sr.vgname,
2285 "action1": "deactivateNoRefcount",
2286 "lvName1": cbtlog
2287 }
2289 host_refs = util.get_hosts_attached_on(self.session, [self.uuid])
2291 message = f"Deactivating {cbtlog}"
2292 self.sr.call_on_slave(args, host_refs, message)
2294 @override
2295 def _activate_cbt_log(self, lv_name) -> bool:
2296 self.sr.lvmCache.refresh()
2297 if not self.sr.lvmCache.is_active(lv_name):
2298 try:
2299 self.sr.lvmCache.activateNoRefcount(lv_name)
2300 return True
2301 except Exception as e:
2302 util.SMlog("Exception in _activate_cbt_log, "
2303 "Error: %s." % str(e))
2304 raise
2305 else:
2306 return False
2308 @override
2309 def _deactivate_cbt_log(self, lv_name) -> None:
2310 try:
2311 self.sr.lvmCache.deactivateNoRefcount(lv_name)
2312 except Exception as e:
2313 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
2314 raise
2316 @override
2317 def _cbt_log_exists(self, logpath) -> bool:
2318 return lvutil.exists(logpath)
2320if __name__ == '__main__':
2321 SRCommand.run(LVMSR, DRIVER_INFO)
2322else:
2323 SR.registerSR(LVMSR)