Coverage for drivers/LVMSR.py: 48%
1 #!/usr/bin/python3
2 #
3 # Copyright (C) Citrix Systems Inc.
4 #
5 # This program is free software; you can redistribute it and/or modify
6 # it under the terms of the GNU Lesser General Public License as published
7 # by the Free Software Foundation; version 2.1 only.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Lesser General Public License for more details.
13 #
14 # You should have received a copy of the GNU Lesser General Public License
15 # along with this program; if not, write to the Free Software Foundation, Inc.,
16 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 #
18 # LVMSR: VHD and QCOW2 on LVM storage repository
19 #
21 from sm_typing import Dict, List, override
23 import SR
24 from SR import deviceCheck
25 import VDI
26 import SRCommand
27 import util
28 import lvutil
29 import lvmcache
30 import scsiutil
31 import lock
32 import os
33 import sys
34 import time
35 import errno
36 import xs_errors
37 import cleanup
38 import blktap2
39 from journaler import Journaler
40 from refcounter import RefCounter
41 from ipc import IPCFlag
42 from constants import NS_PREFIX_LVM, VG_LOCATION, VG_PREFIX
43 from cowutil import CowUtil, getCowUtil, getImageStringFromVdiType, getVdiTypeFromImageFormat
44 from lvmcowutil import LV_PREFIX, LvmCowUtil
45 from lvmanager import LVActivator
46 from vditype import VdiType
47 import XenAPI # pylint: disable=import-error
48 import re
49 from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
50 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
51 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
52 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
53 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
54 from metadata import retrieveXMLfromFile, _parseXML
55 from xmlrpc.client import DateTime
56 import glob
57 from constants import CBTLOG_TAG
58 from fairlock import Fairlock
59 DEV_MAPPER_ROOT = os.path.join('/dev/mapper', VG_PREFIX)
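# Note: device-mapper joins VG and LV names with '-' and escapes literal
# dashes by doubling them, so, assuming the usual VG_PREFIX of
# "VG_XenStorage-", the MGT volume of an SR appears as
# /dev/mapper/VG_XenStorage--<sr_uuid>-MGT. delete() and detach() below
# reverse this mangling with replace('-', '/').replace('//', '-').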
61 geneology: Dict[str, List[str]] = {}
62 CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
63 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
64 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
65 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
66 "VDI_ACTIVATE", "VDI_DEACTIVATE"]
68 CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]
70 DRIVER_INFO = {
71 'name': 'Local VHD and QCOW2 on LVM',
72 'description': 'SR plugin which represents disks as VHD and QCOW2 disks on ' + \
73 'Logical Volumes within a locally-attached Volume Group',
74 'vendor': 'XenSource Inc',
75 'copyright': '(C) 2008 XenSource Inc',
76 'driver_version': '1.0',
77 'required_api_version': '1.0',
78 'capabilities': CAPABILITIES,
79 'configuration': CONFIGURATION
80 }
82 CREATE_PARAM_TYPES = {
83 "raw": VdiType.RAW,
84 "vhd": VdiType.VHD,
85 "qcow2": VdiType.QCOW2
86 }
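# Note: a vdi_create call selects its on-disk format through vdi_sm_config,
# e.g. {"image-format": "qcow2"} (or the legacy "type" key) maps to
# VdiType.QCOW2 via this table; see LVMVDI.load() below.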
88 OPS_EXCLUSIVE = [
89 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
90 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
91 "vdi_clone"]
93 # Log if a snapshot pauses the VM for more than this many seconds
94 LONG_SNAPTIME = 60
96 class LVMSR(SR.SR):
97 DRIVER_TYPE = 'lvhd'
99 PROVISIONING_TYPES = ["thin", "thick"]
100 PROVISIONING_DEFAULT = "thick"
101 THIN_PLUGIN = "lvhd-thin"
103 PLUGIN_ON_SLAVE = "on-slave"
105 FLAG_USE_VHD = "use_vhd"
106 MDVOLUME_NAME = "MGT"
108 ALLOCATION_QUANTUM = "allocation_quantum"
109 INITIAL_ALLOCATION = "initial_allocation"
111 LOCK_RETRY_INTERVAL = 3
112 LOCK_RETRY_ATTEMPTS = 10
114 TEST_MODE_KEY = "testmode"
115 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
116 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
117 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
118 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
119 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
120 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
121 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
123 ENV_VAR_VHD_TEST = {
124 TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
125 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
126 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
127 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
128 TEST_MODE_VHD_FAIL_REPARENT_END:
129 "VHD_UTIL_TEST_FAIL_REPARENT_END",
130 TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
131 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
132 TEST_MODE_VHD_FAIL_RESIZE_DATA:
133 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
134 TEST_MODE_VHD_FAIL_RESIZE_METADATA:
135 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
136 TEST_MODE_VHD_FAIL_RESIZE_END:
137 "VHD_UTIL_TEST_FAIL_RESIZE_END"
138 }
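# Note: each test mode names an environment variable that _prepareTestMode()
# below exports with the value "yes", e.g.
# other-config:testmode=vhd_fail_resize_end sets
# VHD_UTIL_TEST_FAIL_RESIZE_END=yes, presumably making vhd-util fail at that
# stage (fault injection for the reparent/resize paths).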
139 testMode = ""
141 legacyMode = True
143 @override
144 @staticmethod
145 def handles(type) -> bool:
146 """Returns True if this SR class understands the given dconf string"""
147 # we can pose as LVMSR or EXTSR for compatibility purposes
148 if __name__ == '__main__':
149 name = sys.argv[0]
150 else:
151 name = __name__
152 if name.endswith("LVMSR"):
153 return type == "lvm"
154 elif name.endswith("EXTSR"):
155 return type == "ext"
156 return type == LVMSR.DRIVER_TYPE
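# Sketch of the dispatch above (the module/script name selects the alias):
#   LVMSR.handles("lvm")  -> True when running as .../LVMSR
#   LVMSR.handles("ext")  -> True when running as .../EXTSR
#   LVMSR.handles("lvhd") -> True otherwise (the DRIVER_TYPE fallback)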
158 def __init__(self, srcmd, sr_uuid):
159 SR.SR.__init__(self, srcmd, sr_uuid)
160 self._init_preferred_image_formats()
162 @override
163 def load(self, sr_uuid) -> None:
164 self.ops_exclusive = OPS_EXCLUSIVE
166 self.isMaster = False
167 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
168 self.isMaster = True
170 self.lock = lock.Lock(lock.LOCK_TYPE_SR, self.uuid)
171 self.sr_vditype = SR.DEFAULT_TAP
172 self.uuid = sr_uuid
173 self.vgname = VG_PREFIX + self.uuid
174 self.path = os.path.join(VG_LOCATION, self.vgname)
175 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
176 self.provision = self.PROVISIONING_DEFAULT
178 has_sr_ref = self.srcmd.params.get("sr_ref")
179 if has_sr_ref:
180 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
181 else:
182 self.other_conf = None
184 self.lvm_conf = None
185 if self.other_conf:
186 self.lvm_conf = self.other_conf.get('lvm-conf')
188 try:
189 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf)
190 except:
191 raise xs_errors.XenError('SRUnavailable', \
192 opterr='Failed to initialise the LVMCache')
193 self.lvActivator = LVActivator(self.uuid, self.lvmCache)
194 self.journaler = Journaler(self.lvmCache)
195 if not has_sr_ref:
196 return # must be a probe call
197 # Test for thick vs thin provisioning conf parameter
198 if 'allocation' in self.dconf:  # 198 ↛ 199: condition was never true
199 if self.dconf['allocation'] in self.PROVISIONING_TYPES:
200 self.provision = self.dconf['allocation']
201 else:
202 raise xs_errors.XenError('InvalidArg', \
203 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)
205 if self.other_conf.get(self.TEST_MODE_KEY):  # 205 ↛ 209: condition was never false
206 self.testMode = self.other_conf[self.TEST_MODE_KEY]
207 self._prepareTestMode()
209 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
210 # sm_config flag overrides PBD, if any
211 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
212 self.provision = self.sm_config.get('allocation')
214 if self.sm_config.get(self.FLAG_USE_VHD) == "true":
215 self.legacyMode = False
217 if lvutil._checkVG(self.vgname):
218 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",  # 218 ↛ 221: condition was never false
219 "vdi_activate", "vdi_deactivate"]:
220 self._undoAllJournals()
221 if not self.cmd in ["sr_attach", "sr_probe"]:
222 self._checkMetadataVolume()
224 self.mdexists = False
226 # get a VDI -> TYPE map from the storage
227 contains_uuid_regex = \
228 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
229 self.storageVDIs = {}
231 for key in self.lvmCache.lvs.keys():  # 231 ↛ 233: loop never started
232 # if the lvname has a uuid in it
233 type = None
234 vdi = None
235 if contains_uuid_regex.search(key) is not None:
236 for vdi_type, prefix in LV_PREFIX.items():
237 if key.startswith(prefix):
238 vdi = key[len(prefix):]
239 self.storageVDIs[vdi] = vdi_type
240 break
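# Example with a made-up UUID: assuming LV_PREFIX[VdiType.VHD] == "VHD-", an
# LV named "VHD-12345678-1234-1234-1234-123456789012" matches the UUID regex
# and yields storageVDIs["12345678-1234-1234-1234-123456789012"] = VdiType.VHD.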
242 # check if metadata volume exists
243 try:
244 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
245 except:
246 pass
248 @override
249 def cleanup(self) -> None:
250 # we don't need to hold the lock to decrement refcounts of activated LVs
251 if not self.lvActivator.deactivateAll():  # 251 ↛ 252: condition was never true
252 raise util.SMException("failed to deactivate LVs")
254 def updateSRMetadata(self, allocation):
255 try:
256 # Add SR specific SR metadata
257 sr_info = \
258 {ALLOCATION_TAG: allocation,
259 UUID_TAG: self.uuid,
260 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
261 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
262 }
264 vdi_info = {}
265 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
266 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
268 vdi_type = self.session.xenapi.VDI.get_sm_config(vdi).get('vdi_type')
269 if not vdi_type:
270 raise xs_errors.XenError('MetadataError', opterr=f"Missing `vdi_type` for VDI {vdi_uuid}")
272 # Create the VDI entry in the SR metadata
273 vdi_info[vdi_uuid] = \
274 {
275 UUID_TAG: vdi_uuid,
276 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
277 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
278 IS_A_SNAPSHOT_TAG: \
279 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
280 SNAPSHOT_OF_TAG: \
281 self.session.xenapi.VDI.get_snapshot_of(vdi),
282 SNAPSHOT_TIME_TAG: \
283 self.session.xenapi.VDI.get_snapshot_time(vdi),
284 TYPE_TAG: \
285 self.session.xenapi.VDI.get_type(vdi),
286 VDI_TYPE_TAG: \
287 vdi_type,
288 READ_ONLY_TAG: \
289 int(self.session.xenapi.VDI.get_read_only(vdi)),
290 METADATA_OF_POOL_TAG: \
291 self.session.xenapi.VDI.get_metadata_of_pool(vdi),
292 MANAGED_TAG: \
293 int(self.session.xenapi.VDI.get_managed(vdi))
294 }
295 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
297 except Exception as e:
298 raise xs_errors.XenError('MetadataError', \
299 opterr='Error upgrading SR Metadata: %s' % str(e))
301 def syncMetadataAndStorage(self):
302 try:
303 # if a VDI is present in the metadata but not in the storage
304 # then delete it from the metadata
305 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
306 for vdi in list(vdi_info.keys()):
307 update_map = {}
308 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()):  # 308 ↛ 315: condition was never false
309 # delete this from metadata
310 LVMMetadataHandler(self.mdpath). \
311 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
312 else:
313 # search for this in the metadata, compare types
314 # self.storageVDIs is a map of vdi_uuid to vdi_type
315 if vdi_info[vdi][VDI_TYPE_TAG] != \
316 self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
317 # storage type takes authority
318 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
319 = METADATA_OBJECT_TYPE_VDI
320 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
321 update_map[VDI_TYPE_TAG] = \
322 self.storageVDIs[vdi_info[vdi][UUID_TAG]]
323 LVMMetadataHandler(self.mdpath) \
324 .updateMetadata(update_map)
325 else:
326 # This should never happen
327 pass
329 except Exception as e:
330 raise xs_errors.XenError('MetadataError', \
331 opterr='Error synching SR Metadata and storage: %s' % str(e))
333 def syncMetadataAndXapi(self):
334 try:
335 # get metadata
336 (sr_info, vdi_info) = \
337 LVMMetadataHandler(self.mdpath, False).getMetadata()
339 # First synch SR parameters
340 self.update(self.uuid)
342 # Now update the VDI information in the metadata if required
343 for vdi_offset in vdi_info.keys():
344 try:
345 vdi_ref = \
346 self.session.xenapi.VDI.get_by_uuid( \
347 vdi_info[vdi_offset][UUID_TAG])
348 except:
349 # maybe the VDI is not in XAPI yet; don't bother
350 continue
352 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
353 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))
355 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
356 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
357 new_name_description:
358 update_map = {}
359 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
360 METADATA_OBJECT_TYPE_VDI
361 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
362 update_map[NAME_LABEL_TAG] = new_name_label
363 update_map[NAME_DESCRIPTION_TAG] = new_name_description
364 LVMMetadataHandler(self.mdpath) \
365 .updateMetadata(update_map)
366 except Exception as e:
367 raise xs_errors.XenError('MetadataError', \
368 opterr='Error synching SR Metadata and XAPI: %s' % str(e))
370 def _checkMetadataVolume(self):
371 util.SMlog("Entering _checkMetadataVolume")
372 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
373 if self.isMaster:  # 373 ↛ 389: condition was never false
374 if self.mdexists and self.cmd == "sr_attach":
375 try:
376 # activate the management volume
377 # will be deactivated at detach time
378 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
379 self._synchSmConfigWithMetaData()
380 util.SMlog("Sync SR metadata and the state on the storage.")
381 self.syncMetadataAndStorage()
382 self.syncMetadataAndXapi()
383 except Exception as e:
384 util.SMlog("Exception in _checkMetadataVolume, " \
385 "Error: %s." % str(e))
386 elif not self.mdexists and not self.legacyMode:  # 386 ↛ 389: condition was never false
387 self._introduceMetaDataVolume()
389 if self.mdexists:
390 self.legacyMode = False
392 def _synchSmConfigWithMetaData(self):
393 util.SMlog("Synching sm-config with metadata volume")
395 try:
396 # get SR info from metadata
397 sr_info = {}
398 map = {}
399 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]
401 if sr_info == {}:  # 401 ↛ 402: condition was never true
402 raise Exception("Failed to get SR information from metadata.")
404 if "allocation" in sr_info: 404 ↛ 408line 404 didn't jump to line 408, because the condition on line 404 was never false
405 self.provision = sr_info.get("allocation")
406 map['allocation'] = sr_info.get("allocation")
407 else:
408 raise Exception("Allocation key not found in SR metadata. "
409 "SR info found: %s" % sr_info)
411 except Exception as e:
412 raise xs_errors.XenError(
413 'MetadataError',
414 opterr='Error reading SR params from '
415 'metadata Volume: %s' % str(e))
416 try:
417 map[self.FLAG_USE_VHD] = 'true'
418 self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
419 except:
420 raise xs_errors.XenError(
421 'MetadataError',
422 opterr='Error updating sm_config key')
424 def _introduceMetaDataVolume(self):
425 util.SMlog("Creating Metadata volume")
426 try:
427 config = {}
428 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)
430 # activate the management volume, will be deactivated at detach time
431 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
433 name_label = util.to_plain_string( \
434 self.session.xenapi.SR.get_name_label(self.sr_ref))
435 name_description = util.to_plain_string( \
436 self.session.xenapi.SR.get_name_description(self.sr_ref))
437 config[self.FLAG_USE_VHD] = "true"
438 config['allocation'] = self.provision
439 self.session.xenapi.SR.set_sm_config(self.sr_ref, config)
441 # Add the SR metadata
442 self.updateSRMetadata(self.provision)
443 except Exception as e:
444 raise xs_errors.XenError('MetadataError', \
445 opterr='Error introducing Metadata Volume: %s' % str(e))
447 def _removeMetadataVolume(self):
448 if self.mdexists:
449 try:
450 self.lvmCache.remove(self.MDVOLUME_NAME)
451 except:
452 raise xs_errors.XenError('MetadataError', \
453 opterr='Failed to delete MGT Volume')
455 def _refresh_size(self):
456 """
457 Refreshes the size of the backing device.
458 Returns True if all paths/devices agree on the same size.
459 """
460 if hasattr(self, 'SCSIid'):  # 460 ↛ 462: condition was never true
461 # LVMoHBASR, LVMoISCSISR
462 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
463 else:
464 # LVMSR
465 devices = self.dconf['device'].split(',')
466 scsiutil.refreshdev(devices)
467 return True
469 def _expand_size(self):
470 """
471 Expands the size of the SR by growing into additional available
472 space, if extra space is available on the backing device.
473 Needs to be called after a successful call of _refresh_size.
474 """
475 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
476 # We are comparing PV sizes with VG sizes that are aligned, so a threshold is needed
477 resizethreshold = 100 * 1024 * 1024 # 100 MiB
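# Example: if a 1 TiB LUN backing this VG is grown to 2 TiB, totaldevicesize
# exceeds currentvgsize by far more than the 100 MiB threshold, so every PV
# in the VG gets resized below.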
478 devices = self.dconf['device'].split(',')
479 totaldevicesize = 0
480 for device in devices:
481 totaldevicesize = totaldevicesize + scsiutil.getsize(device)
482 if totaldevicesize >= (currentvgsize + resizethreshold):
483 try:
484 if hasattr(self, 'SCSIid'):  # 484 ↛ 486: condition was never true
485 # LVMoHBASR, LVMoISCSISR might have slaves
486 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
487 getattr(self, 'SCSIid'))
488 util.SMlog("LVMSR._expand_size for %s will resize the pv." %
489 self.uuid)
490 for pv in lvutil.get_pv_for_vg(self.vgname):
491 lvutil.resizePV(pv)
492 except:
493 util.logException("LVMSR._expand_size for %s failed to resize"
494 " the PV" % self.uuid)
496 @override
497 @deviceCheck
498 def create(self, uuid, size) -> None:
499 util.SMlog("LVMSR.create for %s" % self.uuid)
500 if not self.isMaster:
501 util.SMlog('sr_create blocked for non-master')
502 raise xs_errors.XenError('LVMMaster')
504 if lvutil._checkVG(self.vgname):
505 raise xs_errors.XenError('SRExists')
507 # Check that none of the devices are already in use by other PBDs
508 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
509 raise xs_errors.XenError('SRInUse')
511 # Check serial number entry in SR records
512 for dev in self.dconf['device'].split(','):
513 if util.test_scsiserial(self.session, dev):
514 raise xs_errors.XenError('SRInUse')
516 lvutil.createVG(self.dconf['device'], self.vgname)
518 # Update serial number string
519 scsiutil.add_serial_record(self.session, self.sr_ref, \
520 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
522 # since this is an SR.create, turn off legacy mode
523 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
524 self.FLAG_USE_VHD, 'true')
526 @override
527 def delete(self, uuid) -> None:
528 util.SMlog("LVMSR.delete for %s" % self.uuid)
529 if not self.isMaster:
530 raise xs_errors.XenError('LVMMaster')
531 cleanup.gc_force(self.session, self.uuid)
533 success = True
534 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
535 if util.extractSRFromDevMapper(fileName) != self.uuid:
536 continue
538 if util.doesFileHaveOpenHandles(fileName):
539 util.SMlog("LVMSR.delete: The dev mapper entry %s has open " \
540 "handles" % fileName)
541 success = False
542 continue
544 # Now attempt to remove the dev mapper entry
545 if not lvutil.removeDevMapperEntry(fileName, False):
546 success = False
547 continue
549 try:
550 lvname = os.path.basename(fileName.replace('-', '/'). \
551 replace('//', '-'))
552 lpath = os.path.join(self.path, lvname)
553 os.unlink(lpath)
554 except OSError as e:
555 if e.errno != errno.ENOENT:
556 util.SMlog("LVMSR.delete: failed to remove the symlink for " \
557 "file %s. Error: %s" % (fileName, str(e)))
558 success = False
560 if success:
561 try:
562 if util.pathexists(self.path):
563 os.rmdir(self.path)
564 except Exception as e:
565 util.SMlog("LVMSR.delete: failed to remove the symlink " \
566 "directory %s. Error: %s" % (self.path, str(e)))
567 success = False
569 self._removeMetadataVolume()
570 self.lvmCache.refresh()
571 if LvmCowUtil.getVolumeInfo(self.lvmCache):
572 raise xs_errors.XenError('SRNotEmpty')
574 if not success:
575 raise Exception("LVMSR delete failed, please refer to the log " \
576 "for details.")
578 lvutil.removeVG(self.dconf['device'], self.vgname)
579 self._cleanup()
581 @override
582 def attach(self, uuid) -> None:
583 util.SMlog("LVMSR.attach for %s" % self.uuid)
585 self._cleanup(True) # in case of host crashes, if detach wasn't called
587 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname):  # 587 ↛ 588: condition was never true
588 raise xs_errors.XenError('SRUnavailable', \
589 opterr='no such volume group: %s' % self.vgname)
591 # Refresh the metadata status
592 self._checkMetadataVolume()
594 refreshsizeok = self._refresh_size()
596 if self.isMaster:  # 596 ↛ 607: condition was never false
597 if refreshsizeok:  # 597 ↛ 601: condition was never false
598 self._expand_size()
600 # Update SCSIid string
601 util.SMlog("Calling devlist_to_serial")
602 scsiutil.add_serial_record(
603 self.session, self.sr_ref,
604 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
606 # Test Legacy Mode Flag and update if COW volumes exist
607 if self.isMaster and self.legacyMode:  # 607 ↛ 608: condition was never true
608 vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
609 for uuid, info in vdiInfo.items():
610 if VdiType.isCowImage(info.vdiType):
611 self.legacyMode = False
612 map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
613 self._introduceMetaDataVolume()
614 break
616 # Set the block scheduler
617 for dev in self.dconf['device'].split(','):
618 self.block_setscheduler(dev)
620 @override
621 def detach(self, uuid) -> None:
622 util.SMlog("LVMSR.detach for %s" % self.uuid)
623 cleanup.abort(self.uuid)
625 # Do a best effort cleanup of the dev mapper entries
626 # go through all devmapper entries for this VG
627 success = True
628 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
629 if util.extractSRFromDevMapper(fileName) != self.uuid:  # 629 ↛ 630: condition was never true
630 continue
632 with Fairlock('devicemapper'):
633 # check if any file has open handles
634 if util.doesFileHaveOpenHandles(fileName):
635 # if yes, log this and signal failure
636 util.SMlog(
637 f"LVMSR.detach: The dev mapper entry {fileName} has "
638 "open handles")
639 success = False
640 continue
642 # Now attempt to remove the dev mapper entry
643 if not lvutil.removeDevMapperEntry(fileName, False):  # 643 ↛ 644: condition was never true
644 success = False
645 continue
647 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
648 try:
649 lvname = os.path.basename(fileName.replace('-', '/'). \
650 replace('//', '-'))
651 lvname = os.path.join(self.path, lvname)
652 util.force_unlink(lvname)
653 except Exception as e:
654 util.SMlog("LVMSR.detach: failed to remove the symlink for " \
655 "file %s. Error: %s" % (fileName, str(e)))
656 success = False
658 # now remove the directory where the symlinks are
659 # this should pass as the directory should be empty by now
660 if success:
661 try:
662 if util.pathexists(self.path):  # 662 ↛ 663: condition was never true
663 os.rmdir(self.path)
664 except Exception as e:
665 util.SMlog("LVMSR.detach: failed to remove the symlink " \
666 "directory %s. Error: %s" % (self.path, str(e)))
667 success = False
669 if not success:
670 raise Exception("SR detach failed, please refer to the log " \
671 "for details.")
673 # Don't delete lock files on the master as it will break the locking
674 # between SM and any GC thread that survives through SR.detach.
675 # However, we should still delete lock files on slaves as it is the
676 # only place to do so.
677 self._cleanup(self.isMaster)
679 @override
680 def forget_vdi(self, uuid) -> None:
681 if not self.legacyMode:
682 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
683 super(LVMSR, self).forget_vdi(uuid)
685 @override
686 def scan(self, uuid) -> None:
687 activated_lvs = set()
688 try:
689 util.SMlog("LVMSR.scan for %s" % self.uuid)
690 if not self.isMaster:  # 690 ↛ 691: condition was never true
691 util.SMlog('sr_scan blocked for non-master')
692 raise xs_errors.XenError('LVMMaster')
694 if self._refresh_size():  # 694 ↛ 696: condition was never false
695 self._expand_size()
696 self.lvmCache.refresh()
697 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
698 self._loadvdis()
699 stats = lvutil._getVGstats(self.vgname)
700 self.physical_size = stats['physical_size']
701 self.physical_utilisation = stats['physical_utilisation']
703 # Now check if there are any VDIs in the metadata, which are not in
704 # XAPI
705 if self.mdexists:  # 705 ↛ 816: condition was never false
706 vdiToSnaps: Dict[str, List[str]] = {}
707 # get VDIs from XAPI
708 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
709 vdi_uuids = set([])
710 for vdi in vdis:
711 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
713 info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
715 for vdi in list(info.keys()):
716 vdi_uuid = info[vdi][UUID_TAG]
717 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):  # 717 ↛ 718: condition was never true
718 if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
719 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
720 else:
721 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
723 if vdi_uuid not in vdi_uuids:
724 util.SMlog("Introduce VDI %s as it is present in " \
725 "metadata and not in XAPI." % vdi_uuid)
726 vdi_type = info[vdi][VDI_TYPE_TAG]
727 sm_config = {}
728 sm_config['vdi_type'] = vdi_type
729 lvname = "%s%s" % (LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
730 self.lvActivator.activate(
731 vdi_uuid, lvname, LVActivator.NORMAL)
732 activated_lvs.add(vdi_uuid)
733 lvPath = os.path.join(self.path, lvname)
735 if not VdiType.isCowImage(vdi_type):  # 735 ↛ 736: condition was never true
736 size = self.lvmCache.getSize(LV_PREFIX[vdi_type] + vdi_uuid)
737 utilisation = \
738 util.roundup(lvutil.LVM_SIZE_INCREMENT,
739 int(size))
740 else:
741 cowutil = getCowUtil(vdi_type)
742 lvmcowutil = LvmCowUtil(cowutil)
744 parent = cowutil.getParentNoCheck(lvPath)
746 if parent is not None:  # 746 ↛ 747: condition was never true
747 sm_config['vhd-parent'] = parent[parent.find('-') + 1:]
748 size = cowutil.getSizeVirt(lvPath)
749 if self.provision == "thin":  # 749 ↛ 750: condition was never true
750 utilisation = util.roundup(
751 lvutil.LVM_SIZE_INCREMENT,
752 cowutil.calcOverheadEmpty(max(size, cowutil.getDefaultPreallocationSizeVirt()))
753 )
754 else:
755 utilisation = lvmcowutil.calcVolumeSize(int(size))
757 vdi_ref = self.session.xenapi.VDI.db_introduce(
758 vdi_uuid,
759 info[vdi][NAME_LABEL_TAG],
760 info[vdi][NAME_DESCRIPTION_TAG],
761 self.sr_ref,
762 info[vdi][TYPE_TAG],
763 False,
764 bool(int(info[vdi][READ_ONLY_TAG])),
765 {},
766 vdi_uuid,
767 {},
768 sm_config)
770 self.session.xenapi.VDI.set_managed(vdi_ref,
771 bool(int(info[vdi][MANAGED_TAG])))
772 self.session.xenapi.VDI.set_virtual_size(vdi_ref,
773 str(size))
774 self.session.xenapi.VDI.set_physical_utilisation( \
775 vdi_ref, str(utilisation))
776 self.session.xenapi.VDI.set_is_a_snapshot( \
777 vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG])))
778 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):  # 778 ↛ 779: condition was never true
779 self.session.xenapi.VDI.set_snapshot_time( \
780 vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG]))
781 if info[vdi][TYPE_TAG] == 'metadata':  # 781 ↛ 782: condition was never true
782 self.session.xenapi.VDI.set_metadata_of_pool( \
783 vdi_ref, info[vdi][METADATA_OF_POOL_TAG])
785 # Update CBT status of disks either just added
786 # or already in XAPI
787 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
788 if cbt_logname in cbt_vdis:  # 788 ↛ 789: condition was never true
789 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
790 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
791 # For existing VDIs, update local state too
792 # Scan in base class SR updates existing VDIs
793 # again based on local states
794 if vdi_uuid in self.vdis:
795 self.vdis[vdi_uuid].cbt_enabled = True
796 cbt_vdis.remove(cbt_logname)
798 # Now set the snapshot statuses correctly in XAPI
799 for srcvdi in vdiToSnaps.keys():  # 799 ↛ 800: loop never started
800 try:
801 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
802 except:
803 # the source VDI no longer exists, continue
804 continue
806 for snapvdi in vdiToSnaps[srcvdi]:
807 try:
808 # this might fail in cases where it's already set
809 snapref = \
810 self.session.xenapi.VDI.get_by_uuid(snapvdi)
811 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
812 except Exception as e:
813 util.SMlog("Setting snapshot failed. " \
814 "Error: %s" % str(e))
816 if cbt_vdis:  # 816 ↛ 827: condition was never false
817 # If we have items remaining in this list,
818 # they are cbt_metadata VDI that XAPI doesn't know about
819 # Add them to self.vdis and they'll get added to the DB
820 for cbt_vdi in cbt_vdis:  # 820 ↛ 821: loop never started
821 cbt_uuid = cbt_vdi.split(".")[0]
822 new_vdi = self.vdi(cbt_uuid)
823 new_vdi.ty = "cbt_metadata"
824 new_vdi.cbt_enabled = True
825 self.vdis[cbt_uuid] = new_vdi
827 super(LVMSR, self).scan(uuid)
828 self._kickGC()
830 finally:
831 for vdi in activated_lvs:
832 self.lvActivator.deactivate(
833 vdi, LVActivator.NORMAL, False)
835 @override
836 def update(self, uuid) -> None:
837 if not lvutil._checkVG(self.vgname):  # 837 ↛ 838: condition was never true
838 return
839 self._updateStats(uuid, 0)
841 if self.legacyMode:  # 841 ↛ 842: condition was never true
842 return
844 # synch name_label in metadata with XAPI
845 update_map = {}
846 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
847 METADATA_OBJECT_TYPE_SR,
848 NAME_LABEL_TAG: util.to_plain_string( \
849 self.session.xenapi.SR.get_name_label(self.sr_ref)),
850 NAME_DESCRIPTION_TAG: util.to_plain_string( \
851 self.session.xenapi.SR.get_name_description(self.sr_ref))
852 }
853 LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
855 def _updateStats(self, uuid, virtAllocDelta):
856 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
857 self.virtual_allocation = valloc + virtAllocDelta
858 util.SMlog("Setting virtual_allocation of SR %s to %d" %
859 (uuid, self.virtual_allocation))
860 stats = lvutil._getVGstats(self.vgname)
861 self.physical_size = stats['physical_size']
862 self.physical_utilisation = stats['physical_utilisation']
863 self._db_update()
865 @override
866 @deviceCheck
867 def probe(self) -> str:
868 return lvutil.srlist_toxml(
869 lvutil.scan_srlist(VG_PREFIX, self.dconf['device']),
870 VG_PREFIX,
871 ('metadata' in self.srcmd.params['sr_sm_config'] and \
872 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
874 @override
875 def vdi(self, uuid) -> VDI.VDI:
876 return LVMVDI(self, uuid)
878 def _loadvdis(self):
879 self.virtual_allocation = 0
880 self.vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
881 self.allVDIs = {}
883 for uuid, info in self.vdiInfo.items():
884 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):  # 884 ↛ 885: condition was never true
885 continue
886 if info.scanError:  # 886 ↛ 887: condition was never true
887 raise xs_errors.XenError('VDIUnavailable', \
888 opterr='Error scanning VDI %s' % uuid)
889 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
890 if not self.vdis[uuid].hidden:  # 890 ↛ 883: condition was never false
891 self.virtual_allocation += self.vdis[uuid].utilisation
893 for uuid, vdi in self.vdis.items():
894 if vdi.parent:  # 894 ↛ 895: condition was never true
895 if vdi.parent in self.vdis:
896 self.vdis[vdi.parent].read_only = True
897 if vdi.parent in geneology:
898 geneology[vdi.parent].append(uuid)
899 else:
900 geneology[vdi.parent] = [uuid]
902 # Now remove all hidden leaf nodes to avoid introducing records that
903 # will be GC'ed
904 for uuid in list(self.vdis.keys()):
905 if uuid not in geneology and self.vdis[uuid].hidden:  # 905 ↛ 906: condition was never true
906 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
907 del self.vdis[uuid]
909 def _ensureSpaceAvailable(self, amount_needed):
910 space_available = lvutil._getVGstats(self.vgname)['freespace']
911 if (space_available < amount_needed):
912 util.SMlog("Not enough space! free space: %d, need: %d" % \
913 (space_available, amount_needed))
914 raise xs_errors.XenError('SRNoSpace')
916 def _handleInterruptedCloneOps(self):
917 entries = self.journaler.getAll(LVMVDI.JRN_CLONE)
918 for uuid, val in entries.items():  # 918 ↛ 919: loop never started
919 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
920 self._handleInterruptedCloneOp(uuid, val)
921 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
922 self.journaler.remove(LVMVDI.JRN_CLONE, uuid)
924 def _handleInterruptedCoalesceLeaf(self):
925 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
926 if len(entries) > 0:  # 926 ↛ 927: condition was never true
927 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
928 cleanup.gc_force(self.session, self.uuid)
929 self.lvmCache.refresh()
931 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
932 """Either roll back or finalize the interrupted snapshot/clone
933 operation. Rolling back is unsafe if the leaf images have already been
934 in use and written to. However, it is always safe to roll back while
935 we're still in the context of the failed snapshot operation since the
936 VBD is paused for the duration of the operation"""
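# Note: the journal value packs both derived UUIDs as "<baseUuid>_<clonUuid>"
# (clonUuid is empty when the operation created no clone leaf); the split
# below unpacks it.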
937 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
938 lvs = LvmCowUtil.getVolumeInfo(self.lvmCache)
939 baseUuid, clonUuid = jval.split("_")
941 # is there a "base copy" VDI?
942 if not lvs.get(baseUuid):
943 # no base copy: make sure the original is there
944 if lvs.get(origUuid):
945 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
946 return
947 raise util.SMException("base copy %s not present, " \
948 "but no original %s found" % (baseUuid, origUuid))
950 vdis = LvmCowUtil.getVDIInfo(self.lvmCache)
951 base = vdis[baseUuid]
952 cowutil = getCowUtil(base.vdiType)
954 if forceUndo:
955 util.SMlog("Explicit revert")
956 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
957 return
959 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
960 util.SMlog("One or both leaves missing => revert")
961 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
962 return
964 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
965 util.SMlog("One or both leaves invalid => revert")
966 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
967 return
969 orig = vdis[origUuid]
970 self.lvActivator.activate(baseUuid, base.lvName, False)
971 self.lvActivator.activate(origUuid, orig.lvName, False)
972 if orig.parentUuid != baseUuid:
973 parent = vdis[orig.parentUuid]
974 self.lvActivator.activate(parent.uuid, parent.lvName, False)
975 origPath = os.path.join(self.path, orig.lvName)
977 if cowutil.check(origPath) != CowUtil.CheckResult.Success:
978 util.SMlog("Orig image invalid => revert")
979 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
980 return
982 if clonUuid:
983 clon = vdis[clonUuid]
984 clonPath = os.path.join(self.path, clon.lvName)
985 self.lvActivator.activate(clonUuid, clon.lvName, False)
986 if cowutil.check(clonPath) != CowUtil.CheckResult.Success:
987 util.SMlog("Clon image invalid => revert")
988 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
989 return
991 util.SMlog("Snapshot appears valid, will not roll back")
992 self._completeCloneOp(cowutil, vdis, origUuid, baseUuid, clonUuid)
994 def _undoCloneOp(self, cowutil, lvs, origUuid, baseUuid, clonUuid):
995 base = lvs[baseUuid]
996 basePath = os.path.join(self.path, base.name)
998 # make the parent RW
999 if base.readonly:
1000 self.lvmCache.setReadonly(base.name, False)
1002 ns = NS_PREFIX_LVM + self.uuid
1003 origRefcountBinary = RefCounter.check(origUuid, ns)[1]
1004 origRefcountNormal = 0
1006 # un-hide the parent
1007 if VdiType.isCowImage(base.vdiType):
1008 self.lvActivator.activate(baseUuid, base.name, False)
1009 origRefcountNormal = 1
1010 imageInfo = cowutil.getInfo(basePath, LvmCowUtil.extractUuid, False)
1011 if imageInfo.hidden:
1012 cowutil.setHidden(basePath, False)
1013 elif base.hidden:
1014 self.lvmCache.setHidden(base.name, False)
1016 # remove the child nodes
1017 if clonUuid and lvs.get(clonUuid):
1018 if not VdiType.isCowImage(lvs[clonUuid].vdiType):
1019 raise util.SMException("clone %s not a COW image" % clonUuid)
1020 self.lvmCache.remove(lvs[clonUuid].name)
1021 if self.lvActivator.get(clonUuid, False):
1022 self.lvActivator.remove(clonUuid, False)
1023 if lvs.get(origUuid):
1024 self.lvmCache.remove(lvs[origUuid].name)
1026 # inflate the parent to fully-allocated size
1027 if VdiType.isCowImage(base.vdiType):
1028 lvmcowutil = LvmCowUtil(cowutil)
1029 fullSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
1030 lvmcowutil.inflate(self.journaler, self.uuid, baseUuid, base.vdiType, fullSize)
1032 # rename back
1033 origLV = LV_PREFIX[base.vdiType] + origUuid
1034 self.lvmCache.rename(base.name, origLV)
1035 RefCounter.reset(baseUuid, ns)
1036 if self.lvActivator.get(baseUuid, False):
1037 self.lvActivator.replace(baseUuid, origUuid, origLV, False)
1038 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
1040 # At this stage, tapdisk and SM vdi will be in paused state. Remove
1041 # flag to facilitate vm deactivate
1042 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
1043 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
1045 # update LVM metadata on slaves
1046 slaves = util.get_slaves_attached_on(self.session, [origUuid])
1047 LvmCowUtil.refreshVolumeOnSlaves(self.session, self.uuid, self.vgname,
1048 origLV, origUuid, slaves)
1050 util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
1052 def _completeCloneOp(self, cowutil, vdis, origUuid, baseUuid, clonUuid):
1053 """Finalize the interrupted snapshot/clone operation. This must not be
1054 called from the live snapshot op context because we attempt to pause/
1055 unpause the VBD here (the VBD is already paused during snapshot, so it
1056 would cause a deadlock)"""
1057 base = vdis[baseUuid]
1058 clon = None
1059 if clonUuid:
1060 clon = vdis[clonUuid]
1062 cleanup.abort(self.uuid)
1064 # make sure the parent is hidden and read-only
1065 if not base.hidden:
1066 if not VdiType.isCowImage(base.vdiType):
1067 self.lvmCache.setHidden(base.lvName)
1068 else:
1069 basePath = os.path.join(self.path, base.lvName)
1070 cowutil.setHidden(basePath)
1071 if not base.lvReadonly:
1072 self.lvmCache.setReadonly(base.lvName, True)
1074 # NB: since this snapshot-preserving call is only invoked outside the
1075 # snapshot op context, we assume the LVM metadata on the involved slave
1076 # has by now been refreshed and do not attempt to do it here
1078 # Update the original record
1079 try:
1080 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
1081 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
1082 type = self.session.xenapi.VDI.get_type(vdi_ref)
1083 sm_config["vdi_type"] = vdis[origUuid].vdiType
1084 sm_config['vhd-parent'] = baseUuid
1085 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
1086 except XenAPI.Failure:
1087 util.SMlog("ERROR updating the orig record")
1089 # introduce the new VDI records
1090 if clonUuid:
1091 try:
1092 clon_vdi = VDI.VDI(self, clonUuid)
1093 clon_vdi.read_only = False
1094 clon_vdi.location = clonUuid
1095 clon_vdi.utilisation = clon.sizeLV
1096 clon_vdi.sm_config = {
1097 "vdi_type": clon.vdiType,
1098 "vhd-parent": baseUuid}
1100 if not self.legacyMode:
1101 LVMMetadataHandler(self.mdpath). \
1102 ensureSpaceIsAvailableForVdis(1)
1104 clon_vdi_ref = clon_vdi._db_introduce()
1105 util.SMlog("introduced clon VDI: %s (%s)" % \
1106 (clon_vdi_ref, clonUuid))
1108 vdi_info = {UUID_TAG: clonUuid,
1109 NAME_LABEL_TAG: clon_vdi.label,
1110 NAME_DESCRIPTION_TAG: clon_vdi.description,
1111 IS_A_SNAPSHOT_TAG: 0,
1112 SNAPSHOT_OF_TAG: '',
1113 SNAPSHOT_TIME_TAG: '',
1114 TYPE_TAG: type,
1115 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
1116 READ_ONLY_TAG: int(clon_vdi.read_only),
1117 MANAGED_TAG: int(clon_vdi.managed),
1118 METADATA_OF_POOL_TAG: ''
1119 }
1121 if not self.legacyMode:
1122 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1124 except XenAPI.Failure:
1125 util.SMlog("ERROR introducing the clon record")
1127 try:
1128 base_vdi = VDI.VDI(self, baseUuid) # readonly parent
1129 base_vdi.label = "base copy"
1130 base_vdi.read_only = True
1131 base_vdi.location = baseUuid
1132 base_vdi.size = base.sizeVirt
1133 base_vdi.utilisation = base.sizeLV
1134 base_vdi.managed = False
1135 base_vdi.sm_config = {
1136 "vdi_type": base.vdiType,
1137 "vhd-parent": baseUuid}
1139 if not self.legacyMode:
1140 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
1142 base_vdi_ref = base_vdi._db_introduce()
1143 util.SMlog("introduced base VDI: %s (%s)" % \
1144 (base_vdi_ref, baseUuid))
1146 vdi_info = {UUID_TAG: baseUuid,
1147 NAME_LABEL_TAG: base_vdi.label,
1148 NAME_DESCRIPTION_TAG: base_vdi.description,
1149 IS_A_SNAPSHOT_TAG: 0,
1150 SNAPSHOT_OF_TAG: '',
1151 SNAPSHOT_TIME_TAG: '',
1152 TYPE_TAG: type,
1153 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
1154 READ_ONLY_TAG: int(base_vdi.read_only),
1155 MANAGED_TAG: int(base_vdi.managed),
1156 METADATA_OF_POOL_TAG: ''
1157 }
1159 if not self.legacyMode:
1160 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1161 except XenAPI.Failure:
1162 util.SMlog("ERROR introducing the base record")
1164 util.SMlog("*** INTERRUPTED CLONE OP: complete")
1166 def _undoAllJournals(self):
1167 """Undo all COW image & SM interrupted journaled operations. This call must
1168 be serialized with respect to all operations that create journals"""
1169 # undoing interrupted inflates must be done first, since undoing COW image
1170 # ops might require inflations
1171 self.lock.acquire()
1172 try:
1173 self._undoAllInflateJournals()
1174 self._undoAllCowJournals()
1175 self._handleInterruptedCloneOps()
1176 self._handleInterruptedCoalesceLeaf()
1177 finally:
1178 self.lock.release()
1179 self.cleanup()
1181 def _undoAllInflateJournals(self):
1182 entries = self.journaler.getAll(LvmCowUtil.JOURNAL_INFLATE)
1183 if len(entries) == 0:
1184 return
1185 self._loadvdis()
1186 for uuid, val in entries.items():
1187 vdi = self.vdis.get(uuid)
1188 if vdi:  # 1188 ↛ 1208: condition was never false
1189 util.SMlog("Found inflate journal %s, deflating %s to %s" % \
1190 (uuid, vdi.path, val))
1191 if vdi.readonly:  # 1191 ↛ 1192: condition was never true
1192 self.lvmCache.setReadonly(vdi.lvname, False)
1193 self.lvActivator.activate(uuid, vdi.lvname, False)
1194 currSizeLV = self.lvmCache.getSize(vdi.lvname)
1196 cowutil = getCowUtil(vdi.vdi_type)
1197 lvmcowutil = LvmCowUtil(cowutil)
1199 footer_size = cowutil.getFooterSize()
1200 util.zeroOut(vdi.path, currSizeLV - footer_size, footer_size)
1201 lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(val))
1202 if vdi.readonly:  # 1202 ↛ 1203: condition was never true
1203 self.lvmCache.setReadonly(vdi.lvname, True)
1204 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1204 ↛ 1205line 1204 didn't jump to line 1205, because the condition on line 1204 was never true
1205 LvmCowUtil.refreshVolumeOnAllSlaves(
1206 self.session, self.uuid, self.vgname, vdi.lvname, uuid
1207 )
1208 self.journaler.remove(LvmCowUtil.JOURNAL_INFLATE, uuid)
1209 delattr(self, "vdiInfo")
1210 delattr(self, "allVDIs")
1212 def _undoAllCowJournals(self):
1213 """
1214 Check if there are COW journals in existence and revert them.
1215 """
1216 journals = LvmCowUtil.getAllResizeJournals(self.lvmCache)
1217 if len(journals) == 0:  # 1217 ↛ 1219: condition was never false
1218 return
1219 self._loadvdis()
1221 for uuid, jlvName in journals:
1222 vdi = self.vdis[uuid]
1223 util.SMlog("Found COW journal %s, reverting %s" % (uuid, vdi.path))
1224 cowutil = getCowUtil(vdi.vdi_type)
1225 lvmcowutil = LvmCowUtil(cowutil)
1227 self.lvActivator.activate(uuid, vdi.lvname, False)
1228 self.lvmCache.activateNoRefcount(jlvName)
1229 fullSize = lvmcowutil.calcVolumeSize(vdi.size)
1230 lvmcowutil.inflate(self.journaler, self.uuid, vdi.uuid, vdi.vdi_type, fullSize)
1231 try:
1232 jFile = os.path.join(self.path, jlvName)
1233 cowutil.revert(vdi.path, jFile)
1234 except util.CommandException:
1235 util.logException("COW journal revert")
1236 cowutil.check(vdi.path)
1237 util.SMlog("COW image revert failed but COW image ok: removing journal")
1238 # Attempt to reclaim unused space
1241 imageInfo = cowutil.getInfo(vdi.path, LvmCowUtil.extractUuid, False)
1242 NewSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
1243 if NewSize < fullSize:
1244 lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
1245 LvmCowUtil.refreshVolumeOnAllSlaves(self.session, self.uuid, self.vgname, vdi.lvname, uuid)
1246 self.lvmCache.remove(jlvName)
1247 delattr(self, "vdiInfo")
1248 delattr(self, "allVDIs")
1250 def call_on_slave(self, args, host_refs, message: str):
1251 master_ref = util.get_this_host_ref(self.session)
1252 for hostRef in host_refs:
1253 if hostRef == master_ref:  # 1253 ↛ 1254: condition was never true
1254 continue
1255 util.SMlog(f"{message} on slave {hostRef}")
1256 rv = self.session.xenapi.host.call_plugin(
1257 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1258 util.SMlog("call-plugin returned: %s" % rv)
1259 if not rv:  # 1259 ↛ 1260: condition was never true
1260 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
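# Note: the "multi" call of the on-slave plugin takes numbered steps in one
# argument map ("action1"/"lvName1", "action2"/"uuid2"/"ns2", ...),
# presumably executed in order on the slave; the _updateSlaves* helpers
# below show the exact layouts used.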
1262 def _updateSlavesPreClone(self, hostRefs, origOldLV):
1263 args = {"vgName": self.vgname,
1264 "action1": "deactivateNoRefcount",
1265 "lvName1": origOldLV}
1266 message = "Deactivate VDI"
1267 self.call_on_slave(args, hostRefs, message)
1269 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
1270 baseUuid, baseLV):
1271 """We need to reactivate the original LV on each slave (note that the
1272 name for the original LV might change), as well as init the refcount
1273 for the base LV"""
1274 args = {"vgName": self.vgname,
1275 "action1": "refresh",
1276 "lvName1": origLV,
1277 "action2": "activate",
1278 "ns2": NS_PREFIX_LVM + self.uuid,
1279 "lvName2": baseLV,
1280 "uuid2": baseUuid}
1282 message = f"Updating {origOldLV}, {origLV}, {baseLV}"
1283 self.call_on_slave(args, hostRefs, message)
1285 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
1286 """Reactivate and refresh CBT log file on slaves"""
1287 args = {"vgName": self.vgname,
1288 "action1": "deactivateNoRefcount",
1289 "lvName1": cbtlog,
1290 "action2": "refresh",
1291 "lvName2": cbtlog}
1293 message = f"Updating {cbtlog}"
1294 self.call_on_slave(args, hostRefs, message)
1296 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
1297 """Tell the slave we deleted the base image"""
1298 args = {"vgName": self.vgname,
1299 "action1": "cleanupLockAndRefcount",
1300 "uuid1": baseUuid,
1301 "ns1": NS_PREFIX_LVM + self.uuid}
1303 message = f"Cleaning locks for {baseLV}"
1304 self.call_on_slave(args, hostRefs, message)
1306 def _cleanup(self, skipLockCleanup=False):
1307 """delete stale refcounter, flag, and lock files"""
1308 RefCounter.resetAll(NS_PREFIX_LVM + self.uuid)
1309 IPCFlag(self.uuid).clearAll()
1310 if not skipLockCleanup:  # 1310 ↛ 1311: condition was never true
1311 lock.Lock.cleanupAll(self.uuid)
1312 lock.Lock.cleanupAll(NS_PREFIX_LVM + self.uuid)
1314 def _prepareTestMode(self):
1315 util.SMlog("Test mode: %s" % self.testMode)
1316 if self.ENV_VAR_VHD_TEST.get(self.testMode):  # 1316 ↛ 1317: condition was never true
1317 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
1318 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
1320 def _kickGC(self):
1321 util.SMlog("Kicking GC")
1322 cleanup.start_gc_service(self.uuid)
1324 def ensureCBTSpace(self):
1325 # Ensure we have space for at least one LV
1326 self._ensureSpaceAvailable(self.journaler.LV_SIZE)
1329 class LVMVDI(VDI.VDI):
1331 JRN_CLONE = "clone" # journal entry type for the clone operation
1333 @override
1334 def load(self, vdi_uuid) -> None:
1335 self.lock = self.sr.lock
1336 self.lvActivator = self.sr.lvActivator
1337 self.loaded = False
1338 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"):  # 1338 ↛ 1340: condition was never false
1339 self._setType(VdiType.RAW)
1340 self.uuid = vdi_uuid
1341 self.location = self.uuid
1342 self.exists = True
1344 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
1345 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
1346 if self.parent:  # 1346 ↛ 1347: condition was never true
1347 self.sm_config_override['vhd-parent'] = self.parent
1348 else:
1349 self.sm_config_override['vhd-parent'] = None
1350 return
1352 # scan() didn't run: determine the type of the VDI manually
1353 if self._determineType():  # 1353 ↛ 1357: condition was never false
1354 return
1356 # the VDI must be in the process of being created
1357 self.exists = False
1359 vdi_sm_config = self.sr.srcmd.params.get("vdi_sm_config")
1360 if vdi_sm_config:
1361 image_format = vdi_sm_config.get("image-format") or vdi_sm_config.get("type")
1362 if image_format:
1363 try:
1364 self._setType(CREATE_PARAM_TYPES[image_format])
1365 except:
1366 raise xs_errors.XenError('VDICreate', opterr='bad image format')
1367 if self.sr.legacyMode and self.sr.cmd == 'vdi_create' and VdiType.isCowImage(self.vdi_type):
1368 raise xs_errors.XenError('VDICreate', opterr='Cannot create COW type disk in legacy mode')
1370 if not self.vdi_type:
1371 self._setType(getVdiTypeFromImageFormat(self.sr.preferred_image_formats[0]))
1373 self.lvname = "%s%s" % (LV_PREFIX[self.vdi_type], vdi_uuid)
1374 self.path = os.path.join(self.sr.path, self.lvname)
1376 @override
1377 def create(self, sr_uuid, vdi_uuid, size) -> str:
1378 util.SMlog("LVMVDI.create for %s" % self.uuid)
1379 if not self.sr.isMaster:
1380 raise xs_errors.XenError('LVMMaster')
1381 if self.exists:
1382 raise xs_errors.XenError('VDIExists')
1384 size = self.cowutil.validateAndRoundImageSize(int(size))
1386 util.SMlog("LVMVDI.create: type = %s, %s (size=%s)" % \
1387 (self.vdi_type, self.path, size))
1388 lvSize = 0
1389 self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
1390 if not VdiType.isCowImage(self.vdi_type):
1391 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
1392 else:
1393 if self.sr.provision == "thin":
1394 lvSize = util.roundup(
1395 lvutil.LVM_SIZE_INCREMENT,
1396 self.cowutil.calcOverheadEmpty(max(size, self.cowutil.getDefaultPreallocationSizeVirt()))
1397 )
1398 elif self.sr.provision == "thick":
1399 lvSize = self.lvmcowutil.calcVolumeSize(int(size))
1401 self.sr._ensureSpaceAvailable(lvSize)
1403 try:
1404 self.sr.lvmCache.create(self.lvname, lvSize)
1405 if not VdiType.isCowImage(self.vdi_type):
1406 self.size = self.sr.lvmCache.getSize(self.lvname)
1407 else:
1408 self.cowutil.create(
1409 self.path, int(size), False, self.cowutil.getDefaultPreallocationSizeVirt()
1410 )
1411 self.size = self.cowutil.getSizeVirt(self.path)
1412 self.sr.lvmCache.deactivateNoRefcount(self.lvname)
1413 except util.CommandException as e:
1414 util.SMlog("Unable to create VDI")
1415 self.sr.lvmCache.remove(self.lvname)
1416 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
1418 self.utilisation = lvSize
1419 self.sm_config["vdi_type"] = self.vdi_type
1420 self.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)
1422 if not self.sr.legacyMode:
1423 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1425 self.ref = self._db_introduce()
1426 self.sr._updateStats(self.sr.uuid, self.size)
1428 vdi_info = {UUID_TAG: self.uuid,
1429 NAME_LABEL_TAG: util.to_plain_string(self.label),
1430 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1431 IS_A_SNAPSHOT_TAG: 0,
1432 SNAPSHOT_OF_TAG: '',
1433 SNAPSHOT_TIME_TAG: '',
1434 TYPE_TAG: self.ty,
1435 VDI_TYPE_TAG: self.vdi_type,
1436 READ_ONLY_TAG: int(self.read_only),
1437 MANAGED_TAG: int(self.managed),
1438 METADATA_OF_POOL_TAG: ''
1439 }
1441 if not self.sr.legacyMode:
1442 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1444 return VDI.VDI.get_params(self)
1446 @override
1447 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None:
1448 util.SMlog("LVMVDI.delete for %s" % self.uuid)
1449 try:
1450 self._loadThis()
1451 except xs_errors.SRException as e:
1452 # Catch 'VDI doesn't exist' exception
1453 if e.errno == 46:
1454 return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1455 raise
1457 vdi_ref = self.sr.srcmd.params['vdi_ref']
1458 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1459 raise xs_errors.XenError("VDIDelete", \
1460 opterr="Deleting non-leaf node not permitted")
1462 if not self.hidden:
1463 self._markHidden()
1465 if not data_only:
1466 # Remove from XAPI and delete from MGT
1467 self._db_forget()
1468 else:
1469 # If this is a data_destroy call, don't remove from XAPI db
1470 # Only delete from MGT
1471 if not self.sr.legacyMode:
1472 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
1474 # deactivate here because it might be too late to do it in the "final"
1475 # step: GC might have removed the LV by then
1476 if self.sr.lvActivator.get(self.uuid, False):
1477 self.sr.lvActivator.deactivate(self.uuid, False)
1479 try:
1480 self.sr.lvmCache.remove(self.lvname)
1481 self.sr.lock.cleanup(vdi_uuid, NS_PREFIX_LVM + sr_uuid)
1482 self.sr.lock.cleanupAll(vdi_uuid)
1483 except xs_errors.SRException as e:
1484 util.SMlog(
1485 "Failed to remove the volume (maybe is leaf coalescing) "
1486 "for %s err:%d" % (self.uuid, e.errno))
1488 self.sr._updateStats(self.sr.uuid, -self.size)
1489 self.sr._kickGC()
1490 return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1492 @override
1493 def attach(self, sr_uuid, vdi_uuid) -> str:
1494 util.SMlog("LVMVDI.attach for %s" % self.uuid)
1495 if self.sr.journaler.hasJournals(self.uuid):
1496 raise xs_errors.XenError('VDIUnavailable',
1497 opterr='Interrupted operation detected on this VDI, '
1498 'scan SR first to trigger auto-repair')
1500 writable = ('args' not in self.sr.srcmd.params) or \
1501 (self.sr.srcmd.params['args'][0] == "true")
1502 needInflate = True
1503 if not VdiType.isCowImage(self.vdi_type) or not writable:
1504 needInflate = False
1505 else:
1506 self._loadThis()
1507 if self.utilisation >= self.lvmcowutil.calcVolumeSize(self.size):
1508 needInflate = False
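# Editor's note (hedged example with illustrative numbers): a thin
# 10 GiB COW leaf whose LV is currently 2 GiB attaches read-write, so
# utilisation (2 GiB) < calcVolumeSize(10 GiB) and needInflate stays
# True; attaching the same VDI read-only, or a RAW VDI, skips inflation.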
1510 if needInflate:
1511 try:
1512 self._prepareThin(True, self.vdi_type)
1513 except:
1514 util.logException("attach")
1515 raise xs_errors.XenError('LVMProvisionAttach')
1517 try:
1518 return self._attach()
1519 finally:
1520 if not self.sr.lvActivator.deactivateAll():
1521 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
1523 @override
1524 def detach(self, sr_uuid, vdi_uuid) -> None:
1525 util.SMlog("LVMVDI.detach for %s" % self.uuid)
1526 self._loadThis()
1527 already_deflated = (self.utilisation < \
1528 self.lvmcowutil.calcVolumeSize(self.size))
1529 needDeflate = True
1530 if not VdiType.isCowImage(self.vdi_type) or already_deflated:
1531 needDeflate = False
1532 elif self.sr.provision == "thick":
1533 needDeflate = False
1534 # except for snapshots, which are always deflated
1535 if self.sr.srcmd.cmd != 'vdi_detach_from_config':
1536 vdi_ref = self.sr.srcmd.params['vdi_ref']
1537 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
1538 if snap:
1539 needDeflate = True
1541 if needDeflate:
1542 try:
1543 self._prepareThin(False, self.vdi_type)
1544 except:
1545 util.logException("_prepareThin")
1546 raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
1548 try:
1549 self._detach()
1550 finally:
1551 if not self.sr.lvActivator.deactivateAll():
1552 raise xs_errors.XenError("SMGeneral", opterr="deactivation")
1554 # We only support offline resize
1555 @override
1556 def resize(self, sr_uuid, vdi_uuid, size) -> str:
1557 util.SMlog("LVMVDI.resize for %s" % self.uuid)
1558 if not self.sr.isMaster:
1559 raise xs_errors.XenError('LVMMaster')
1561 self._loadThis()
1562 if self.hidden:
1563 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1565 if size < self.size:
1566 util.SMlog('vdi_resize: shrinking not supported: ' + \
1567 '(current size: %d, new size: %d)' % (self.size, size))
1568 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1570 size = self.cowutil.validateAndRoundImageSize(int(size))
1572 if size == self.size:
1573 return VDI.VDI.get_params(self)
1575 if not VdiType.isCowImage(self.vdi_type):
1576 lvSizeOld = self.size
1577 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
1578 else:
1579 lvSizeOld = self.utilisation
1580 lvSizeNew = self.lvmcowutil.calcVolumeSize(size)
1581 if self.sr.provision == "thin":
1582 # VDI is currently deflated, so keep it deflated
1583 lvSizeNew = lvSizeOld
1584 assert(lvSizeNew >= lvSizeOld)
1585 spaceNeeded = lvSizeNew - lvSizeOld
1586 self.sr._ensureSpaceAvailable(spaceNeeded)
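# Editor's note (hedged worked example): assuming LVM_SIZE_INCREMENT is
# the usual 4 MiB extent size, growing a RAW VDI from 10 GiB to
# 10 GiB + 1 byte gives:
#   lvSizeNew   = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
#               = 10 GiB + 4 MiB
#   spaceNeeded = lvSizeNew - lvSizeOld = 4 MiB
# i.e. only the additional extents are reserved, not the whole new size.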
1588 oldSize = self.size
1589 if not VdiType.isCowImage(self.vdi_type):
1590 self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
1591 self.size = self.sr.lvmCache.getSize(self.lvname)
1592 self.utilisation = self.size
1593 else:
1594 if lvSizeNew != lvSizeOld:
1595 self.lvmcowutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type, lvSizeNew)
1596 self.cowutil.setSizeVirtFast(self.path, size)
1597 self.size = self.cowutil.getSizeVirt(self.path)
1598 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
1600 vdi_ref = self.sr.srcmd.params['vdi_ref']
1601 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1602 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
1603 str(self.utilisation))
1604 self.sr._updateStats(self.sr.uuid, self.size - oldSize)
1605 super(LVMVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
1606 return VDI.VDI.get_params(self)
1608 @override
1609 def clone(self, sr_uuid, vdi_uuid) -> str:
1610 return self._do_snapshot(
1611 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
1613 @override
1614 def compose(self, sr_uuid, vdi1, vdi2) -> None:
1615 util.SMlog("LVMSR.compose for %s -> %s" % (vdi2, vdi1))
1616 if not VdiType.isCowImage(self.vdi_type):
1617 raise xs_errors.XenError('Unimplemented')
1619 parent_uuid = vdi1
1620 parent_lvname = LV_PREFIX[self.vdi_type] + parent_uuid
1621 assert(self.sr.lvmCache.checkLV(parent_lvname))
1622 parent_path = os.path.join(self.sr.path, parent_lvname)
1624 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1625 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
1627 self.cowutil.setParent(self.path, parent_path, False)
1628 self.cowutil.setHidden(parent_path)
1629 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
1631 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
1632 True):
1633 raise util.SMException("failed to refresh VDI %s" % self.uuid)
1635 util.SMlog("Compose done")
1637 def reset_leaf(self, sr_uuid, vdi_uuid):
1638 util.SMlog("LVMSR.reset_leaf for %s" % vdi_uuid)
1639 if not VdiType.isCowImage(self.vdi_type):
1640 raise xs_errors.XenError('Unimplemented')
1642 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1644 # safety check
1645 if not self.cowutil.hasParent(self.path):
1646 raise util.SMException(("ERROR: VDI %s has no parent, "
1647 "will not reset contents") % self.uuid)
1649 self.cowutil.killData(self.path)
1651 def _attach(self):
1652 self._chainSetActive(True, True, True)
1653 if not util.pathexists(self.path):
1654 raise xs_errors.XenError('VDIUnavailable', \
1655 opterr='Could not find: %s' % self.path)
1657 if not hasattr(self, 'xenstore_data'):
1658 self.xenstore_data = {}
1660 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
1661 scsiutil.gen_synthetic_page_data(self.uuid)))
1663 self.xenstore_data['storage-type'] = 'lvm'
1664 self.xenstore_data['vdi-type'] = self.vdi_type
1666 self.attached = True
1667 self.sr.lvActivator.persist()
1668 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
1670 def _detach(self):
1671 self._chainSetActive(False, True)
1672 self.attached = False
1674 @override
1675 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
1676 cloneOp=False, secondary=None, cbtlog=None, is_mirror_destination=False) -> str:
1677 # If cbt enabled, save file consistency state
1678 if cbtlog is not None:
1679 if blktap2.VDI.tap_status(self.session, vdi_uuid):
1680 consistency_state = False
1681 else:
1682 consistency_state = True
1683 util.SMlog("Saving log consistency state of %s for vdi: %s" %
1684 (consistency_state, vdi_uuid))
1685 else:
1686 consistency_state = None
1688 pause_time = time.time()
1689 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
1690 raise util.SMException("failed to pause VDI %s" % vdi_uuid)
1692 snapResult = None
1693 try:
1694 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state, is_mirror_destination)
1695 except Exception as e1:
1696 try:
1697 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
1698 secondary=None)
1699 except Exception as e2:
1700 util.SMlog('WARNING: failed to clean up failed snapshot: '
1701 '%s (error ignored)' % e2)
1702 raise
1703 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary)
1704 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
1705 unpause_time = time.time()
1706 if (unpause_time - pause_time) > LONG_SNAPTIME:
1707 util.SMlog('WARNING: snapshot paused VM for %s seconds' %
1708 (unpause_time - pause_time))
1709 return snapResult
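# Editor's note: _do_snapshot() above follows a pause / mutate / unpause
# shape. A minimal sketch of that pattern (names as used in this file;
# the real method also handles CBT state and cleanup on failure):
#
# def paused(session, sr_uuid, vdi_uuid, operation):
#     if not blktap2.VDI.tap_pause(session, sr_uuid, vdi_uuid):
#         raise util.SMException("failed to pause VDI %s" % vdi_uuid)
#     try:
#         return operation()
#     finally:
#         blktap2.VDI.tap_unpause(session, sr_uuid, vdi_uuid, None)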
1711 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None, is_mirror_destination=False):
1712 util.SMlog("LVMVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
1714 if not self.sr.isMaster:
1715 raise xs_errors.XenError('LVMMaster')
1716 if self.sr.legacyMode:
1717 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
1719 self._loadThis()
1720 if self.hidden:
1721 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
1723 snapVdiType = self.sr._get_snap_vdi_type(self.vdi_type, self.size)
1725 self.sm_config = self.session.xenapi.VDI.get_sm_config( \
1726 self.sr.srcmd.params['vdi_ref'])
1727 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1727 ↛ 1728line 1727 didn't jump to line 1728, because the condition on line 1727 was never true
1728 if not util.fistpoint.is_active("testsm_clone_allow_raw"):
1729 raise xs_errors.XenError('Unimplemented', \
1730 opterr='Raw VDI, snapshot or clone not permitted')
1732 # we must activate the entire image chain because the real parent could
1733 # theoretically be anywhere in the chain if all images under it are empty
1734 self._chainSetActive(True, False)
1735 if not util.pathexists(self.path):
1736 raise xs_errors.XenError('VDIUnavailable', \
1737 opterr='VDI unavailable: %s' % (self.path))
1739 if VdiType.isCowImage(self.vdi_type):
1740 depth = self.cowutil.getDepth(self.path)
1741 if depth == -1:
1742 raise xs_errors.XenError('VDIUnavailable', \
1743 opterr='failed to get COW depth')
1744 elif depth >= self.cowutil.getMaxChainLength():
1745 raise xs_errors.XenError('SnapshotChainTooLong')
1747 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
1748 self.sr.srcmd.params['vdi_ref'])
1750 fullpr = self.lvmcowutil.calcVolumeSize(self.size)
1751 thinpr = util.roundup(
1752 lvutil.LVM_SIZE_INCREMENT,
1753 self.cowutil.calcOverheadEmpty(max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()))
1754 )
1755 lvSizeOrig = thinpr
1756 lvSizeClon = thinpr
1758 hostRefs = []
1759 if self.sr.cmd == "vdi_snapshot":
1760 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
1761 if hostRefs:
1762 lvSizeOrig = fullpr
1763 if self.sr.provision == "thick":
1764 if not self.issnap:
1765 lvSizeOrig = fullpr
1766 if self.sr.cmd != "vdi_snapshot":
1767 lvSizeClon = fullpr
1769 if (snapType == VDI.SNAPSHOT_SINGLE or
1770 snapType == VDI.SNAPSHOT_INTERNAL):
1771 lvSizeClon = 0
1773 # the space required must include 2 journal LVs: a clone journal and an
1774 # inflate journal (for failure handling)
1775 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
1776 lvSizeBase = self.size
1777 if VdiType.isCowImage(self.vdi_type):
1778 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, self.cowutil.getSizePhys(self.path))
1779 size_req -= (self.utilisation - lvSizeBase)
1780 self.sr._ensureSpaceAvailable(size_req)
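# Editor's note (hedged numeric example, illustrative constants only):
# for a clone of a thick-provisioned, attached 10 GiB leaf with
# journaler.LV_SIZE = 4 MiB:
#   size_req = lvSizeOrig + lvSizeClon + 2 * LV_SIZE
#            = 10 GiB + 10 GiB + 8 MiB
# minus whatever the base copy gives back when deflated to lvSizeBase,
# via size_req -= (self.utilisation - lvSizeBase).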
1782 if hostRefs:
1783 self.sr._updateSlavesPreClone(hostRefs, self.lvname)
1785 baseUuid = util.gen_uuid()
1786 origUuid = self.uuid
1787 clonUuid = ""
1788 if snapType == VDI.SNAPSHOT_DOUBLE:
1789 clonUuid = util.gen_uuid()
1790 jval = "%s_%s" % (baseUuid, clonUuid)
1791 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
1792 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
1794 try:
1795 # self becomes the "base vdi"
1796 origOldLV = self.lvname
1797 baseLV = LV_PREFIX[self.vdi_type] + baseUuid
1798 self.sr.lvmCache.rename(self.lvname, baseLV)
1799 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
1800 RefCounter.set(baseUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
1801 self.uuid = baseUuid
1802 self.lvname = baseLV
1803 self.path = os.path.join(self.sr.path, baseLV)
1804 self.label = "base copy"
1805 self.read_only = True
1806 self.location = self.uuid
1807 self.managed = False
1809 # shrink the base copy to the minimum - we do it before creating
1810 # the snapshot volumes to avoid requiring double the space
1811 if VdiType.isCowImage(self.vdi_type):
1812 self.lvmcowutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
1813 self.utilisation = lvSizeBase
1814 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
1816 snapVDI = self._createSnap(origUuid, snapVdiType, lvSizeOrig, False, is_mirror_destination)
1817 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
1818 snapVDI2 = None
1819 if snapType == VDI.SNAPSHOT_DOUBLE:
1820 snapVDI2 = self._createSnap(clonUuid, snapVdiType, lvSizeClon, True)
1821 # If we have CBT enabled on the VDI,
1822 # set CBT status for the new snapshot disk
1823 if cbtlog:
1824 snapVDI2.cbt_enabled = True
1825 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
1827 # note: it is important to mark the parent hidden only AFTER the
1828 # new image children have been created, which are referencing it;
1829 # otherwise we would introduce a race with GC that could reclaim
1830 # the parent before we snapshot it
1831 if not VdiType.isCowImage(self.vdi_type):
1832 self.sr.lvmCache.setHidden(self.lvname)
1833 else:
1834 self.cowutil.setHidden(self.path)
1835 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
1837 # set the base copy to ReadOnly
1838 self.sr.lvmCache.setReadonly(self.lvname, True)
1839 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
1841 if hostRefs:
1842 self.sr._updateSlavesOnClone(hostRefs, origOldLV,
1843 snapVDI.lvname, self.uuid, self.lvname)
1845 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
1846 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
1847 snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
1848 if hostRefs:
1849 cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
1850 try:
1851 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
1852 except:
1853 alert_name = "VDI_CBT_SNAPSHOT_FAILED"
1854 alert_str = ("Creating CBT snapshot for {} failed"
1855 .format(snapVDI.uuid))
1856 snapVDI._disable_cbt_on_error(alert_name, alert_str)
1859 except (util.SMException, XenAPI.Failure) as e:
1860 util.logException("LVMVDI._snapshot")
1861 self._failClone(origUuid, jval, str(e))
1862 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)
1864 self.sr.journaler.remove(self.JRN_CLONE, origUuid)
1866 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
1868 def _createSnap(self, snapUuid, snapVdiType, snapSizeLV, isNew, is_mirror_destination=False):
1869 """Snapshot self and return the snapshot VDI object"""
1871 snapLV = LV_PREFIX[snapVdiType] + snapUuid
1872 snapPath = os.path.join(self.sr.path, snapLV)
1873 self.sr.lvmCache.create(snapLV, int(snapSizeLV))
1874 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
1875 if isNew:
1876 RefCounter.set(snapUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
1877 self.sr.lvActivator.add(snapUuid, snapLV, False)
1878 parentRaw = (self.vdi_type == VdiType.RAW)
1879 self.cowutil.snapshot(
1880 snapPath, self.path, parentRaw, max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()), is_mirror_image=is_mirror_destination
1881 )
1882 snapParent = self.cowutil.getParent(snapPath, LvmCowUtil.extractUuid)
1884 snapVDI = LVMVDI(self.sr, snapUuid)
1885 snapVDI.read_only = False
1886 snapVDI.location = snapUuid
1887 snapVDI.size = self.size
1888 snapVDI.utilisation = snapSizeLV
1889 snapVDI.sm_config = dict()
1890 for key, val in self.sm_config.items():
1891 if key not in [
1892 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
1893 not key.startswith("host_"):
1894 snapVDI.sm_config[key] = val
1895 snapVDI.sm_config["vdi_type"] = snapVdiType
1896 snapVDI.sm_config["vhd-parent"] = snapParent
1897 # TODO: fix the raw snapshot case
1898 snapVDI.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)
1899 snapVDI.lvname = snapLV
1900 return snapVDI
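# Editor's note: _createSnap() is called once or twice by _snapshot()
# above depending on the snapshot type, e.g.:
#   snapVDI  = self._createSnap(origUuid, snapVdiType, lvSizeOrig, False)
#   snapVDI2 = self._createSnap(clonUuid, snapVdiType, lvSizeClon, True)
# (the second call only for VDI.SNAPSHOT_DOUBLE).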
1902 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
1903 if snapType != VDI.SNAPSHOT_INTERNAL:
1904 self.sr._updateStats(self.sr.uuid, self.size)
1905 basePresent = True
1907 # Verify parent locator field of both children and delete basePath if
1908 # unused
1909 snapParent = snapVDI.sm_config["vhd-parent"]
1910 snap2Parent = ""
1911 if snapVDI2:
1912 snap2Parent = snapVDI2.sm_config["vhd-parent"]
1913 if snapParent != self.uuid and \
1914 (not snapVDI2 or snap2Parent != self.uuid):
1915 util.SMlog("%s != %s != %s => deleting unused base %s" % \
1916 (snapParent, self.uuid, snap2Parent, self.lvname))
1917 RefCounter.put(self.uuid, False, NS_PREFIX_LVM + self.sr.uuid)
1918 self.sr.lvmCache.remove(self.lvname)
1919 self.sr.lvActivator.remove(self.uuid, False)
1920 if hostRefs:
1921 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
1922 basePresent = False
1923 else:
1924 # assign the _binary_ refcount of the original VDI to the new base
1925 # VDI (but as the normal refcount, since binary refcounts are only
1926 # for leaf nodes). The normal refcount of the child is not
1927 # transferred to the base VDI because normal refcounts are
1928 # incremented and decremented individually, and not based on the
1929 # image chain (i.e., the child's normal refcount will be decremented
1930 # independently of its parent situation). Add 1 for this clone op.
1931 # Note that we do not need to protect the refcount operations
1932 # below with per-VDI locking like we do in lvutil because at this
1933 # point we have exclusive access to the VDIs involved. Other SM
1934 # operations are serialized by the Agent or with the SR lock, and
1935 # any coalesce activations are serialized with the SR lock. (The
1936 # coalesce activates the coalesced VDI pair in the beginning, which
1937 # cannot affect the VDIs here because they cannot possibly be
1938 # involved in coalescing at this point, and at the relinkSkip step
1939 # that activates the children, which takes the SR lock.)
1940 ns = NS_PREFIX_LVM + self.sr.uuid
1941 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
1942 RefCounter.set(self.uuid, bcnt + 1, 0, ns)
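# Editor's note (illustrative trace of the transfer above): for a leaf
# attached on one host, the leaf holds the binary refcount, so:
#   RefCounter.check(snapVDI.uuid, ns)          ->  (cnt, bcnt) == (0, 1)
#   RefCounter.set(self.uuid, bcnt + 1, 0, ns)  # base copy gets 2: the
#       inherited attachment plus one for this clone operation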
1944 # the "paused" and "host_*" sm-config keys are special and must stay on
1945 # the leaf without being inherited by anyone else
1946 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]:
1947 snapVDI.sm_config[key] = self.sm_config[key]
1948 del self.sm_config[key]
1950 # Introduce any new VDI records & update the existing one
1951 type = self.session.xenapi.VDI.get_type( \
1952 self.sr.srcmd.params['vdi_ref'])
1953 if snapVDI2:
1954 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1955 vdiRef = snapVDI2._db_introduce()
1956 if cloneOp:
1957 vdi_info = {UUID_TAG: snapVDI2.uuid,
1958 NAME_LABEL_TAG: util.to_plain_string( \
1959 self.session.xenapi.VDI.get_name_label( \
1960 self.sr.srcmd.params['vdi_ref'])),
1961 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1962 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1963 IS_A_SNAPSHOT_TAG: 0,
1964 SNAPSHOT_OF_TAG: '',
1965 SNAPSHOT_TIME_TAG: '',
1966 TYPE_TAG: type,
1967 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1968 READ_ONLY_TAG: 0,
1969 MANAGED_TAG: int(snapVDI2.managed),
1970 METADATA_OF_POOL_TAG: ''
1971 }
1972 else:
1973 util.SMlog("snapshot VDI params: %s" % \
1974 self.session.xenapi.VDI.get_snapshot_time(vdiRef))
1975 vdi_info = {UUID_TAG: snapVDI2.uuid,
1976 NAME_LABEL_TAG: util.to_plain_string( \
1977 self.session.xenapi.VDI.get_name_label( \
1978 self.sr.srcmd.params['vdi_ref'])),
1979 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1980 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1981 IS_A_SNAPSHOT_TAG: 1,
1982 SNAPSHOT_OF_TAG: snapVDI.uuid,
1983 SNAPSHOT_TIME_TAG: '',
1984 TYPE_TAG: type,
1985 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1986 READ_ONLY_TAG: 0,
1987 MANAGED_TAG: int(snapVDI2.managed),
1988 METADATA_OF_POOL_TAG: ''
1989 }
1991 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1992 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
1993 (vdiRef, snapVDI2.uuid))
1995 if basePresent:
1996 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1997 vdiRef = self._db_introduce()
1998 vdi_info = {UUID_TAG: self.uuid,
1999 NAME_LABEL_TAG: self.label,
2000 NAME_DESCRIPTION_TAG: self.description,
2001 IS_A_SNAPSHOT_TAG: 0,
2002 SNAPSHOT_OF_TAG: '',
2003 SNAPSHOT_TIME_TAG: '',
2004 TYPE_TAG: type,
2005 VDI_TYPE_TAG: self.sm_config['vdi_type'],
2006 READ_ONLY_TAG: 1,
2007 MANAGED_TAG: 0,
2008 METADATA_OF_POOL_TAG: ''
2009 }
2011 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
2012 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
2013 (vdiRef, self.uuid))
2015 # Update the original record
2016 vdi_ref = self.sr.srcmd.params['vdi_ref']
2017 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
2018 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
2019 str(snapVDI.utilisation))
2021 # Return the info on the new snap VDI
2022 snap = snapVDI2
2023 if not snap:
2024 snap = self
2025 if not basePresent:
2026 # a single-snapshot of an empty VDI will be a noop, resulting
2027 # in no new VDIs, so return the existing one. The GC wouldn't
2028 # normally try to single-snapshot an empty image of course, but
2029 # if an external snapshot operation manages to sneak in right
2030 # before a snapshot-coalesce phase, we would get here
2031 snap = snapVDI
2032 return snap.get_params()
2034 def _setType(self, vdiType: str) -> None:
2035 self.vdi_type = vdiType
2036 self.cowutil = getCowUtil(self.vdi_type)
2037 self.lvmcowutil = LvmCowUtil(self.cowutil)
2039 def _initFromVDIInfo(self, vdiInfo):
2040 self._setType(vdiInfo.vdiType)
2041 self.lvname = vdiInfo.lvName
2042 self.size = vdiInfo.sizeVirt
2043 self.utilisation = vdiInfo.sizeLV
2044 self.hidden = vdiInfo.hidden
2045 if self.hidden:
2046 self.managed = False
2047 self.active = vdiInfo.lvActive
2048 self.readonly = vdiInfo.lvReadonly
2049 self.parent = vdiInfo.parentUuid
2050 self.path = os.path.join(self.sr.path, self.lvname)
2051 if hasattr(self, "sm_config_override"):
2052 self.sm_config_override["vdi_type"] = self.vdi_type
2053 else:
2054 self.sm_config_override = {'vdi_type': self.vdi_type}
2055 self.loaded = True
2057 def _initFromLVInfo(self, lvInfo):
2058 self._setType(lvInfo.vdiType)
2059 self.lvname = lvInfo.name
2060 self.size = lvInfo.size
2061 self.utilisation = lvInfo.size
2062 self.hidden = lvInfo.hidden
2063 self.active = lvInfo.active
2064 self.readonly = lvInfo.readonly
2065 self.parent = ''
2066 self.path = os.path.join(self.sr.path, self.lvname)
2067 if hasattr(self, "sm_config_override"):
2068 self.sm_config_override["vdi_type"] = self.vdi_type
2069 else:
2070 self.sm_config_override = {'vdi_type': self.vdi_type}
2071 if 'vhd-parent' in self.sm_config_override:
2072 self.parent = self.sm_config_override['vhd-parent']
2073 if not VdiType.isCowImage(self.vdi_type):
2074 self.loaded = True
2076 def _initFromImageInfo(self, imageInfo):
2077 self.size = imageInfo.sizeVirt
2078 if self.parent == '' or (imageInfo.parentUuid != '' and imageInfo.parentUuid != self.parent):
2079 self.parent = imageInfo.parentUuid
2080 self.hidden = imageInfo.hidden
2081 self.loaded = True
2083 def _determineType(self):
2084 """
2085 Determine whether this is a RAW or a COW VDI.
2086 """
2087 if "vdi_ref" in self.sr.srcmd.params:
2088 vdi_ref = self.sr.srcmd.params["vdi_ref"]
2089 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2090 if sm_config.get("vdi_type"):
2091 self._setType(sm_config["vdi_type"])
2092 prefix = LV_PREFIX[self.vdi_type]
2093 self.lvname = "%s%s" % (prefix, self.uuid)
2094 self.path = os.path.join(self.sr.path, self.lvname)
2095 self.sm_config_override = sm_config
2096 return True
2098 # LVM commands can be costly, so check the file directly first in case
2099 # the LV is active
2100 found = False
2101 for vdi_type, prefix in LV_PREFIX.items():
2102 lvname = "%s%s" % (prefix, self.uuid)
2103 path = os.path.join(self.sr.path, lvname)
2104 if util.pathexists(path):
2105 if found:
2106 raise xs_errors.XenError('VDILoad',
2107 opterr="multiple VDIs: uuid %s" % self.uuid)
2108 found = True
2109 self._setType(vdi_type)
2110 self.lvname = lvname
2111 self.path = path
2112 if found:
2113 return True
2115 # now list all LVs
2116 if not lvutil._checkVG(self.sr.vgname):
2117 # when doing attach_from_config, the VG won't be there yet
2118 return False
2120 lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache)
2121 if lvs.get(self.uuid):
2122 self._initFromLVInfo(lvs[self.uuid])
2123 return True
2124 return False
2126 def _loadThis(self):
2127 """
2128 Load VDI info for this VDI and activate the LV if it's COW. We
2129 don't do it in VDI.load() because not all VDI operations need it.
2130 """
2131 if self.loaded:
2132 if VdiType.isCowImage(self.vdi_type):
2133 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2134 return
2135 try:
2136 lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache, self.lvname)
2137 except util.CommandException as e:
2138 raise xs_errors.XenError('VDIUnavailable',
2139 opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
2140 if not lvs.get(self.uuid):
2141 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
2142 self._initFromLVInfo(lvs[self.uuid])
2143 if VdiType.isCowImage(self.vdi_type):
2144 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2145 imageInfo = self.cowutil.getInfo(self.path, LvmCowUtil.extractUuid, False)
2146 if not imageInfo:
2147 raise xs_errors.XenError('VDIUnavailable', opterr='getInfo failed')
2148 self._initFromImageInfo(imageInfo)
2149 self.loaded = True
2151 def _chainSetActive(self, active, binary, persistent=False):
2152 if binary:
2153 (count, bcount) = RefCounter.checkLocked(self.uuid,
2154 NS_PREFIX_LVM + self.sr.uuid)
2155 if (active and bcount > 0) or (not active and bcount == 0):
2156 return # this is a redundant activation/deactivation call
2158 vdiList = {self.uuid: self.lvname}
2159 if VdiType.isCowImage(self.vdi_type):
2160 vdiList = self.cowutil.getParentChain(self.lvname, LvmCowUtil.extractUuid, self.sr.vgname)
2161 for uuid, lvName in vdiList.items():
2162 binaryParam = binary
2163 if uuid != self.uuid:
2164 binaryParam = False # binary param only applies to leaf nodes
2165 if active:
2166 self.sr.lvActivator.activate(uuid, lvName, binaryParam,
2167 persistent)
2168 else:
2169 # just add the LVs for deactivation in the final (cleanup)
2170 # step. The LVs must not have been activated during the current
2171 # operation
2172 self.sr.lvActivator.add(uuid, lvName, binaryParam)
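# Editor's note: callers drive _chainSetActive() symmetrically, as in
# _attach()/_detach() above:
#   self._chainSetActive(True, True, True)   # activate whole chain,
#                                            # binary refcount on the leaf
#   self._chainSetActive(False, True)        # queue chain for deactivation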
2174 def _failClone(self, uuid, jval, msg):
2175 try:
2176 self.sr._handleInterruptedCloneOp(uuid, jval, True)
2177 self.sr.journaler.remove(self.JRN_CLONE, uuid)
2178 except Exception as e:
2179 util.SMlog('WARNING: failed to clean up failed snapshot: ' \
1980 '%s (error ignored)' % e)
2181 raise xs_errors.XenError('VDIClone', opterr=msg)
2183 def _markHidden(self):
2184 if not VdiType.isCowImage(self.vdi_type):
2185 self.sr.lvmCache.setHidden(self.lvname)
2186 else:
2187 self.cowutil.setHidden(self.path)
2188 self.hidden = 1
2190 def _prepareThin(self, attach, vdiType):
2191 origUtilisation = self.sr.lvmCache.getSize(self.lvname)
2192 if self.sr.isMaster:
2193 # the master can prepare the VDI locally
2194 if attach:
2195 self.lvmcowutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type)
2196 else:
2197 self.lvmcowutil.detachThin(self.session, self.sr.lvmCache, self.sr.uuid, self.uuid, self.vdi_type)
2198 else:
2199 fn = "attach"
2200 if not attach:
2201 fn = "detach"
2202 pools = self.session.xenapi.pool.get_all()
2203 master = self.session.xenapi.pool.get_master(pools[0])
2204 rv = self.session.xenapi.host.call_plugin(
2205 master,
2206 self.sr.THIN_PLUGIN,
2207 fn,
2208 {
2209 "srUuid": self.sr.uuid,
2210 "vdiUuid": self.uuid,
2211 "vdiType": vdiType
2212 }
2213 )
2214 util.SMlog("call-plugin returned: %s" % rv)
2215 if not rv:
2216 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
2217 # refresh to pick up the size change on this slave
2218 self.sr.lvmCache.activateNoRefcount(self.lvname, True)
2220 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
2221 if origUtilisation != self.utilisation:
2222 vdi_ref = self.sr.srcmd.params['vdi_ref']
2223 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
2224 str(self.utilisation))
2225 stats = lvutil._getVGstats(self.sr.vgname)
2226 sr_utilisation = stats['physical_utilisation']
2227 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
2228 str(sr_utilisation))
2230 @override
2231 def update(self, sr_uuid, vdi_uuid) -> None:
2232 if self.sr.legacyMode:
2233 return
2235 # Sync the name_label of this VDI on storage with the name_label in XAPI
2236 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2237 update_map = {}
2238 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
2239 METADATA_OBJECT_TYPE_VDI
2240 update_map[UUID_TAG] = self.uuid
2241 update_map[NAME_LABEL_TAG] = util.to_plain_string( \
2242 self.session.xenapi.VDI.get_name_label(vdi_ref))
2243 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
2244 self.session.xenapi.VDI.get_name_description(vdi_ref))
2245 update_map[SNAPSHOT_TIME_TAG] = \
2246 self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
2247 update_map[METADATA_OF_POOL_TAG] = \
2248 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
2249 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
2251 @override
2252 def _ensure_cbt_space(self) -> None:
2253 self.sr.ensureCBTSpace()
2255 @override
2256 def _create_cbt_log(self) -> str:
2257 logname = self._get_cbt_logname(self.uuid)
2258 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG)
2259 logpath = super(LVMVDI, self)._create_cbt_log()
2260 self.sr.lvmCache.deactivateNoRefcount(logname)
2261 return logpath
2263 @override
2264 def _delete_cbt_log(self) -> None:
2265 logpath = self._get_cbt_logpath(self.uuid)
2266 if self._cbt_log_exists(logpath):
2267 logname = self._get_cbt_logname(self.uuid)
2268 self.sr.lvmCache.remove(logname)
2270 @override
2271 def _rename(self, oldpath, newpath) -> None:
2272 oldname = os.path.basename(oldpath)
2273 newname = os.path.basename(newpath)
2274 self.sr.lvmCache.rename(oldname, newname)
2276 @override
2277 def update_slaves_on_cbt_disable(self, cbtlog) -> None:
2278 args = {
2279 "vgName": self.sr.vgname,
2280 "action1": "deactivateNoRefcount",
2281 "lvName1": cbtlog
2282 }
2284 host_refs = util.get_hosts_attached_on(self.session, [self.uuid])
2286 message = f"Deactivating {cbtlog}"
2287 self.sr.call_on_slave(args, host_refs, message)
2289 @override
2290 def _activate_cbt_log(self, lv_name) -> bool:
2291 self.sr.lvmCache.refresh()
2292 if not self.sr.lvmCache.is_active(lv_name):
2293 try:
2294 self.sr.lvmCache.activateNoRefcount(lv_name)
2295 return True
2296 except Exception as e:
2297 util.SMlog("Exception in _activate_cbt_log, "
2298 "Error: %s." % str(e))
2299 raise
2300 else:
2301 return False
2303 @override
2304 def _deactivate_cbt_log(self, lv_name) -> None:
2305 try:
2306 self.sr.lvmCache.deactivateNoRefcount(lv_name)
2307 except Exception as e:
2308 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
2309 raise
2311 @override
2312 def _cbt_log_exists(self, logpath) -> bool:
2313 return lvutil.exists(logpath)
2315if __name__ == '__main__':
2316 SRCommand.run(LVMSR, DRIVER_INFO)
2317else:
2318 SR.registerSR(LVMSR)