Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# LVMSR: VHD and QCOW2 on LVM storage repository 

19# 

20 

21from sm_typing import Dict, List, override 

22 

23import SR 

24from SR import deviceCheck 

25import VDI 

26import SRCommand 

27import util 

28import lvutil 

29import lvmcache 

30import scsiutil 

31import lock 

32import os 

33import sys 

34import time 

35import errno 

36import xs_errors 

37import cleanup 

38import blktap2 

39from journaler import Journaler 

40from refcounter import RefCounter 

41from ipc import IPCFlag 

42from constants import NS_PREFIX_LVM, VG_LOCATION, VG_PREFIX 

43from cowutil import CowUtil, getCowUtil, getImageStringFromVdiType, getVdiTypeFromImageFormat 

44from lvmcowutil import LV_PREFIX, LvmCowUtil 

45from lvmanager import LVActivator 

46from vditype import VdiType 

47import XenAPI # pylint: disable=import-error 

48import re 

49from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \ 

50 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \ 

51 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \ 

52 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \ 

53 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG 

54from metadata import retrieveXMLfromFile, _parseXML 

55from xmlrpc.client import DateTime 

56import glob 

57from constants import CBTLOG_TAG 

58from fairlock import Fairlock 

# Root of the device-mapper names for this driver's volume groups.
DEV_MAPPER_ROOT = os.path.join('/dev/mapper', VG_PREFIX)

# Parent VDI uuid -> list of child VDI uuids.
# NOTE(review): populated elsewhere (presumably during _loadvdis/scan) — confirm.
geneology: Dict[str, List[str]] = {}

# SR/VDI operations this driver advertises to XAPI.
CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
        "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
        "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
        "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
        "VDI_ACTIVATE", "VDI_DEACTIVATE"]

# Device-config keys understood by this driver.
CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]

# Static driver description reported to XAPI.
DRIVER_INFO = {
    'name': 'Local VHD and QCOW2 on LVM',
    'description': 'SR plugin which represents disks as VHD and QCOW2 disks on ' + \
            'Logical Volumes within a locally-attached Volume Group',
    'vendor': 'XenSource Inc',
    'copyright': '(C) 2008 XenSource Inc',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION
    }

# Accepted values of the vdi-create "type" parameter, mapped to VdiType.
CREATE_PARAM_TYPES = {
    "raw": VdiType.RAW,
    "vhd": VdiType.VHD,
    "qcow2": VdiType.QCOW2
}

# Operations that must hold the SR-wide lock exclusively.
OPS_EXCLUSIVE = [
    "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
    "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
    "vdi_clone"]

# Log if snapshot pauses VM for more than this many seconds
LONG_SNAPTIME = 60

95 

class LVMSR(SR.SR):
    """LVM-backed SR: represents VDIs as VHD/QCOW2 (or raw) images held in
    logical volumes of a locally-attached volume group."""

    DRIVER_TYPE = 'lvhd'

    # Supported allocation policies; default is full (thick) provisioning.
    PROVISIONING_TYPES = ["thin", "thick"]
    PROVISIONING_DEFAULT = "thick"
    THIN_PLUGIN = "lvhd-thin"

    PLUGIN_ON_SLAVE = "on-slave"

    # sm-config key marking the SR as non-legacy (VHD-aware).
    FLAG_USE_VHD = "use_vhd"
    # Name of the metadata management logical volume.
    MDVOLUME_NAME = "MGT"

    ALLOCATION_QUANTUM = "allocation_quantum"
    INITIAL_ALLOCATION = "initial_allocation"

    LOCK_RETRY_INTERVAL = 3
    LOCK_RETRY_ATTEMPTS = 10

    # other-config key enabling fault-injection test mode, and the
    # recognised failure-point names.
    TEST_MODE_KEY = "testmode"
    TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
    TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
    TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
    TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
    TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
    TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
    TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"

    # Maps each test-mode name to the environment variable that vhd-util
    # reads to trigger the corresponding failure.
    ENV_VAR_VHD_TEST = {
        TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
            "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
        TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
            "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
        TEST_MODE_VHD_FAIL_REPARENT_END:
            "VHD_UTIL_TEST_FAIL_REPARENT_END",
        TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
            "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
        TEST_MODE_VHD_FAIL_RESIZE_DATA:
            "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
        TEST_MODE_VHD_FAIL_RESIZE_METADATA:
            "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
        TEST_MODE_VHD_FAIL_RESIZE_END:
            "VHD_UTIL_TEST_FAIL_RESIZE_END"
    }
    # Currently-active test mode ("" means disabled).
    testMode = ""

    # True until sm-config/metadata indicates the SR uses the VHD layout.
    legacyMode = True

142 

143 @override 

144 @staticmethod 

145 def handles(type) -> bool: 

146 """Returns True if this SR class understands the given dconf string""" 

147 # we can pose as LVMSR or EXTSR for compatibility purposes 

148 if __name__ == '__main__': 

149 name = sys.argv[0] 

150 else: 

151 name = __name__ 

152 if name.endswith("LVMSR"): 

153 return type == "lvm" 

154 elif name.endswith("EXTSR"): 

155 return type == "ext" 

156 return type == LVMSR.DRIVER_TYPE 

157 

    def __init__(self, srcmd, sr_uuid):
        """Standard SR construction followed by selection of the preferred
        on-disk image formats (helper defined elsewhere in the hierarchy)."""
        SR.SR.__init__(self, srcmd, sr_uuid)
        self._init_preferred_image_formats()

160 self._init_preferred_image_formats() 

161 

    @override
    def load(self, sr_uuid) -> None:
        """Initialise per-command SR state: locks, paths, the LVM cache,
        journaler, provisioning mode and the VDI->type map from storage.

        Returns early (after cache/journaler setup) when no sr_ref is
        present, which indicates a probe call.
        """
        self.ops_exclusive = OPS_EXCLUSIVE

        # Only the pool master performs journal replay / metadata checks.
        self.isMaster = False
        if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
            self.isMaster = True

        self.lock = lock.Lock(lock.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = SR.DEFAULT_TAP
        self.uuid = sr_uuid
        self.vgname = VG_PREFIX + self.uuid
        self.path = os.path.join(VG_LOCATION, self.vgname)
        self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
        self.provision = self.PROVISIONING_DEFAULT

        has_sr_ref = self.srcmd.params.get("sr_ref")
        if has_sr_ref:
            self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
        else:
            self.other_conf = None

        # Optional LVM configuration override from SR other-config.
        self.lvm_conf = None
        if self.other_conf:
            self.lvm_conf = self.other_conf.get('lvm-conf')

        try:
            self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf)
        except:
            raise xs_errors.XenError('SRUnavailable', \
                    opterr='Failed to initialise the LVMCache')
        self.lvActivator = LVActivator(self.uuid, self.lvmCache)
        self.journaler = Journaler(self.lvmCache)
        if not has_sr_ref:
            return  # must be a probe call
        # Test for thick vs thin provisioning conf parameter
        if 'allocation' in self.dconf:
            if self.dconf['allocation'] in self.PROVISIONING_TYPES:
                self.provision = self.dconf['allocation']
            else:
                raise xs_errors.XenError('InvalidArg', \
                        opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)

        if self.other_conf.get(self.TEST_MODE_KEY):
            self.testMode = self.other_conf[self.TEST_MODE_KEY]
            self._prepareTestMode()

        self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        # sm_config flag overrides PBD, if any
        if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
            self.provision = self.sm_config.get('allocation')

        if self.sm_config.get(self.FLAG_USE_VHD) == "true":
            self.legacyMode = False

        if lvutil._checkVG(self.vgname):
            # Replay any pending journal entries on the master, except for
            # per-VDI attach/detach/activate/deactivate commands.
            if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",
                    "vdi_activate", "vdi_deactivate"]:
                self._undoAllJournals()
            if not self.cmd in ["sr_attach", "sr_probe"]:
                self._checkMetadataVolume()

        self.mdexists = False

        # get a VDI -> TYPE map from the storage
        contains_uuid_regex = \
                re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
        self.storageVDIs = {}

        for key in self.lvmCache.lvs.keys():
            # if the lvname has a uuid in it
            type = None
            vdi = None
            if contains_uuid_regex.search(key) is not None:
                # The LV prefix identifies the VDI type; the remainder of
                # the LV name is the VDI uuid.
                for vdi_type, prefix in LV_PREFIX.items():
                    if key.startswith(prefix):
                        vdi = key[len(prefix):]
                        self.storageVDIs[vdi] = vdi_type
                        break

        # check if metadata volume exists (best-effort; absence is normal)
        try:
            self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
        except:
            pass

247 

    @override
    def cleanup(self) -> None:
        """Deactivate every LV that this command activated via lvActivator."""
        # we don't need to hold the lock to dec refcounts of activated LVs
        if not self.lvActivator.deactivateAll():
            raise util.SMException("failed to deactivate LVs")

253 

    def updateSRMetadata(self, allocation):
        """Write a full copy of the SR metadata to the MGT volume: SR-level
        info plus one entry per VDI currently known to XAPI.

        allocation -- provisioning policy string stored under ALLOCATION_TAG.
        Raises xs_errors.XenError('MetadataError') on any failure.
        """
        try:
            # Add SR specific SR metadata
            sr_info = \
                {ALLOCATION_TAG: allocation,
                 UUID_TAG: self.uuid,
                 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
                 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
                 }

            vdi_info = {}
            for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
                vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)

                # Create the VDI entry in the SR metadata; boolean XAPI
                # fields are stored as ints (0/1).
                vdi_info[vdi_uuid] = \
                    {
                    UUID_TAG: vdi_uuid,
                    NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
                    NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
                    IS_A_SNAPSHOT_TAG: \
                        int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
                    SNAPSHOT_OF_TAG: \
                        self.session.xenapi.VDI.get_snapshot_of(vdi),
                    SNAPSHOT_TIME_TAG: \
                        self.session.xenapi.VDI.get_snapshot_time(vdi),
                    TYPE_TAG: \
                        self.session.xenapi.VDI.get_type(vdi),
                    VDI_TYPE_TAG: \
                        self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'],
                    READ_ONLY_TAG: \
                        int(self.session.xenapi.VDI.get_read_only(vdi)),
                    METADATA_OF_POOL_TAG: \
                        self.session.xenapi.VDI.get_metadata_of_pool(vdi),
                    MANAGED_TAG: \
                        int(self.session.xenapi.VDI.get_managed(vdi))
                    }
            LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)

        except Exception as e:
            raise xs_errors.XenError('MetadataError', \
                    opterr='Error upgrading SR Metadata: %s' % str(e))

296 

297 def syncMetadataAndStorage(self): 

298 try: 

299 # if a VDI is present in the metadata but not in the storage 

300 # then delete it from the metadata 

301 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] 

302 for vdi in list(vdi_info.keys()): 

303 update_map = {} 

304 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 304 ↛ 311line 304 didn't jump to line 311, because the condition on line 304 was never false

305 # delete this from metadata 

306 LVMMetadataHandler(self.mdpath). \ 

307 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG]) 

308 else: 

309 # search for this in the metadata, compare types 

310 # self.storageVDIs is a map of vdi_uuid to vdi_type 

311 if vdi_info[vdi][VDI_TYPE_TAG] != \ 

312 self.storageVDIs[vdi_info[vdi][UUID_TAG]]: 

313 # storage type takes authority 

314 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \ 

315 = METADATA_OBJECT_TYPE_VDI 

316 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG] 

317 update_map[VDI_TYPE_TAG] = \ 

318 self.storageVDIs[vdi_info[vdi][UUID_TAG]] 

319 LVMMetadataHandler(self.mdpath) \ 

320 .updateMetadata(update_map) 

321 else: 

322 # This should never happen 

323 pass 

324 

325 except Exception as e: 

326 raise xs_errors.XenError('MetadataError', \ 

327 opterr='Error synching SR Metadata and storage: %s' % str(e)) 

328 

    def syncMetadataAndXapi(self):
        """Bring the metadata volume in line with XAPI: refresh SR-level
        parameters, then propagate any changed VDI name label/description
        from XAPI into the metadata.

        Raises xs_errors.XenError('MetadataError') on any failure.
        """
        try:
            # get metadata
            (sr_info, vdi_info) = \
                LVMMetadataHandler(self.mdpath, False).getMetadata()

            # First synch SR parameters
            self.update(self.uuid)

            # Now update the VDI information in the metadata if required
            for vdi_offset in vdi_info.keys():
                try:
                    vdi_ref = \
                        self.session.xenapi.VDI.get_by_uuid( \
                            vdi_info[vdi_offset][UUID_TAG])
                except:
                    # may be the VDI is not in XAPI yet dont bother
                    continue

                new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
                new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))

                # Only write the metadata when something actually changed.
                if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
                        vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
                        new_name_description:
                    update_map = {}
                    update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
                        METADATA_OBJECT_TYPE_VDI
                    update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
                    update_map[NAME_LABEL_TAG] = new_name_label
                    update_map[NAME_DESCRIPTION_TAG] = new_name_description
                    LVMMetadataHandler(self.mdpath) \
                        .updateMetadata(update_map)
        except Exception as e:
            raise xs_errors.XenError('MetadataError', \
                    opterr='Error synching SR Metadata and XAPI: %s' % str(e))

365 

    def _checkMetadataVolume(self):
        """Refresh the metadata-volume state (self.mdexists/legacyMode).

        On the master at sr_attach time the MGT volume is activated and the
        metadata is synchronised with sm-config, storage and XAPI; when the
        volume is missing and the SR is not in legacy mode it is created.
        """
        util.SMlog("Entering _checkMetadataVolume")
        self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
        if self.isMaster:
            if self.mdexists and self.cmd == "sr_attach":
                try:
                    # activate the management volume
                    # will be deactivated at detach time
                    self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
                    self._synchSmConfigWithMetaData()
                    util.SMlog("Sync SR metadata and the state on the storage.")
                    self.syncMetadataAndStorage()
                    self.syncMetadataAndXapi()
                except Exception as e:
                    # best-effort: a failed sync is logged but must not
                    # block the attach
                    util.SMlog("Exception in _checkMetadataVolume, " \
                               "Error: %s." % str(e))
            elif not self.mdexists and not self.legacyMode:
                self._introduceMetaDataVolume()

        if self.mdexists:
            self.legacyMode = False

387 

388 def _synchSmConfigWithMetaData(self): 

389 util.SMlog("Synching sm-config with metadata volume") 

390 

391 try: 

392 # get SR info from metadata 

393 sr_info = {} 

394 map = {} 

395 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0] 

396 

397 if sr_info == {}: 397 ↛ 398line 397 didn't jump to line 398, because the condition on line 397 was never true

398 raise Exception("Failed to get SR information from metadata.") 

399 

400 if "allocation" in sr_info: 400 ↛ 404line 400 didn't jump to line 404, because the condition on line 400 was never false

401 self.provision = sr_info.get("allocation") 

402 map['allocation'] = sr_info.get("allocation") 

403 else: 

404 raise Exception("Allocation key not found in SR metadata. " 

405 "SR info found: %s" % sr_info) 

406 

407 except Exception as e: 

408 raise xs_errors.XenError( 

409 'MetadataError', 

410 opterr='Error reading SR params from ' 

411 'metadata Volume: %s' % str(e)) 

412 try: 

413 map[self.FLAG_USE_VHD] = 'true' 

414 self.session.xenapi.SR.set_sm_config(self.sr_ref, map) 

415 except: 

416 raise xs_errors.XenError( 

417 'MetadataError', 

418 opterr='Error updating sm_config key') 

419 

420 def _introduceMetaDataVolume(self): 

421 util.SMlog("Creating Metadata volume") 

422 try: 

423 config = {} 

424 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024) 

425 

426 # activate the management volume, will be deactivated at detach time 

427 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) 

428 

429 name_label = util.to_plain_string( \ 

430 self.session.xenapi.SR.get_name_label(self.sr_ref)) 

431 name_description = util.to_plain_string( \ 

432 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

433 config[self.FLAG_USE_VHD] = "true" 

434 config['allocation'] = self.provision 

435 self.session.xenapi.SR.set_sm_config(self.sr_ref, config) 

436 

437 # Add the SR metadata 

438 self.updateSRMetadata(self.provision) 

439 except Exception as e: 

440 raise xs_errors.XenError('MetadataError', \ 

441 opterr='Error introducing Metadata Volume: %s' % str(e)) 

442 

443 def _removeMetadataVolume(self): 

444 if self.mdexists: 

445 try: 

446 self.lvmCache.remove(self.MDVOLUME_NAME) 

447 except: 

448 raise xs_errors.XenError('MetadataError', \ 

449 opterr='Failed to delete MGT Volume') 

450 

    def _refresh_size(self):
        """
        Refreshes the size of the backing device.
        Return true if all paths/devices agree on the same size.
        """
        if hasattr(self, 'SCSIid'):
            # LVMoHBASR, LVMoISCSISR
            return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
        else:
            # LVMSR
            devices = self.dconf['device'].split(',')
            scsiutil.refreshdev(devices)
            return True

464 

    def _expand_size(self):
        """
        Expands the size of the SR by growing into additional available
        space, if extra space is available on the backing device.
        Needs to be called after a successful call of _refresh_size.
        """
        currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
        # We are comparing PV- with VG-sizes that are aligned. Need a threshold
        resizethreshold = 100 * 1024 * 1024  # 100MB
        devices = self.dconf['device'].split(',')
        totaldevicesize = 0
        for device in devices:
            totaldevicesize = totaldevicesize + scsiutil.getsize(device)
        if totaldevicesize >= (currentvgsize + resizethreshold):
            try:
                if hasattr(self, 'SCSIid'):
                    # LVMoHBASR, LVMoISCSISR might have slaves
                    scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
                            getattr(self, 'SCSIid'))
                util.SMlog("LVMSR._expand_size for %s will resize the pv." %
                           self.uuid)
                # Grow each PV into the newly available device space.
                for pv in lvutil.get_pv_for_vg(self.vgname):
                    lvutil.resizePV(pv)
            except:
                # best-effort: failure to grow is logged, not fatal
                util.logException("LVMSR._expand_size for %s failed to resize"
                                  " the PV" % self.uuid)

491 

    @override
    @deviceCheck
    def create(self, uuid, size) -> None:
        """Create the SR: verify the devices are unused, create the volume
        group and mark the SR as non-legacy (use_vhd).

        Raises xs_errors.XenError: 'LVMMaster' on a non-master, 'SRExists'
        if the VG already exists, 'SRInUse' if a device is claimed elsewhere.
        """
        util.SMlog("LVMSR.create for %s" % self.uuid)
        if not self.isMaster:
            util.SMlog('sr_create blocked for non-master')
            raise xs_errors.XenError('LVMMaster')

        if lvutil._checkVG(self.vgname):
            raise xs_errors.XenError('SRExists')

        # Check none of the devices already in use by other PBDs
        if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
            raise xs_errors.XenError('SRInUse')

        # Check serial number entry in SR records
        for dev in self.dconf['device'].split(','):
            if util.test_scsiserial(self.session, dev):
                raise xs_errors.XenError('SRInUse')

        lvutil.createVG(self.dconf['device'], self.vgname)

        # Update serial number string
        scsiutil.add_serial_record(self.session, self.sr_ref, \
                scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))

        # since this is an SR.create turn off legacy mode
        self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
                self.FLAG_USE_VHD, 'true')

521 

    @override
    def delete(self, uuid) -> None:
        """Delete the SR: force a GC run, tear down dev-mapper entries and
        their symlinks, remove the metadata volume and finally the VG.

        Raises xs_errors.XenError ('LVMMaster', 'SRNotEmpty') or a generic
        Exception if any dev-mapper/symlink cleanup step failed.
        """
        util.SMlog("LVMSR.delete for %s" % self.uuid)
        if not self.isMaster:
            raise xs_errors.XenError('LVMMaster')
        cleanup.gc_force(self.session, self.uuid)

        success = True
        for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
            # only touch entries belonging to this SR's VG
            if util.extractSRFromDevMapper(fileName) != self.uuid:
                continue

            if util.doesFileHaveOpenHandles(fileName):
                util.SMlog("LVMSR.delete: The dev mapper entry %s has open " \
                           "handles" % fileName)
                success = False
                continue

            # Now attempt to remove the dev mapper entry
            if not lvutil.removeDevMapperEntry(fileName, False):
                success = False
                continue

            try:
                # Decode the dev-mapper name back to the LV name ('-' is an
                # escaped '/'; '--' is a literal '-') and drop the symlink.
                lvname = os.path.basename(fileName.replace('-', '/'). \
                        replace('//', '-'))
                lpath = os.path.join(self.path, lvname)
                os.unlink(lpath)
            except OSError as e:
                # an already-missing symlink (ENOENT) is fine
                if e.errno != errno.ENOENT:
                    util.SMlog("LVMSR.delete: failed to remove the symlink for " \
                               "file %s. Error: %s" % (fileName, str(e)))
                    success = False

        if success:
            try:
                if util.pathexists(self.path):
                    os.rmdir(self.path)
            except Exception as e:
                util.SMlog("LVMSR.delete: failed to remove the symlink " \
                           "directory %s. Error: %s" % (self.path, str(e)))
                success = False

        self._removeMetadataVolume()
        self.lvmCache.refresh()
        # Refuse to delete an SR that still holds image volumes.
        if LvmCowUtil.getVolumeInfo(self.lvmCache):
            raise xs_errors.XenError('SRNotEmpty')

        if not success:
            raise Exception("LVMSR delete failed, please refer to the log " \
                            "for details.")

        lvutil.removeVG(self.dconf['device'], self.vgname)
        self._cleanup()

576 

    @override
    def attach(self, uuid) -> None:
        """Attach the SR: verify the VG exists, refresh metadata state and
        device sizes, upgrade away from legacy mode if COW volumes are
        present, and set the block scheduler on each device."""
        util.SMlog("LVMSR.attach for %s" % self.uuid)

        self._cleanup(True)  # in case of host crashes, if detach wasn't called

        if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname):
            raise xs_errors.XenError('SRUnavailable', \
                    opterr='no such volume group: %s' % self.vgname)

        # Refresh the metadata status
        self._checkMetadataVolume()

        refreshsizeok = self._refresh_size()

        if self.isMaster:
            # only grow when every path agreed on the refreshed size
            if refreshsizeok:
                self._expand_size()

            # Update SCSIid string
            util.SMlog("Calling devlist_to_serial")
            scsiutil.add_serial_record(
                self.session, self.sr_ref,
                scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))

        # Test Legacy Mode Flag and update if COW volumes exist
        if self.isMaster and self.legacyMode:
            vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
            for uuid, info in vdiInfo.items():
                if VdiType.isCowImage(info.vdiType):
                    self.legacyMode = False
                    map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
                    self._introduceMetaDataVolume()
                    break

        # Set the block scheduler
        for dev in self.dconf['device'].split(','):
            self.block_setscheduler(dev)

615 

    @override
    def detach(self, uuid) -> None:
        """Detach the SR: abort any GC, then best-effort removal of the
        SR's dev-mapper entries, symlinks and symlink directory; finally
        clean up lock files (on slaves only).

        Raises a generic Exception when cleanup could not fully complete.
        """
        util.SMlog("LVMSR.detach for %s" % self.uuid)
        cleanup.abort(self.uuid)

        # Do a best effort cleanup of the dev mapper entries
        # go through all devmapper entries for this VG
        success = True
        for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
            if util.extractSRFromDevMapper(fileName) != self.uuid:
                continue

            with Fairlock('devicemapper'):
                # check if any file has open handles
                if util.doesFileHaveOpenHandles(fileName):
                    # if yes, log this and signal failure
                    util.SMlog(
                        f"LVMSR.detach: The dev mapper entry {fileName} has "
                        "open handles")
                    success = False
                    continue

            # Now attempt to remove the dev mapper entry
            if not lvutil.removeDevMapperEntry(fileName, False):
                success = False
                continue

            # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
            try:
                lvname = os.path.basename(fileName.replace('-', '/'). \
                        replace('//', '-'))
                lvname = os.path.join(self.path, lvname)
                util.force_unlink(lvname)
            except Exception as e:
                util.SMlog("LVMSR.detach: failed to remove the symlink for " \
                           "file %s. Error: %s" % (fileName, str(e)))
                success = False

        # now remove the directory where the symlinks are
        # this should pass as the directory should be empty by now
        if success:
            try:
                if util.pathexists(self.path):
                    os.rmdir(self.path)
            except Exception as e:
                util.SMlog("LVMSR.detach: failed to remove the symlink " \
                           "directory %s. Error: %s" % (self.path, str(e)))
                success = False

        if not success:
            raise Exception("SR detach failed, please refer to the log " \
                            "for details.")

        # Don't delete lock files on the master as it will break the locking
        # between SM and any GC thread that survives through SR.detach.
        # However, we should still delete lock files on slaves as it is the
        # only place to do so.
        self._cleanup(self.isMaster)

674 

675 @override 

676 def forget_vdi(self, uuid) -> None: 

677 if not self.legacyMode: 

678 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid) 

679 super(LVMSR, self).forget_vdi(uuid) 

680 

    @override
    def scan(self, uuid) -> None:
        """Scan the SR (master only): refresh sizes and the LVM cache,
        introduce into XAPI any VDIs present only in the metadata volume,
        reconcile snapshot relationships and CBT state, then delegate to
        the base-class scan and kick the GC.

        LVs activated during the scan are always deactivated on exit.
        """
        activated_lvs = set()
        try:
            util.SMlog("LVMSR.scan for %s" % self.uuid)
            if not self.isMaster:
                util.SMlog('sr_scan blocked for non-master')
                raise xs_errors.XenError('LVMMaster')

            if self._refresh_size():
                self._expand_size()
            self.lvmCache.refresh()
            # LVs tagged as CBT logs; entries are consumed as they are
            # matched to VDIs below.
            cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
            self._loadvdis()
            stats = lvutil._getVGstats(self.vgname)
            self.physical_size = stats['physical_size']
            self.physical_utilisation = stats['physical_utilisation']

            # Now check if there are any VDIs in the metadata, which are not in
            # XAPI
            if self.mdexists:
                vdiToSnaps: Dict[str, List[str]] = {}
                # get VDIs from XAPI
                vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
                vdi_uuids = set([])
                for vdi in vdis:
                    vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))

                info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]

                for vdi in list(info.keys()):
                    vdi_uuid = info[vdi][UUID_TAG]
                    # remember snapshot relationships for the fix-up pass below
                    if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):
                        if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
                            vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
                        else:
                            vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]

                    if vdi_uuid not in vdi_uuids:
                        util.SMlog("Introduce VDI %s as it is present in " \
                                   "metadata and not in XAPI." % vdi_uuid)
                        vdi_type = info[vdi][VDI_TYPE_TAG]
                        sm_config = {}
                        sm_config['vdi_type'] = vdi_type
                        lvname = "%s%s" % (LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
                        # activate the LV so its size/parent can be queried;
                        # deactivated in the finally block
                        self.lvActivator.activate(
                            vdi_uuid, lvname, LVActivator.NORMAL)
                        activated_lvs.add(vdi_uuid)
                        lvPath = os.path.join(self.path, lvname)

                        if not VdiType.isCowImage(vdi_type):
                            # raw volume: virtual size == LV size
                            size = self.lvmCache.getSize(LV_PREFIX[vdi_type] + vdi_uuid)
                            utilisation = \
                                util.roundup(lvutil.LVM_SIZE_INCREMENT,
                                             int(size))
                        else:
                            # COW image (VHD/QCOW2): query the image itself
                            cowutil = getCowUtil(vdi_type)
                            lvmcowutil = LvmCowUtil(cowutil)

                            parent = cowutil.getParentNoCheck(lvPath)

                            if parent is not None:
                                sm_config['vhd-parent'] = parent[parent.find('-') + 1:]
                            size = cowutil.getSizeVirt(lvPath)
                            if self.provision == "thin":
                                utilisation = util.roundup(
                                    lvutil.LVM_SIZE_INCREMENT,
                                    cowutil.calcOverheadEmpty(max(size, cowutil.getDefaultPreallocationSizeVirt()))
                                )
                            else:
                                utilisation = lvmcowutil.calcVolumeSize(int(size))

                        vdi_ref = self.session.xenapi.VDI.db_introduce(
                            vdi_uuid,
                            info[vdi][NAME_LABEL_TAG],
                            info[vdi][NAME_DESCRIPTION_TAG],
                            self.sr_ref,
                            info[vdi][TYPE_TAG],
                            False,
                            bool(int(info[vdi][READ_ONLY_TAG])),
                            {},
                            vdi_uuid,
                            {},
                            sm_config)

                        self.session.xenapi.VDI.set_managed(vdi_ref,
                                bool(int(info[vdi][MANAGED_TAG])))
                        self.session.xenapi.VDI.set_virtual_size(vdi_ref,
                                str(size))
                        self.session.xenapi.VDI.set_physical_utilisation( \
                            vdi_ref, str(utilisation))
                        self.session.xenapi.VDI.set_is_a_snapshot( \
                            vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG])))
                        if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):
                            self.session.xenapi.VDI.set_snapshot_time( \
                                vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG]))
                        if info[vdi][TYPE_TAG] == 'metadata':
                            self.session.xenapi.VDI.set_metadata_of_pool( \
                                vdi_ref, info[vdi][METADATA_OF_POOL_TAG])

                    # Update CBT status of disks either just added
                    # or already in XAPI
                    cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
                    if cbt_logname in cbt_vdis:
                        vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
                        self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
                        # For existing VDIs, update local state too
                        # Scan in base class SR updates existing VDIs
                        # again based on local states
                        if vdi_uuid in self.vdis:
                            self.vdis[vdi_uuid].cbt_enabled = True
                        cbt_vdis.remove(cbt_logname)

                # Now set the snapshot statuses correctly in XAPI
                for srcvdi in vdiToSnaps.keys():
                    try:
                        srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
                    except:
                        # the source VDI no longer exists, continue
                        continue

                    for snapvdi in vdiToSnaps[srcvdi]:
                        try:
                            # this might fail in cases where its already set
                            snapref = \
                                self.session.xenapi.VDI.get_by_uuid(snapvdi)
                            self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
                        except Exception as e:
                            util.SMlog("Setting snapshot failed. " \
                                       "Error: %s" % str(e))

            if cbt_vdis:
                # If we have items remaining in this list,
                # they are cbt_metadata VDI that XAPI doesn't know about
                # Add them to self.vdis and they'll get added to the DB
                for cbt_vdi in cbt_vdis:
                    cbt_uuid = cbt_vdi.split(".")[0]
                    new_vdi = self.vdi(cbt_uuid)
                    new_vdi.ty = "cbt_metadata"
                    new_vdi.cbt_enabled = True
                    self.vdis[cbt_uuid] = new_vdi

            super(LVMSR, self).scan(uuid)
            self._kickGC()

        finally:
            # always release any LVs we activated for inspection
            for vdi in activated_lvs:
                self.lvActivator.deactivate(
                    vdi, LVActivator.NORMAL, False)

830 

831 @override 

832 def update(self, uuid) -> None: 

833 if not lvutil._checkVG(self.vgname): 833 ↛ 834line 833 didn't jump to line 834, because the condition on line 833 was never true

834 return 

835 self._updateStats(uuid, 0) 

836 

837 if self.legacyMode: 837 ↛ 838line 837 didn't jump to line 838, because the condition on line 837 was never true

838 return 

839 

840 # synch name_label in metadata with XAPI 

841 update_map = {} 

842 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \ 

843 METADATA_OBJECT_TYPE_SR, 

844 NAME_LABEL_TAG: util.to_plain_string( \ 

845 self.session.xenapi.SR.get_name_label(self.sr_ref)), 

846 NAME_DESCRIPTION_TAG: util.to_plain_string( \ 

847 self.session.xenapi.SR.get_name_description(self.sr_ref)) 

848 } 

849 LVMMetadataHandler(self.mdpath).updateMetadata(update_map) 

850 

851 def _updateStats(self, uuid, virtAllocDelta): 

852 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

853 self.virtual_allocation = valloc + virtAllocDelta 

854 util.SMlog("Setting virtual_allocation of SR %s to %d" % 

855 (uuid, self.virtual_allocation)) 

856 stats = lvutil._getVGstats(self.vgname) 

857 self.physical_size = stats['physical_size'] 

858 self.physical_utilisation = stats['physical_utilisation'] 

859 self._db_update() 

860 

861 @override 

862 @deviceCheck 

863 def probe(self) -> str: 

864 return lvutil.srlist_toxml( 

865 lvutil.scan_srlist(VG_PREFIX, self.dconf['device']), 

866 VG_PREFIX, 

867 ('metadata' in self.srcmd.params['sr_sm_config'] and \ 

868 self.srcmd.params['sr_sm_config']['metadata'] == 'true')) 

869 

870 @override 

871 def vdi(self, uuid) -> VDI.VDI: 

872 return LVMVDI(self, uuid) 

873 

    def _loadvdis(self):
        """Populate self.vdis / self.allVDIs from an LVM scan.

        Also computes self.virtual_allocation and records parent->children
        links in the module-level 'geneology' map. Hidden leaf nodes are
        dropped from self.vdis so they are not re-introduced into XAPI.

        Raises:
            xs_errors.XenError('VDIUnavailable'): if any VDI failed to scan.
        """
        self.virtual_allocation = 0
        self.vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
        self.allVDIs = {}

        for uuid, info in self.vdiInfo.items():
            # skip volumes that are mid-rename (transient GC artifacts)
            if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):
                continue
            if info.scanError:
                raise xs_errors.XenError('VDIUnavailable', \
                        opterr='Error scanning VDI %s' % uuid)
            self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
            # hidden VDIs do not count towards the virtual allocation
            if not self.vdis[uuid].hidden:
                self.virtual_allocation += self.vdis[uuid].utilisation

        for uuid, vdi in self.vdis.items():
            if vdi.parent:
                # parents present in the SR are forced read-only; every
                # child is recorded in geneology for the leaf check below
                if vdi.parent in self.vdis:
                    self.vdis[vdi.parent].read_only = True
                if vdi.parent in geneology:
                    geneology[vdi.parent].append(uuid)
                else:
                    geneology[vdi.parent] = [uuid]

        # Now remove all hidden leaf nodes to avoid introducing records that
        # will be GC'ed
        for uuid in list(self.vdis.keys()):
            if uuid not in geneology and self.vdis[uuid].hidden:
                util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
                del self.vdis[uuid]

904 

905 def _ensureSpaceAvailable(self, amount_needed): 

906 space_available = lvutil._getVGstats(self.vgname)['freespace'] 

907 if (space_available < amount_needed): 

908 util.SMlog("Not enough space! free space: %d, need: %d" % \ 

909 (space_available, amount_needed)) 

910 raise xs_errors.XenError('SRNoSpace') 

911 

912 def _handleInterruptedCloneOps(self): 

913 entries = self.journaler.getAll(LVMVDI.JRN_CLONE) 

914 for uuid, val in entries.items(): 914 ↛ 915line 914 didn't jump to line 915, because the loop on line 914 never started

915 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid) 

916 self._handleInterruptedCloneOp(uuid, val) 

917 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid) 

918 self.journaler.remove(LVMVDI.JRN_CLONE, uuid) 

919 

920 def _handleInterruptedCoalesceLeaf(self): 

921 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF) 

922 if len(entries) > 0: 922 ↛ 923line 922 didn't jump to line 923, because the condition on line 922 was never true

923 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***") 

924 cleanup.gc_force(self.session, self.uuid) 

925 self.lvmCache.refresh() 

926 

    def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
        """Either roll back or finalize the interrupted snapshot/clone
        operation. Rolling back is unsafe if the leaf images have already been
        in use and written to. However, it is always safe to roll back while
        we're still in the context of the failed snapshot operation since the
        VBD is paused for the duration of the operation"""
        util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
        lvs = LvmCowUtil.getVolumeInfo(self.lvmCache)
        # journal value encodes "<baseUuid>_<clonUuid>"
        baseUuid, clonUuid = jval.split("_")

        # is there a "base copy" VDI?
        if not lvs.get(baseUuid):
            # no base copy: make sure the original is there
            if lvs.get(origUuid):
                util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
                return
            raise util.SMException("base copy %s not present, " \
                    "but no original %s found" % (baseUuid, origUuid))

        vdis = LvmCowUtil.getVDIInfo(self.lvmCache)
        base = vdis[baseUuid]
        cowutil = getCowUtil(base.vdiType)

        if forceUndo:
            util.SMlog("Explicit revert")
            self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
            return

        # revert if either leaf LV is missing...
        if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
            util.SMlog("One or both leaves missing => revert")
            self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
            return

        # ...or failed the LVM scan...
        if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
            util.SMlog("One or both leaves invalid => revert")
            self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
            return

        orig = vdis[origUuid]
        self.lvActivator.activate(baseUuid, base.lvName, False)
        self.lvActivator.activate(origUuid, orig.lvName, False)
        if orig.parentUuid != baseUuid:
            # orig's parent is not the base copy; activate the real parent
            # too — presumably needed for the image check below (TODO confirm)
            parent = vdis[orig.parentUuid]
            self.lvActivator.activate(parent.uuid, parent.lvName, False)
        origPath = os.path.join(self.path, orig.lvName)

        # ...or the original image is structurally invalid
        if cowutil.check(origPath) != CowUtil.CheckResult.Success:
            util.SMlog("Orig image invalid => revert")
            self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
            return

        if clonUuid:
            clon = vdis[clonUuid]
            clonPath = os.path.join(self.path, clon.lvName)
            self.lvActivator.activate(clonUuid, clon.lvName, False)
            if cowutil.check(clonPath) != CowUtil.CheckResult.Success:
                util.SMlog("Clon image invalid => revert")
                self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
                return

        # everything checks out: finish the clone instead of undoing it
        util.SMlog("Snapshot appears valid, will not roll back")
        self._completeCloneOp(cowutil, vdis, origUuid, baseUuid, clonUuid)

989 

    def _undoCloneOp(self, cowutil, lvs, origUuid, baseUuid, clonUuid):
        """Roll back an interrupted clone: delete the leaf volumes and
        restore the base copy as the original VDI (un-hide, reinflate,
        rename back, fix refcounts, refresh slaves)."""
        base = lvs[baseUuid]
        basePath = os.path.join(self.path, base.name)

        # make the parent RW
        if base.readonly:
            self.lvmCache.setReadonly(base.name, False)

        ns = NS_PREFIX_LVM + self.uuid
        # preserve the binary refcount; the normal refcount is rebuilt below
        origRefcountBinary = RefCounter.check(origUuid, ns)[1]
        origRefcountNormal = 0

        # un-hide the parent
        if VdiType.isCowImage(base.vdiType):
            self.lvActivator.activate(baseUuid, base.name, False)
            origRefcountNormal = 1
            imageInfo = cowutil.getInfo(basePath, LvmCowUtil.extractUuid, False)
            if imageInfo.hidden:
                cowutil.setHidden(basePath, False)
        elif base.hidden:
            self.lvmCache.setHidden(base.name, False)

        # remove the child nodes
        if clonUuid and lvs.get(clonUuid):
            if not VdiType.isCowImage(lvs[clonUuid].vdiType):
                raise util.SMException("clone %s not a COW image" % clonUuid)
            self.lvmCache.remove(lvs[clonUuid].name)
            if self.lvActivator.get(clonUuid, False):
                self.lvActivator.remove(clonUuid, False)
        if lvs.get(origUuid):
            self.lvmCache.remove(lvs[origUuid].name)

        # inflate the parent to fully-allocated size
        if VdiType.isCowImage(base.vdiType):
            # imageInfo was set in the un-hide step above (same condition)
            lvmcowutil = LvmCowUtil(cowutil)
            fullSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
            lvmcowutil.inflate(self.journaler, self.uuid, baseUuid, base.vdiType, fullSize)

        # rename back
        origLV = LV_PREFIX[base.vdiType] + origUuid
        self.lvmCache.rename(base.name, origLV)
        RefCounter.reset(baseUuid, ns)
        if self.lvActivator.get(baseUuid, False):
            self.lvActivator.replace(baseUuid, origUuid, origLV, False)
        RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)

        # At this stage, tapdisk and SM vdi will be in paused state. Remove
        # flag to facilitate vm deactivate
        origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
        self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')

        # update LVM metadata on slaves
        slaves = util.get_slaves_attached_on(self.session, [origUuid])
        LvmCowUtil.refreshVolumeOnSlaves(self.session, self.uuid, self.vgname,
                origLV, origUuid, slaves)

        util.SMlog("*** INTERRUPTED CLONE OP: rollback success")

1047 

    def _completeCloneOp(self, cowutil, vdis, origUuid, baseUuid, clonUuid):
        """Finalize the interrupted snapshot/clone operation. This must not be
        called from the live snapshot op context because we attempt to pause/
        unpause the VBD here (the VBD is already paused during snapshot, so it
        would cause a deadlock)"""
        base = vdis[baseUuid]
        clon = None
        if clonUuid:
            clon = vdis[clonUuid]

        # NOTE(review): presumably aborts any in-flight GC for this SR before
        # we rewrite its VDI records — confirm cleanup.abort semantics
        cleanup.abort(self.uuid)

        # make sure the parent is hidden and read-only
        if not base.hidden:
            if not VdiType.isCowImage(base.vdiType):
                self.lvmCache.setHidden(base.lvName)
            else:
                basePath = os.path.join(self.path, base.lvName)
                cowutil.setHidden(basePath)
        if not base.lvReadonly:
            self.lvmCache.setReadonly(base.lvName, True)

        # NB: since this snapshot-preserving call is only invoked outside the
        # snapshot op context, we assume the LVM metadata on the involved slave
        # has by now been refreshed and do not attempt to do it here

        # Update the original record
        try:
            vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            # 'type' (shadows the builtin) is reused for both new records
            # below. NOTE(review): if get_by_uuid raises, 'type' stays unbound
            # and the vdi_info literals below would raise NameError
            type = self.session.xenapi.VDI.get_type(vdi_ref)
            sm_config["vdi_type"] = vdis[origUuid].vdiType
            sm_config['vhd-parent'] = baseUuid
            self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
        except XenAPI.Failure:
            util.SMlog("ERROR updating the orig record")

        # introduce the new VDI records
        if clonUuid:
            try:
                clon_vdi = VDI.VDI(self, clonUuid)
                clon_vdi.read_only = False
                clon_vdi.location = clonUuid
                clon_vdi.utilisation = clon.sizeLV
                clon_vdi.sm_config = {
                    "vdi_type": clon.vdiType,
                    "vhd-parent": baseUuid}

                if not self.legacyMode:
                    LVMMetadataHandler(self.mdpath). \
                        ensureSpaceIsAvailableForVdis(1)

                clon_vdi_ref = clon_vdi._db_introduce()
                util.SMlog("introduced clon VDI: %s (%s)" % \
                        (clon_vdi_ref, clonUuid))

                vdi_info = {UUID_TAG: clonUuid,
                            NAME_LABEL_TAG: clon_vdi.label,
                            NAME_DESCRIPTION_TAG: clon_vdi.description,
                            IS_A_SNAPSHOT_TAG: 0,
                            SNAPSHOT_OF_TAG: '',
                            SNAPSHOT_TIME_TAG: '',
                            TYPE_TAG: type,
                            VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
                            READ_ONLY_TAG: int(clon_vdi.read_only),
                            MANAGED_TAG: int(clon_vdi.managed),
                            METADATA_OF_POOL_TAG: ''
                            }

                if not self.legacyMode:
                    LVMMetadataHandler(self.mdpath).addVdi(vdi_info)

            except XenAPI.Failure:
                util.SMlog("ERROR introducing the clon record")

        try:
            base_vdi = VDI.VDI(self, baseUuid)  # readonly parent
            base_vdi.label = "base copy"
            base_vdi.read_only = True
            base_vdi.location = baseUuid
            base_vdi.size = base.sizeVirt
            base_vdi.utilisation = base.sizeLV
            base_vdi.managed = False
            base_vdi.sm_config = {
                "vdi_type": base.vdiType,
                "vhd-parent": baseUuid}

            if not self.legacyMode:
                LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)

            base_vdi_ref = base_vdi._db_introduce()
            util.SMlog("introduced base VDI: %s (%s)" % \
                    (base_vdi_ref, baseUuid))

            vdi_info = {UUID_TAG: baseUuid,
                        NAME_LABEL_TAG: base_vdi.label,
                        NAME_DESCRIPTION_TAG: base_vdi.description,
                        IS_A_SNAPSHOT_TAG: 0,
                        SNAPSHOT_OF_TAG: '',
                        SNAPSHOT_TIME_TAG: '',
                        TYPE_TAG: type,
                        VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
                        READ_ONLY_TAG: int(base_vdi.read_only),
                        MANAGED_TAG: int(base_vdi.managed),
                        METADATA_OF_POOL_TAG: ''
                        }

            if not self.legacyMode:
                LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
        except XenAPI.Failure:
            util.SMlog("ERROR introducing the base record")

        util.SMlog("*** INTERRUPTED CLONE OP: complete")

1161 

    def _undoAllJournals(self):
        """Undo all COW image & SM interrupted journaled operations. This call must
        be serialized with respect to all operations that create journals"""
        # undoing interrupted inflates must be done first, since undoing COW images
        # ops might require inflations
        self.lock.acquire()
        try:
            self._undoAllInflateJournals()
            self._undoAllCowJournals()
            self._handleInterruptedCloneOps()
            self._handleInterruptedCoalesceLeaf()
        finally:
            self.lock.release()
        # cleanup runs after the SR lock has been released
        self.cleanup()

1176 

    def _undoAllInflateJournals(self):
        """Undo interrupted inflate operations: deflate each journaled
        volume back to its journaled (pre-inflate) size, then drop the
        journal entry."""
        entries = self.journaler.getAll(LvmCowUtil.JOURNAL_INFLATE)
        if len(entries) == 0:
            return
        self._loadvdis()
        for uuid, val in entries.items():
            vdi = self.vdis.get(uuid)
            if vdi:
                util.SMlog("Found inflate journal %s, deflating %s to %s" % \
                        (uuid, vdi.path, val))
                # temporarily make the LV writable so we can zero and deflate
                if vdi.readonly:
                    self.lvmCache.setReadonly(vdi.lvname, False)
                self.lvActivator.activate(uuid, vdi.lvname, False)
                currSizeLV = self.lvmCache.getSize(vdi.lvname)

                cowutil = getCowUtil(vdi.vdi_type)
                lvmcowutil = LvmCowUtil(cowutil)

                # wipe the image footer at the current end before shrinking
                footer_size = cowutil.getFooterSize()
                util.zeroOut(vdi.path, currSizeLV - footer_size, footer_size)
                lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(val))
                if vdi.readonly:
                    self.lvmCache.setReadonly(vdi.lvname, True)
                if "true" == self.session.xenapi.SR.get_shared(self.sr_ref):
                    LvmCowUtil.refreshVolumeOnAllSlaves(
                        self.session, self.uuid, self.vgname, vdi.lvname, uuid
                    )
            # journal entry is removed even when the VDI was not found
            self.journaler.remove(LvmCowUtil.JOURNAL_INFLATE, uuid)
        # drop the cached scan state so later operations rescan fresh
        delattr(self, "vdiInfo")
        delattr(self, "allVDIs")

1207 

    def _undoAllCowJournals(self):
        """
        Check if there are COW journals in existence and revert them.
        """
        journals = LvmCowUtil.getAllResizeJournals(self.lvmCache)
        if len(journals) == 0:
            return
        self._loadvdis()

        for uuid, jlvName in journals:
            vdi = self.vdis[uuid]
            util.SMlog("Found COW journal %s, reverting %s" % (uuid, vdi.path))
            cowutil = getCowUtil(vdi.vdi_type)
            lvmcowutil = LvmCowUtil(cowutil)

            self.lvActivator.activate(uuid, vdi.lvname, False)
            self.lvmCache.activateNoRefcount(jlvName)
            # the revert requires the volume to be fully inflated first
            fullSize = lvmcowutil.calcVolumeSize(vdi.size)
            lvmcowutil.inflate(self.journaler, self.uuid, vdi.uuid, vdi.vdi_type, fullSize)
            try:
                jFile = os.path.join(self.path, jlvName)
                cowutil.revert(vdi.path, jFile)
            except util.CommandException:
                util.logException("COW journal revert")
                # NOTE(review): check()'s result is discarded here, yet the
                # log assumes the image is OK — confirm intended behaviour
                cowutil.check(vdi.path)
                util.SMlog("COW image revert failed but COW image ok: removing journal")
            # Attempt to reclaim unused space

            imageInfo = cowutil.getInfo(vdi.path, LvmCowUtil.extractUuid, False)
            NewSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
            if NewSize < fullSize:
                lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
            LvmCowUtil.refreshVolumeOnAllSlaves(self.session, self.uuid, self.vgname, vdi.lvname, uuid)
            self.lvmCache.remove(jlvName)
        # drop the cached scan state so later operations rescan fresh
        delattr(self, "vdiInfo")
        delattr(self, "allVDIs")

1245 

1246 def _updateSlavesPreClone(self, hostRefs, origOldLV): 

1247 masterRef = util.get_this_host_ref(self.session) 

1248 args = {"vgName": self.vgname, 

1249 "action1": "deactivateNoRefcount", 

1250 "lvName1": origOldLV} 

1251 for hostRef in hostRefs: 

1252 if hostRef == masterRef: 1252 ↛ 1253line 1252 didn't jump to line 1253, because the condition on line 1252 was never true

1253 continue 

1254 util.SMlog("Deactivate VDI on %s" % hostRef) 

1255 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1256 util.SMlog("call-plugin returned: %s" % rv) 

1257 if not rv: 1257 ↛ 1258line 1257 didn't jump to line 1258, because the condition on line 1257 was never true

1258 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1259 

1260 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV, 

1261 baseUuid, baseLV): 

1262 """We need to reactivate the original LV on each slave (note that the 

1263 name for the original LV might change), as well as init the refcount 

1264 for the base LV""" 

1265 args = {"vgName": self.vgname, 

1266 "action1": "refresh", 

1267 "lvName1": origLV, 

1268 "action2": "activate", 

1269 "ns2": NS_PREFIX_LVM + self.uuid, 

1270 "lvName2": baseLV, 

1271 "uuid2": baseUuid} 

1272 

1273 masterRef = util.get_this_host_ref(self.session) 

1274 for hostRef in hostRefs: 

1275 if hostRef == masterRef: 1275 ↛ 1276line 1275 didn't jump to line 1276, because the condition on line 1275 was never true

1276 continue 

1277 util.SMlog("Updating %s, %s, %s on slave %s" % \ 

1278 (origOldLV, origLV, baseLV, hostRef)) 

1279 rv = self.session.xenapi.host.call_plugin( 

1280 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1281 util.SMlog("call-plugin returned: %s" % rv) 

1282 if not rv: 1282 ↛ 1283line 1282 didn't jump to line 1283, because the condition on line 1282 was never true

1283 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1284 

1285 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog): 

1286 """Reactivate and refresh CBT log file on slaves""" 

1287 args = {"vgName": self.vgname, 

1288 "action1": "deactivateNoRefcount", 

1289 "lvName1": cbtlog, 

1290 "action2": "refresh", 

1291 "lvName2": cbtlog} 

1292 

1293 masterRef = util.get_this_host_ref(self.session) 

1294 for hostRef in hostRefs: 

1295 if hostRef == masterRef: 1295 ↛ 1296line 1295 didn't jump to line 1296, because the condition on line 1295 was never true

1296 continue 

1297 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef)) 

1298 rv = self.session.xenapi.host.call_plugin( 

1299 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1300 util.SMlog("call-plugin returned: %s" % rv) 

1301 if not rv: 1301 ↛ 1302line 1301 didn't jump to line 1302, because the condition on line 1301 was never true

1302 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1303 

1304 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV): 

1305 """Tell the slave we deleted the base image""" 

1306 args = {"vgName": self.vgname, 

1307 "action1": "cleanupLockAndRefcount", 

1308 "uuid1": baseUuid, 

1309 "ns1": NS_PREFIX_LVM + self.uuid} 

1310 

1311 masterRef = util.get_this_host_ref(self.session) 

1312 for hostRef in hostRefs: 

1313 if hostRef == masterRef: 1313 ↛ 1314line 1313 didn't jump to line 1314, because the condition on line 1313 was never true

1314 continue 

1315 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef)) 

1316 rv = self.session.xenapi.host.call_plugin( 

1317 hostRef, self.PLUGIN_ON_SLAVE, "multi", args) 

1318 util.SMlog("call-plugin returned: %s" % rv) 

1319 if not rv: 1319 ↛ 1320line 1319 didn't jump to line 1320, because the condition on line 1319 was never true

1320 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE) 

1321 

1322 def _cleanup(self, skipLockCleanup=False): 

1323 """delete stale refcounter, flag, and lock files""" 

1324 RefCounter.resetAll(NS_PREFIX_LVM + self.uuid) 

1325 IPCFlag(self.uuid).clearAll() 

1326 if not skipLockCleanup: 1326 ↛ 1327line 1326 didn't jump to line 1327, because the condition on line 1326 was never true

1327 lock.Lock.cleanupAll(self.uuid) 

1328 lock.Lock.cleanupAll(NS_PREFIX_LVM + self.uuid) 

1329 

1330 def _prepareTestMode(self): 

1331 util.SMlog("Test mode: %s" % self.testMode) 

1332 if self.ENV_VAR_VHD_TEST.get(self.testMode): 1332 ↛ 1333line 1332 didn't jump to line 1333, because the condition on line 1332 was never true

1333 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes" 

1334 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode]) 

1335 

1336 def _kickGC(self): 

1337 util.SMlog("Kicking GC") 

1338 cleanup.start_gc_service(self.uuid) 

1339 

1340 def ensureCBTSpace(self): 

1341 # Ensure we have space for at least one LV 

1342 self._ensureSpaceAvailable(self.journaler.LV_SIZE) 

1343 

1344 

1345class LVMVDI(VDI.VDI): 

1346 

1347 JRN_CLONE = "clone" # journal entry type for the clone operation 

1348 

    @override
    def load(self, vdi_uuid) -> None:
        """Initialise this VDI object's state (type, lvname, path).

        Three cases, in order:
          1. the SR holds scan data (sr.vdiInfo) for this uuid — init from it;
          2. the LV exists on disk — _determineType() fills everything in;
          3. neither — the VDI is being created: pick the type from the
             vdi_sm_config request, or fall back to the SR's preferred format.
        """
        self.lock = self.sr.lock
        self.lvActivator = self.sr.lvActivator
        self.loaded = False
        # legacy SRs default to RAW; the fistpoint can force this in tests
        if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"):
            self._setType(VdiType.RAW)
        self.uuid = vdi_uuid
        self.location = self.uuid
        self.exists = True

        if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
            self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
            if self.parent:
                self.sm_config_override['vhd-parent'] = self.parent
            else:
                self.sm_config_override['vhd-parent'] = None
            return

        # scan() didn't run: determine the type of the VDI manually
        if self._determineType():
            return

        # the VDI must be in the process of being created
        self.exists = False

        vdi_sm_config = self.sr.srcmd.params.get("vdi_sm_config")
        if vdi_sm_config:
            # "image-format" takes precedence over the older "type" key
            image_format = vdi_sm_config.get("image-format") or vdi_sm_config.get("type")
            if image_format:
                try:
                    self._setType(CREATE_PARAM_TYPES[image_format])
                except:
                    raise xs_errors.XenError('VDICreate', opterr='bad image format')
            if self.sr.legacyMode and self.sr.cmd == 'vdi_create' and VdiType.isCowImage(self.vdi_type):
                raise xs_errors.XenError('VDICreate', opterr='Cannot create COW type disk in legacy mode')

        if not self.vdi_type:
            # no explicit request: use the SR's preferred image format
            self._setType(getVdiTypeFromImageFormat(self.sr.preferred_image_formats[0]))

        self.lvname = "%s%s" % (LV_PREFIX[self.vdi_type], vdi_uuid)
        self.path = os.path.join(self.sr.path, self.lvname)

1391 

    @override
    def create(self, sr_uuid, vdi_uuid, size) -> str:
        """Create a new LVM-backed VDI (raw or COW) and register it with
        XAPI and, for non-legacy SRs, with the SR metadata volume.

        Must run on the pool master. Raises VDIExists if the volume is
        already present and VDICreate on any LVM/image-format failure.
        """
        util.SMlog("LVMVDI.create for %s" % self.uuid)
        if not self.sr.isMaster:
            raise xs_errors.XenError('LVMMaster')
        if self.exists:
            raise xs_errors.XenError('VDIExists')

        size = self.cowutil.validateAndRoundImageSize(int(size))

        util.SMlog("LVMVDI.create: type = %s, %s (size=%s)" % \
                (self.vdi_type, self.path, size))
        lvSize = 0
        self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
        if not VdiType.isCowImage(self.vdi_type):
            # raw volume: LV size is simply the rounded-up virtual size
            lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
        else:
            if self.sr.provision == "thin":
                # only allocate enough for an empty image for now
                lvSize = util.roundup(
                    lvutil.LVM_SIZE_INCREMENT,
                    self.cowutil.calcOverheadEmpty(max(size, self.cowutil.getDefaultPreallocationSizeVirt()))
                )
            elif self.sr.provision == "thick":
                # allocate the fully-inflated size up front
                lvSize = self.lvmcowutil.calcVolumeSize(int(size))

        self.sr._ensureSpaceAvailable(lvSize)

        try:
            self.sr.lvmCache.create(self.lvname, lvSize)
            if not VdiType.isCowImage(self.vdi_type):
                self.size = self.sr.lvmCache.getSize(self.lvname)
            else:
                self.cowutil.create(
                    self.path, int(size), False, self.cowutil.getDefaultPreallocationSizeVirt()
                )
                self.size = self.cowutil.getSizeVirt(self.path)
            self.sr.lvmCache.deactivateNoRefcount(self.lvname)
        except util.CommandException as e:
            # roll back the LV so a retry starts from a clean state
            util.SMlog("Unable to create VDI")
            self.sr.lvmCache.remove(self.lvname)
            raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)

        self.utilisation = lvSize
        self.sm_config["vdi_type"] = self.vdi_type
        self.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)

        if not self.sr.legacyMode:
            LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)

        self.ref = self._db_introduce()
        self.sr._updateStats(self.sr.uuid, self.size)

        vdi_info = {UUID_TAG: self.uuid,
                    NAME_LABEL_TAG: util.to_plain_string(self.label),
                    NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
                    IS_A_SNAPSHOT_TAG: 0,
                    SNAPSHOT_OF_TAG: '',
                    SNAPSHOT_TIME_TAG: '',
                    TYPE_TAG: self.ty,
                    VDI_TYPE_TAG: self.vdi_type,
                    READ_ONLY_TAG: int(self.read_only),
                    MANAGED_TAG: int(self.managed),
                    METADATA_OF_POOL_TAG: ''
                    }

        if not self.sr.legacyMode:
            LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)

        return VDI.VDI.get_params(self)

1461 

    @override
    def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None:
        """Delete this VDI.

        With data_only=True (data_destroy) the XAPI record is kept and only
        the SR-metadata entry and data volume are removed; otherwise the
        XAPI record is forgotten too. Finishes by updating SR stats and
        kicking the GC.
        """
        util.SMlog("LVMVDI.delete for %s" % self.uuid)
        try:
            self._loadThis()
        except xs_errors.SRException as e:
            # Catch 'VDI doesn't exist' exception
            if e.errno == 46:
                return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
            raise

        vdi_ref = self.sr.srcmd.params['vdi_ref']
        if not self.session.xenapi.VDI.get_managed(vdi_ref):
            raise xs_errors.XenError("VDIDelete", \
                    opterr="Deleting non-leaf node not permitted")

        # hide the volume before touching any records — NOTE(review):
        # presumably so an interrupted delete is still reclaimable; confirm
        if not self.hidden:
            self._markHidden()

        if not data_only:
            # Remove from XAPI and delete from MGT
            self._db_forget()
        else:
            # If this is a data_destroy call, don't remove from XAPI db
            # Only delete from MGT
            if not self.sr.legacyMode:
                LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)

        # deactivate here because it might be too late to do it in the "final"
        # step: GC might have removed the LV by then
        if self.sr.lvActivator.get(self.uuid, False):
            self.sr.lvActivator.deactivate(self.uuid, False)

        try:
            self.sr.lvmCache.remove(self.lvname)
            self.sr.lock.cleanup(vdi_uuid, NS_PREFIX_LVM + sr_uuid)
            self.sr.lock.cleanupAll(vdi_uuid)
        except xs_errors.SRException as e:
            # best-effort: the LV may still be pinned (e.g. leaf-coalesce)
            util.SMlog(
                "Failed to remove the volume (maybe is leaf coalescing) "
                "for %s err:%d" % (self.uuid, e.errno))

        self.sr._updateStats(self.sr.uuid, -self.size)
        self.sr._kickGC()
        return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)

1507 

    @override
    def attach(self, sr_uuid, vdi_uuid) -> str:
        """Attach the VDI: inflate thin COW volumes for a writable attach,
        then activate the LV and return the attach parameters.

        Refuses to attach while journal entries exist for the VDI — an SR
        scan must run first to trigger auto-repair.
        """
        util.SMlog("LVMVDI.attach for %s" % self.uuid)
        if self.sr.journaler.hasJournals(self.uuid):
            raise xs_errors.XenError('VDIUnavailable',
                    opterr='Interrupted operation detected on this VDI, '
                    'scan SR first to trigger auto-repair')

        # writable unless the first vdi_attach argument explicitly says "false"
        writable = ('args' not in self.sr.srcmd.params) or \
                (self.sr.srcmd.params['args'][0] == "true")
        needInflate = True
        if not VdiType.isCowImage(self.vdi_type) or not writable:
            needInflate = False
        else:
            self._loadThis()
            # already at (or beyond) fully-inflated size: nothing to do
            if self.utilisation >= self.lvmcowutil.calcVolumeSize(self.size):
                needInflate = False

        if needInflate:
            try:
                self._prepareThin(True, self.vdi_type)
            except:
                util.logException("attach")
                raise xs_errors.XenError('LVMProvisionAttach')

        try:
            return self._attach()
        finally:
            # best-effort: only log (do not fail the attach) on deactivation
            # problems
            if not self.sr.lvActivator.deactivateAll():
                util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)

1538 

    @override
    def detach(self, sr_uuid, vdi_uuid) -> None:
        """Detach the VDI, deflating thin-provisioned COW volumes (and
        snapshots, which are always deflated) back to minimal size."""
        util.SMlog("LVMVDI.detach for %s" % self.uuid)
        self._loadThis()
        already_deflated = (self.utilisation < \
                self.lvmcowutil.calcVolumeSize(self.size))
        needDeflate = True
        if not VdiType.isCowImage(self.vdi_type) or already_deflated:
            needDeflate = False
        elif self.sr.provision == "thick":
            needDeflate = False
            # except for snapshots, which are always deflated
            if self.sr.srcmd.cmd != 'vdi_detach_from_config':
                vdi_ref = self.sr.srcmd.params['vdi_ref']
                snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
                if snap:
                    needDeflate = True

        if needDeflate:
            try:
                self._prepareThin(False, self.vdi_type)
            except:
                util.logException("_prepareThin")
                raise xs_errors.XenError('VDIUnavailable', opterr='deflate')

        try:
            self._detach()
        finally:
            # unlike attach, failure to deactivate is fatal on detach
            if not self.sr.lvActivator.deactivateAll():
                raise xs_errors.XenError("SMGeneral", opterr="deactivation")

1569 

    # We only support offline resize
    @override
    def resize(self, sr_uuid, vdi_uuid, size) -> str:
        """Grow this VDI to *size* bytes (offline only, master only).

        For raw volumes the LV itself is resized; for COW images the LV is
        inflated (unless thin-provisioned, where it stays deflated) and the
        virtual size inside the image header is bumped. Shrinking is
        rejected.

        Returns:
            str: the standard VDI parameter struct (VDI.get_params()).

        Raises:
            xs_errors.XenError: 'LVMMaster', 'VDIUnavailable', or 'VDISize'.
        """
        util.SMlog("LVMVDI.resize for %s" % self.uuid)
        if not self.sr.isMaster:
            raise xs_errors.XenError('LVMMaster')

        self._loadThis()
        if self.hidden:
            raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')

        if size < self.size:
            util.SMlog('vdi_resize: shrinking not supported: ' + \
                    '(current size: %d, new size: %d)' % (self.size, size))
            raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')

        size = self.cowutil.validateAndRoundImageSize(int(size))

        # rounding may have made this a no-op
        if size == self.size:
            return VDI.VDI.get_params(self)

        if not VdiType.isCowImage(self.vdi_type):
            lvSizeOld = self.size
            lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
        else:
            lvSizeOld = self.utilisation
            lvSizeNew = self.lvmcowutil.calcVolumeSize(size)
            if self.sr.provision == "thin":
                # VDI is currently deflated, so keep it deflated
                lvSizeNew = lvSizeOld
        assert(lvSizeNew >= lvSizeOld)
        spaceNeeded = lvSizeNew - lvSizeOld
        self.sr._ensureSpaceAvailable(spaceNeeded)

        oldSize = self.size
        if not VdiType.isCowImage(self.vdi_type):
            # raw: the LV size is the virtual size
            self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
            self.size = self.sr.lvmCache.getSize(self.lvname)
            self.utilisation = self.size
        else:
            # COW: inflate the LV first (journaled), then grow the image
            if lvSizeNew != lvSizeOld:
                self.lvmcowutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type, lvSizeNew)
            self.cowutil.setSizeVirtFast(self.path, size)
            self.size = self.cowutil.getSizeVirt(self.path)
            self.utilisation = self.sr.lvmCache.getSize(self.lvname)

        # publish the new sizes to XAPI and the SR stats
        vdi_ref = self.sr.srcmd.params['vdi_ref']
        self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
        self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
                str(self.utilisation))
        self.sr._updateStats(self.sr.uuid, self.size - oldSize)
        super(LVMVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
        return VDI.VDI.get_params(self)

1623 

1624 @override 

1625 def clone(self, sr_uuid, vdi_uuid) -> str: 

1626 return self._do_snapshot( 

1627 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True) 

1628 

    @override
    def compose(self, sr_uuid, vdi1, vdi2) -> None:
        """Make *vdi1* the parent of this VDI (COW images only).

        Re-parents this image onto vdi1's LV, hides the new parent so the GC
        treats it as an internal node, unmanages the XAPI record given in
        args[0], and refreshes the tapdisk so the new chain takes effect.

        Raises:
            xs_errors.XenError: 'Unimplemented' for raw VDIs.
            util.SMException: if the tapdisk refresh fails.
        """
        util.SMlog("LVMSR.compose for %s -> %s" % (vdi2, vdi1))
        if not VdiType.isCowImage(self.vdi_type):
            raise xs_errors.XenError('Unimplemented')

        parent_uuid = vdi1
        parent_lvname = LV_PREFIX[self.vdi_type] + parent_uuid
        assert(self.sr.lvmCache.checkLV(parent_lvname))
        parent_path = os.path.join(self.sr.path, parent_lvname)

        # both LVs must be active before touching the image headers
        self.sr.lvActivator.activate(self.uuid, self.lvname, False)
        self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)

        self.cowutil.setParent(self.path, parent_path, False)
        self.cowutil.setHidden(parent_path)
        self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)

        if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
                True):
            raise util.SMException("failed to refresh VDI %s" % self.uuid)

        util.SMlog("Compose done")

1652 

1653 def reset_leaf(self, sr_uuid, vdi_uuid): 

1654 util.SMlog("LVMSR.reset_leaf for %s" % vdi_uuid) 

1655 if not VdiType.isCowImage(self.vdi_type): 

1656 raise xs_errors.XenError('Unimplemented') 

1657 

1658 self.sr.lvActivator.activate(self.uuid, self.lvname, False) 

1659 

1660 # safety check 

1661 if not self.cowutil.hasParent(self.path): 

1662 raise util.SMException("ERROR: VDI %s has no parent, " + \ 

1663 "will not reset contents" % self.uuid) 

1664 

1665 self.cowutil.killData(self.path) 

1666 

    def _attach(self):
        """Activate the LV chain and expose this VDI via the generic attach.

        Activates the whole parent chain persistently (with the binary
        refcount for the leaf), then populates xenstore_data with synthetic
        SCSI inquiry pages plus storage/vdi type hints before delegating to
        VDI.VDI.attach.

        Raises:
            xs_errors.XenError: 'VDIUnavailable' if the LV path is missing
                after activation.
        """
        self._chainSetActive(True, True, True)
        if not util.pathexists(self.path):
            raise xs_errors.XenError('VDIUnavailable', \
                    opterr='Could not find: %s' % self.path)

        if not hasattr(self, 'xenstore_data'):
            self.xenstore_data = {}

        # advertise synthetic SCSI inquiry data keyed on the VDI uuid
        self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
                scsiutil.gen_synthetic_page_data(self.uuid)))

        self.xenstore_data['storage-type'] = 'lvm'
        self.xenstore_data['vdi-type'] = self.vdi_type

        self.attached = True
        # record the activations so they survive this command's cleanup
        self.sr.lvActivator.persist()
        return VDI.VDI.attach(self, self.sr.uuid, self.uuid)

1685 

1686 def _detach(self): 

1687 self._chainSetActive(False, True) 

1688 self.attached = False 

1689 

    @override
    def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
                     cloneOp=False, secondary=None, cbtlog=None) -> str:
        """Snapshot/clone this VDI while the tapdisk is paused.

        Pauses the active tapdisk (if any), runs _snapshot, then unpauses.
        If CBT is enabled (cbtlog given), records whether the disk was
        in-use at snapshot time as the log consistency state.

        Returns:
            str: the parameter struct of the resulting snapshot VDI.

        Raises:
            util.SMException: if the tapdisk cannot be paused; errors from
                _snapshot propagate after a best-effort unpause.
        """
        # If cbt enabled, save file consistency state
        if cbtlog is not None:
            # an active tapdisk means the data may be mid-write => inconsistent
            if blktap2.VDI.tap_status(self.session, vdi_uuid):
                consistency_state = False
            else:
                consistency_state = True
            util.SMlog("Saving log consistency state of %s for vdi: %s" %
                       (consistency_state, vdi_uuid))
        else:
            consistency_state = None

        pause_time = time.time()
        if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
            raise util.SMException("failed to pause VDI %s" % vdi_uuid)

        snapResult = None
        try:
            snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state)
        except Exception as e1:
            # unpause is best-effort here: the original failure must win
            try:
                blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
                                        secondary=None)
            except Exception as e2:
                util.SMlog('WARNING: failed to clean up failed snapshot: '
                           '%s (error ignored)' % e2)
            raise
        self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary)
        blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
        unpause_time = time.time()
        # flag snapshots that froze the VM for a suspiciously long time
        if (unpause_time - pause_time) > LONG_SNAPTIME:
            util.SMlog('WARNING: snapshot paused VM for %s seconds' %
                       (unpause_time - pause_time))
        return snapResult

1726 

    def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None):
        """Core snapshot/clone implementation (master only, non-legacy SRs).

        The current VDI is renamed to become a hidden, read-only "base copy";
        one snapshot child (reusing the original uuid) and, for
        SNAPSHOT_DOUBLE, a second clone child are created on top of it. The
        whole sequence is protected by a clone journal so an interrupted
        operation can be rolled back.

        Raises:
            xs_errors.XenError: for master/legacy/hidden/raw/chain-length
                violations, or 'VDIClone' (via _failClone) on failure.
        """
        util.SMlog("LVMVDI._snapshot for %s (type %s)" % (self.uuid, snapType))

        if not self.sr.isMaster:
            raise xs_errors.XenError('LVMMaster')
        if self.sr.legacyMode:
            raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')

        self._loadThis()
        if self.hidden:
            raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')

        snapVdiType = self.sr._get_snap_vdi_type(self.vdi_type, self.size)

        self.sm_config = self.session.xenapi.VDI.get_sm_config( \
                self.sr.srcmd.params['vdi_ref'])
        # raw VDIs cannot be snapshotted (no COW layer) unless a test
        # fistpoint explicitly allows it
        if "type" in self.sm_config and self.sm_config['type'] == 'raw':
            if not util.fistpoint.is_active("testsm_clone_allow_raw"):
                raise xs_errors.XenError('Unimplemented', \
                        opterr='Raw VDI, snapshot or clone not permitted')

        # we must activate the entire image chain because the real parent could
        # theoretically be anywhere in the chain if all images under it are empty
        self._chainSetActive(True, False)
        if not util.pathexists(self.path):
            raise xs_errors.XenError('VDIUnavailable', \
                    opterr='VDI unavailable: %s' % (self.path))

        if VdiType.isCowImage(self.vdi_type):
            depth = self.cowutil.getDepth(self.path)
            if depth == -1:
                raise xs_errors.XenError('VDIUnavailable', \
                        opterr='failed to get COW depth')
            elif depth >= self.cowutil.getMaxChainLength():
                raise xs_errors.XenError('SnapshotChainTooLong')

        self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
                self.sr.srcmd.params['vdi_ref'])

        # fullpr: fully-inflated LV size; thinpr: minimal (empty-image) size
        fullpr = self.lvmcowutil.calcVolumeSize(self.size)
        thinpr = util.roundup(
            lvutil.LVM_SIZE_INCREMENT,
            self.cowutil.calcOverheadEmpty(max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()))
        )
        lvSizeOrig = thinpr
        lvSizeClon = thinpr

        # if the VDI is attached somewhere, the new writable leaf must be
        # fully inflated up-front (no master round-trip from the slave)
        hostRefs = []
        if self.sr.cmd == "vdi_snapshot":
            hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
            if hostRefs:
                lvSizeOrig = fullpr
        if self.sr.provision == "thick":
            if not self.issnap:
                lvSizeOrig = fullpr
            if self.sr.cmd != "vdi_snapshot":
                lvSizeClon = fullpr

        if (snapType == VDI.SNAPSHOT_SINGLE or
                snapType == VDI.SNAPSHOT_INTERNAL):
            lvSizeClon = 0

        # the space required must include 2 journal LVs: a clone journal and an
        # inflate journal (for the failure handling
        size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
        lvSizeBase = self.size
        if VdiType.isCowImage(self.vdi_type):
            # the base copy will be deflated to its physical size, freeing space
            lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, self.cowutil.getSizePhys(self.path))
            size_req -= (self.utilisation - lvSizeBase)
        self.sr._ensureSpaceAvailable(size_req)

        if hostRefs:
            self.sr._updateSlavesPreClone(hostRefs, self.lvname)

        baseUuid = util.gen_uuid()
        origUuid = self.uuid
        clonUuid = ""
        if snapType == VDI.SNAPSHOT_DOUBLE:
            clonUuid = util.gen_uuid()
        # journal value records both new uuids so rollback can find them
        jval = "%s_%s" % (baseUuid, clonUuid)
        self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
        util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)

        try:
            # self becomes the "base vdi"
            origOldLV = self.lvname
            baseLV = LV_PREFIX[self.vdi_type] + baseUuid
            self.sr.lvmCache.rename(self.lvname, baseLV)
            self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
            RefCounter.set(baseUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
            self.uuid = baseUuid
            self.lvname = baseLV
            self.path = os.path.join(self.sr.path, baseLV)
            self.label = "base copy"
            self.read_only = True
            self.location = self.uuid
            self.managed = False

            # shrink the base copy to the minimum - we do it before creating
            # the snapshot volumes to avoid requiring double the space
            if VdiType.isCowImage(self.vdi_type):
                self.lvmcowutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
                self.utilisation = lvSizeBase
            util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)

            # first child reuses the original uuid => callers see "their" VDI
            snapVDI = self._createSnap(origUuid, snapVdiType, lvSizeOrig, False)
            util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
            snapVDI2 = None
            if snapType == VDI.SNAPSHOT_DOUBLE:
                snapVDI2 = self._createSnap(clonUuid, snapVdiType, lvSizeClon, True)
                # If we have CBT enabled on the VDI,
                # set CBT status for the new snapshot disk
                if cbtlog:
                    snapVDI2.cbt_enabled = True
            util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)

            # note: it is important to mark the parent hidden only AFTER the
            # new image children have been created, which are referencing it;
            # otherwise we would introduce a race with GC that could reclaim
            # the parent before we snapshot it
            if not VdiType.isCowImage(self.vdi_type):
                self.sr.lvmCache.setHidden(self.lvname)
            else:
                self.cowutil.setHidden(self.path)
            util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)

            # set the base copy to ReadOnly
            self.sr.lvmCache.setReadonly(self.lvname, True)
            util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)

            if hostRefs:
                self.sr._updateSlavesOnClone(hostRefs, origOldLV,
                        snapVDI.lvname, self.uuid, self.lvname)

            # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
            if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
                snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
                if hostRefs:
                    cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
                    try:
                        self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
                    except:
                        # CBT propagation failure disables CBT but must not
                        # fail the snapshot itself
                        alert_name = "VDI_CBT_SNAPSHOT_FAILED"
                        alert_str = ("Creating CBT snapshot for {} failed"
                                     .format(snapVDI.uuid))
                        snapVDI._disable_cbt_on_error(alert_name, alert_str)
                        pass

        except (util.SMException, XenAPI.Failure) as e:
            util.logException("LVMVDI._snapshot")
            # rolls back via the clone journal, then raises VDIClone
            self._failClone(origUuid, jval, str(e))
        util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)

        self.sr.journaler.remove(self.JRN_CLONE, origUuid)

        return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)

1883 

    def _createSnap(self, snapUuid, snapVdiType, snapSizeLV, isNew):
        """Snapshot self and return the snapshot VDI object.

        Creates a new LV of snapSizeLV bytes, writes a COW snapshot image
        into it with self as parent, and builds the corresponding LVMVDI.
        isNew controls whether a fresh refcount is initialised for the LV.
        """

        snapLV = LV_PREFIX[snapVdiType] + snapUuid
        snapPath = os.path.join(self.sr.path, snapLV)
        self.sr.lvmCache.create(snapLV, int(snapSizeLV))
        util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
        if isNew:
            RefCounter.set(snapUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
        self.sr.lvActivator.add(snapUuid, snapLV, False)
        parentRaw = (self.vdi_type == VdiType.RAW)
        self.cowutil.snapshot(
            snapPath, self.path, parentRaw, max(self.size, self.cowutil.getDefaultPreallocationSizeVirt())
        )
        snapParent = self.cowutil.getParent(snapPath, LvmCowUtil.extractUuid)

        snapVDI = LVMVDI(self.sr, snapUuid)
        snapVDI.read_only = False
        snapVDI.location = snapUuid
        snapVDI.size = self.size
        snapVDI.utilisation = snapSizeLV
        snapVDI.sm_config = dict()
        # inherit sm-config, except keys that are image-specific or
        # per-host/transient and must not be copied to the child
        for key, val in self.sm_config.items():
            if key not in [
                    "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
                    not key.startswith("host_"):
                snapVDI.sm_config[key] = val
        snapVDI.sm_config["vdi_type"] = snapVdiType
        snapVDI.sm_config["vhd-parent"] = snapParent
        snapVDI.lvname = snapLV
        return snapVDI

1915 

    def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
        """Finalise a snapshot: prune an unused base, fix refcounts, and
        introduce the new VDI records into XAPI and the SR metadata.

        Returns:
            The parameter struct of the VDI to report back to the caller
            (normally snapVDI2; falls back to self or snapVDI).
        """
        if snapType is not VDI.SNAPSHOT_INTERNAL:
            self.sr._updateStats(self.sr.uuid, self.size)
        basePresent = True

        # Verify parent locator field of both children and delete basePath if
        # unused
        snapParent = snapVDI.sm_config["vhd-parent"]
        snap2Parent = ""
        if snapVDI2:
            snap2Parent = snapVDI2.sm_config["vhd-parent"]
        if snapParent != self.uuid and \
                (not snapVDI2 or snap2Parent != self.uuid):
            # neither child actually points at the base copy (empty-VDI
            # optimisation) => the base LV is garbage, remove it now
            util.SMlog("%s != %s != %s => deleting unused base %s" % \
                    (snapParent, self.uuid, snap2Parent, self.lvname))
            RefCounter.put(self.uuid, False, NS_PREFIX_LVM + self.sr.uuid)
            self.sr.lvmCache.remove(self.lvname)
            self.sr.lvActivator.remove(self.uuid, False)
            if hostRefs:
                self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
            basePresent = False
        else:
            # assign the _binary_ refcount of the original VDI to the new base
            # VDI (but as the normal refcount, since binary refcounts are only
            # for leaf nodes). The normal refcount of the child is not
            # transferred to to the base VDI because normal refcounts are
            # incremented and decremented individually, and not based on the
            # image chain (i.e., the child's normal refcount will be decremented
            # independently of its parent situation). Add 1 for this clone op.
            # Note that we do not need to do protect the refcount operations
            # below with per-VDI locking like we do in lvutil because at this
            # point we have exclusive access to the VDIs involved. Other SM
            # operations are serialized by the Agent or with the SR lock, and
            # any coalesce activations are serialized with the SR lock. (The
            # coalesce activates the coalesced VDI pair in the beginning, which
            # cannot affect the VDIs here because they cannot possibly be
            # involved in coalescing at this point, and at the relinkSkip step
            # that activates the children, which takes the SR lock.)
            ns = NS_PREFIX_LVM + self.sr.uuid
            (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
            RefCounter.set(self.uuid, bcnt + 1, 0, ns)

        # the "paused" and "host_*" sm-config keys are special and must stay on
        # the leaf without being inherited by anyone else
        for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]:
            snapVDI.sm_config[key] = self.sm_config[key]
            del self.sm_config[key]

        # Introduce any new VDI records & update the existing one
        type = self.session.xenapi.VDI.get_type( \
                self.sr.srcmd.params['vdi_ref'])
        if snapVDI2:
            LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
            vdiRef = snapVDI2._db_introduce()
            # clone => plain writable copy; snapshot => is-a-snapshot record
            if cloneOp:
                vdi_info = {UUID_TAG: snapVDI2.uuid,
                            NAME_LABEL_TAG: util.to_plain_string( \
                                self.session.xenapi.VDI.get_name_label( \
                                    self.sr.srcmd.params['vdi_ref'])),
                            NAME_DESCRIPTION_TAG: util.to_plain_string( \
                                self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
                            IS_A_SNAPSHOT_TAG: 0,
                            SNAPSHOT_OF_TAG: '',
                            SNAPSHOT_TIME_TAG: '',
                            TYPE_TAG: type,
                            VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
                            READ_ONLY_TAG: 0,
                            MANAGED_TAG: int(snapVDI2.managed),
                            METADATA_OF_POOL_TAG: ''
                            }
            else:
                util.SMlog("snapshot VDI params: %s" % \
                        self.session.xenapi.VDI.get_snapshot_time(vdiRef))
                vdi_info = {UUID_TAG: snapVDI2.uuid,
                            NAME_LABEL_TAG: util.to_plain_string( \
                                self.session.xenapi.VDI.get_name_label( \
                                    self.sr.srcmd.params['vdi_ref'])),
                            NAME_DESCRIPTION_TAG: util.to_plain_string( \
                                self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
                            IS_A_SNAPSHOT_TAG: 1,
                            SNAPSHOT_OF_TAG: snapVDI.uuid,
                            SNAPSHOT_TIME_TAG: '',
                            TYPE_TAG: type,
                            VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
                            READ_ONLY_TAG: 0,
                            MANAGED_TAG: int(snapVDI2.managed),
                            METADATA_OF_POOL_TAG: ''
                            }

            LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
            util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
                    (vdiRef, snapVDI2.uuid))

        if basePresent:
            # the base copy gets its own (read-only, unmanaged) record
            LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
            vdiRef = self._db_introduce()
            vdi_info = {UUID_TAG: self.uuid,
                        NAME_LABEL_TAG: self.label,
                        NAME_DESCRIPTION_TAG: self.description,
                        IS_A_SNAPSHOT_TAG: 0,
                        SNAPSHOT_OF_TAG: '',
                        SNAPSHOT_TIME_TAG: '',
                        TYPE_TAG: type,
                        VDI_TYPE_TAG: self.sm_config['vdi_type'],
                        READ_ONLY_TAG: 1,
                        MANAGED_TAG: 0,
                        METADATA_OF_POOL_TAG: ''
                        }

            LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
            util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
                    (vdiRef, self.uuid))

        # Update the original record
        vdi_ref = self.sr.srcmd.params['vdi_ref']
        self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
        self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
                str(snapVDI.utilisation))

        # Return the info on the new snap VDI
        snap = snapVDI2
        if not snap:
            snap = self
            if not basePresent:
                # a single-snapshot of an empty VDI will be a noop, resulting
                # in no new VDIs, so return the existing one. The GC wouldn't
                # normally try to single-snapshot an empty image of course, but
                # if an external snapshot operation manages to sneak in right
                # before a snapshot-coalesce phase, we would get here
                snap = snapVDI
        return snap.get_params()

2047 

2048 def _setType(self, vdiType: str) -> None: 

2049 self.vdi_type = vdiType 

2050 self.cowutil = getCowUtil(self.vdi_type) 

2051 self.lvmcowutil = LvmCowUtil(self.cowutil) 

2052 

2053 def _initFromVDIInfo(self, vdiInfo): 

2054 self._setType(vdiInfo.vdiType) 

2055 self.lvname = vdiInfo.lvName 

2056 self.size = vdiInfo.sizeVirt 

2057 self.utilisation = vdiInfo.sizeLV 

2058 self.hidden = vdiInfo.hidden 

2059 if self.hidden: 2059 ↛ 2060line 2059 didn't jump to line 2060, because the condition on line 2059 was never true

2060 self.managed = False 

2061 self.active = vdiInfo.lvActive 

2062 self.readonly = vdiInfo.lvReadonly 

2063 self.parent = vdiInfo.parentUuid 

2064 self.path = os.path.join(self.sr.path, self.lvname) 

2065 if hasattr(self, "sm_config_override"): 2065 ↛ 2068line 2065 didn't jump to line 2068, because the condition on line 2065 was never false

2066 self.sm_config_override["vdi_type"] = self.vdi_type 

2067 else: 

2068 self.sm_config_override = {'vdi_type': self.vdi_type} 

2069 self.loaded = True 

2070 

2071 def _initFromLVInfo(self, lvInfo): 

2072 self._setType(lvInfo.vdiType) 

2073 self.lvname = lvInfo.name 

2074 self.size = lvInfo.size 

2075 self.utilisation = lvInfo.size 

2076 self.hidden = lvInfo.hidden 

2077 self.active = lvInfo.active 

2078 self.readonly = lvInfo.readonly 

2079 self.parent = '' 

2080 self.path = os.path.join(self.sr.path, self.lvname) 

2081 if hasattr(self, "sm_config_override"): 2081 ↛ 2084line 2081 didn't jump to line 2084, because the condition on line 2081 was never false

2082 self.sm_config_override["vdi_type"] = self.vdi_type 

2083 else: 

2084 self.sm_config_override = {'vdi_type': self.vdi_type} 

2085 if 'vhd-parent' in self.sm_config_override: 2085 ↛ 2086line 2085 didn't jump to line 2086, because the condition on line 2085 was never true

2086 self.parent = self.sm_config_override['vhd-parent'] 

2087 if not VdiType.isCowImage(self.vdi_type): 2087 ↛ 2088line 2087 didn't jump to line 2088, because the condition on line 2087 was never true

2088 self.loaded = True 

2089 

2090 def _initFromImageInfo(self, imageInfo): 

2091 self.size = imageInfo.sizeVirt 

2092 if self.parent == '' or (imageInfo.parentUuid != '' and imageInfo.parentUuid != self.parent): 2092 ↛ 2094line 2092 didn't jump to line 2094, because the condition on line 2092 was never false

2093 self.parent = imageInfo.parentUuid 

2094 self.hidden = imageInfo.hidden 

2095 self.loaded = True 

2096 

    def _determineType(self):
        """
        Determine whether this is a RAW or a COW VDI.

        Resolution order: the XAPI sm-config 'vdi_type' hint if present,
        then a direct path check for each possible LV name prefix, then a
        full LV listing. Returns True once the type is resolved, False if
        the VDI cannot be found (e.g. VG not present yet).
        """
        if "vdi_ref" in self.sr.srcmd.params:
            vdi_ref = self.sr.srcmd.params["vdi_ref"]
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            if sm_config.get("vdi_type"):
                self._setType(sm_config["vdi_type"])
                prefix = LV_PREFIX[self.vdi_type]
                self.lvname = "%s%s" % (prefix, self.uuid)
                self.path = os.path.join(self.sr.path, self.lvname)
                self.sm_config_override = sm_config
                return True

        # LVM commands can be costly, so check the file directly first in case
        # the LV is active
        found = False
        for vdi_type, prefix in LV_PREFIX.items():
            lvname = "%s%s" % (prefix, self.uuid)
            path = os.path.join(self.sr.path, lvname)
            if util.pathexists(path):
                # the same uuid must not exist under two different prefixes
                if found:
                    raise xs_errors.XenError('VDILoad',
                            opterr="multiple VDI's: uuid %s" % self.uuid)
                found = True
                self._setType(vdi_type)
                self.lvname = lvname
                self.path = path
        if found:
            return True

        # now list all LV's
        if not lvutil._checkVG(self.sr.vgname):
            # when doing attach_from_config, the VG won't be there yet
            return False

        lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache)
        if lvs.get(self.uuid):
            self._initFromLVInfo(lvs[self.uuid])
            return True
        return False

2139 

    def _loadThis(self):
        """
        Load VDI info for this VDI and activate the LV if it's COW. We
        don't do it in VDI.load() because not all VDI operations need it.
        """
        if self.loaded:
            # already loaded: just make sure the COW leaf is active
            if VdiType.isCowImage(self.vdi_type):
                self.sr.lvActivator.activate(self.uuid, self.lvname, False)
            return
        try:
            lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache, self.lvname)
        except util.CommandException as e:
            raise xs_errors.XenError('VDIUnavailable',
                    opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
        if not lvs.get(self.uuid):
            raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
        self._initFromLVInfo(lvs[self.uuid])
        if VdiType.isCowImage(self.vdi_type):
            # COW images need the LV active so the header can be read for the
            # authoritative virtual size / parent / hidden flag
            self.sr.lvActivator.activate(self.uuid, self.lvname, False)
            imageInfo = self.cowutil.getInfo(self.path, LvmCowUtil.extractUuid, False)
            if not imageInfo:
                raise xs_errors.XenError('VDIUnavailable', opterr='getInfo failed')
            self._initFromImageInfo(imageInfo)
        self.loaded = True

2164 

    def _chainSetActive(self, active, binary, persistent=False):
        """Activate or queue-for-deactivation this VDI's whole parent chain.

        binary selects the binary (attach/detach) refcount for the leaf;
        parents always use the normal refcount. persistent makes activations
        survive beyond the current SM command.
        """
        if binary:
            (count, bcount) = RefCounter.checkLocked(self.uuid,
                    NS_PREFIX_LVM + self.sr.uuid)
            if (active and bcount > 0) or (not active and bcount == 0):
                return  # this is a redundant activation/deactivation call

        vdiList = {self.uuid: self.lvname}
        if VdiType.isCowImage(self.vdi_type):
            vdiList = self.cowutil.getParentChain(self.lvname, LvmCowUtil.extractUuid, self.sr.vgname)
        for uuid, lvName in vdiList.items():
            binaryParam = binary
            if uuid != self.uuid:
                binaryParam = False  # binary param only applies to leaf nodes
            if active:
                self.sr.lvActivator.activate(uuid, lvName, binaryParam,
                        persistent)
            else:
                # just add the LVs for deactivation in the final (cleanup)
                # step. The LVs must not have been activated during the current
                # operation
                self.sr.lvActivator.add(uuid, lvName, binaryParam)

2187 

2188 def _failClone(self, uuid, jval, msg): 

2189 try: 

2190 self.sr._handleInterruptedCloneOp(uuid, jval, True) 

2191 self.sr.journaler.remove(self.JRN_CLONE, uuid) 

2192 except Exception as e: 

2193 util.SMlog('WARNING: failed to clean up failed snapshot: ' \ 

2194 ' %s (error ignored)' % e) 

2195 raise xs_errors.XenError('VDIClone', opterr=msg) 

2196 

2197 def _markHidden(self): 

2198 if not VdiType.isCowImage(self.vdi_type): 

2199 self.sr.lvmCache.setHidden(self.lvname) 

2200 else: 

2201 self.cowutil.setHidden(self.path) 

2202 self.hidden = 1 

2203 

    def _prepareThin(self, attach, vdiType):
        """Inflate (attach=True) or deflate (attach=False) a thin VDI's LV.

        On the pool master this is done locally; on a slave the master is
        asked to do it via the THIN_PLUGIN host plugin, after which the LV
        mapping is refreshed locally. XAPI's physical-utilisation fields are
        updated if the LV size changed.

        Raises:
            Exception: if the master-side plugin call reports failure.
        """
        origUtilisation = self.sr.lvmCache.getSize(self.lvname)
        if self.sr.isMaster:
            # the master can prepare the VDI locally
            if attach:
                self.lvmcowutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type)
            else:
                self.lvmcowutil.detachThin(self.session, self.sr.lvmCache, self.sr.uuid, self.uuid, self.vdi_type)
        else:
            fn = "attach"
            if not attach:
                fn = "detach"
            pools = self.session.xenapi.pool.get_all()
            master = self.session.xenapi.pool.get_master(pools[0])
            rv = self.session.xenapi.host.call_plugin(
                master,
                self.sr.THIN_PLUGIN,
                fn,
                {
                    "srUuid": self.sr.uuid,
                    "vdiUuid": self.uuid,
                    "vdiType": vdiType
                }
            )
            util.SMlog("call-plugin returned: %s" % rv)
            if not rv:
                raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
            # refresh to pick up the size change on this slave
            self.sr.lvmCache.activateNoRefcount(self.lvname, True)

        self.utilisation = self.sr.lvmCache.getSize(self.lvname)
        if origUtilisation != self.utilisation:
            # propagate the new utilisation figures to XAPI
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
                    str(self.utilisation))
            stats = lvutil._getVGstats(self.sr.vgname)
            sr_utilisation = stats['physical_utilisation']
            self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
                    str(sr_utilisation))

2243 

2244 @override 

2245 def update(self, sr_uuid, vdi_uuid) -> None: 

2246 if self.sr.legacyMode: 

2247 return 

2248 

2249 #Synch the name_label of this VDI on storage with the name_label in XAPI 

2250 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid) 

2251 update_map = {} 

2252 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ 

2253 METADATA_OBJECT_TYPE_VDI 

2254 update_map[UUID_TAG] = self.uuid 

2255 update_map[NAME_LABEL_TAG] = util.to_plain_string( \ 

2256 self.session.xenapi.VDI.get_name_label(vdi_ref)) 

2257 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \ 

2258 self.session.xenapi.VDI.get_name_description(vdi_ref)) 

2259 update_map[SNAPSHOT_TIME_TAG] = \ 

2260 self.session.xenapi.VDI.get_snapshot_time(vdi_ref) 

2261 update_map[METADATA_OF_POOL_TAG] = \ 

2262 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref) 

2263 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map) 

2264 

2265 @override 

2266 def _ensure_cbt_space(self) -> None: 

2267 self.sr.ensureCBTSpace() 

2268 

2269 @override 

2270 def _create_cbt_log(self) -> str: 

2271 logname = self._get_cbt_logname(self.uuid) 

2272 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG) 

2273 logpath = super(LVMVDI, self)._create_cbt_log() 

2274 self.sr.lvmCache.deactivateNoRefcount(logname) 

2275 return logpath 

2276 

2277 @override 

2278 def _delete_cbt_log(self) -> None: 

2279 logpath = self._get_cbt_logpath(self.uuid) 

2280 if self._cbt_log_exists(logpath): 

2281 logname = self._get_cbt_logname(self.uuid) 

2282 self.sr.lvmCache.remove(logname) 

2283 

2284 @override 

2285 def _rename(self, oldpath, newpath) -> None: 

2286 oldname = os.path.basename(oldpath) 

2287 newname = os.path.basename(newpath) 

2288 self.sr.lvmCache.rename(oldname, newname) 

2289 

2290 @override 

2291 def _activate_cbt_log(self, lv_name) -> bool: 

2292 self.sr.lvmCache.refresh() 

2293 if not self.sr.lvmCache.is_active(lv_name): 2293 ↛ 2294line 2293 didn't jump to line 2294, because the condition on line 2293 was never true

2294 try: 

2295 self.sr.lvmCache.activateNoRefcount(lv_name) 

2296 return True 

2297 except Exception as e: 

2298 util.SMlog("Exception in _activate_cbt_log, " 

2299 "Error: %s." % str(e)) 

2300 raise 

2301 else: 

2302 return False 

2303 

2304 @override 

2305 def _deactivate_cbt_log(self, lv_name) -> None: 

2306 try: 

2307 self.sr.lvmCache.deactivateNoRefcount(lv_name) 

2308 except Exception as e: 

2309 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e)) 

2310 raise 

2311 

2312 @override 

2313 def _cbt_log_exists(self, logpath) -> bool: 

2314 return lvutil.exists(logpath) 

2315 

# Entry point: when executed as a script, dispatch the SMAPI command to the
# LVMSR driver; when imported as a module, register the driver with the SR
# factory so it can be instantiated by type.
if __name__ == '__main__':
    SRCommand.run(LVMSR, DRIVER_INFO)
else:
    SR.registerSR(LVMSR)