This article collects typical usage examples of the vdsm.config.config.getint function in Python. If you have been wondering what exactly getint does, how it is used, or what it looks like in practice, the hand-picked function examples here may help.
The text below presents 20 code examples of the getint function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend even better Python code examples.
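Before diving into the examples, here is a minimal sketch of the basic call pattern (the section and option names below appear in the examples that follow; the print wrapper is illustrative):

from vdsm.config import config

# config is a ConfigParser-style object; getint(section, option)
# looks up the option in the given section and returns it as an int.
pool_size = config.getint('irs', 'thread_pool_size')
max_tasks = config.getint('irs', 'max_tasks')
print(pool_size, max_tasks)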
Example 1: __init__
def __init__(self, vm, dst='', dstparams='',
mode=MODE_REMOTE, method=METHOD_ONLINE,
tunneled=False, dstqemu='', abortOnError=False,
consoleAddress=None, compressed=False,
autoConverge=False, **kwargs):
self.log = vm.log
self._vm = vm
self._dst = dst
self._mode = mode
if method != METHOD_ONLINE:
self.log.warning(
'migration method %s is deprecated, forced to "online"',
method)
self._dstparams = dstparams
self._machineParams = {}
self._tunneled = utils.tobool(tunneled)
self._abortOnError = utils.tobool(abortOnError)
self._consoleAddress = consoleAddress
self._dstqemu = dstqemu
self._downtime = kwargs.get('downtime') or \
config.get('vars', 'migration_downtime')
self._maxBandwidth = int(
kwargs.get('maxBandwidth') or
config.getint('vars', 'migration_max_bandwidth')
)
self._autoConverge = autoConverge
self._compressed = compressed
self.status = {
'status': {
'code': 0,
'message': 'Migration in progress'}}
self._progress = 0
threading.Thread.__init__(self)
self._preparingMigrationEvt = True
self._migrationCanceledEvt = False
self._monitorThread = None
self._destServer = None
progress_timeout = config.getint('vars', 'migration_progress_timeout')
self._convergence_schedule = {
'init': [],
'stalling': [
{
'limit': progress_timeout,
'action': {
'name': CONVERGENCE_SCHEDULE_SET_ABORT,
'params': []
}
}
]
}
self._use_convergence_schedule = False
if 'convergenceSchedule' in kwargs:
self._convergence_schedule = kwargs.get('convergenceSchedule')
self._use_convergence_schedule = True
self.log.debug('convergence schedule set to: %s',
str(self._convergence_schedule))
Author ID: kanalun, Project: vdsm, Lines of code: 60, Source file: migration.py
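The maxBandwidth handling in Example 1 is a common "explicit argument or configured default" idiom. A standalone sketch of the same pattern (the helper name is illustrative, not part of vdsm):

from vdsm.config import config

def choose_bandwidth(**kwargs):
    # A falsy maxBandwidth (None, 0, '') falls back to the
    # configured default, exactly as Example 1 does.
    return int(
        kwargs.get('maxBandwidth') or
        config.getint('vars', 'migration_max_bandwidth')
    )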
Example 2: start
def start(cif):
global _operations
_scheduler.start()
_executor.start()
def per_vm_operation(func, period):
disp = VmDispatcher(cif.getVMs, _executor, func, _timeout_from(period))
return Operation(disp, period)
_operations = [
        # needs dispatching because updating the volume stats needs to
        # access the storage, thus can block.
per_vm_operation(UpdateVolumes, config.getint("irs", "vol_size_sample_interval")),
        # needs dispatching because it accesses FS and libvirt data
per_vm_operation(NumaInfoMonitor, config.getint("vars", "vm_sample_numa_interval")),
        # Job monitoring needs QEMU monitor access.
per_vm_operation(BlockjobMonitor, config.getint("vars", "vm_sample_jobs_interval")),
# libvirt sampling using bulk stats can block, but unresponsive
# domains are handled inside VMBulkSampler for performance reasons;
# thus, does not need dispatching.
Operation(
sampling.VMBulkSampler(libvirtconnection.get(cif), cif.getVMs, sampling.stats_cache),
config.getint("vars", "vm_sample_interval"),
),
# we do this only until we get high water mark notifications
# from qemu. Access storage and/or qemu monitor, so can block,
# thus we need dispatching.
per_vm_operation(DriveWatermarkMonitor, config.getint("vars", "vm_watermark_interval")),
]
for op in _operations:
op.start()
Author ID: kvaps, Project: vdsm, Lines of code: 33, Source file: periodic.py
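_timeout_from is referenced but not shown in Example 2. A plausible minimal stand-in that derives a dispatch timeout from the sampling period (an assumption, not necessarily vdsm's actual implementation):

def _timeout_from(interval):
    # Assumed heuristic: give an operation half of its period to be
    # dispatched before it is considered blocked.
    return interval / 2.0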
Example 3: __init__
def __init__(self, poolID, maxHostID, inbox, outbox, monitorInterval=2):
"""
    Note: the inbox parameter here should point to the HSM's outbox
mailbox file, and vice versa.
"""
self._messageTypes = {}
# Save arguments
self._stop = False
self._stopped = False
self._poolID = poolID
tpSize = config.getint('irs', 'thread_pool_size') / 2
waitTimeout = wait_timeout(monitorInterval)
maxTasks = config.getint('irs', 'max_tasks')
self.tp = ThreadPool("mailbox-spm", tpSize, waitTimeout, maxTasks)
self._inbox = inbox
if not os.path.exists(self._inbox):
self.log.error("SPM_MailMonitor create failed - inbox %s does not "
"exist" % repr(self._inbox))
raise RuntimeError("SPM_MailMonitor create failed - inbox %s does "
"not exist" % repr(self._inbox))
self._outbox = outbox
if not os.path.exists(self._outbox):
self.log.error("SPM_MailMonitor create failed - outbox %s does "
"not exist" % repr(self._outbox))
raise RuntimeError("SPM_MailMonitor create failed - outbox %s "
"does not exist" % repr(self._outbox))
self._numHosts = int(maxHostID)
self._outMailLen = MAILBOX_SIZE * self._numHosts
self._monitorInterval = monitorInterval
# TODO: add support for multiple paths (multiple mailboxes)
self._outgoingMail = self._outMailLen * "\0"
self._incomingMail = self._outgoingMail
self._inCmd = ['dd',
'if=' + str(self._inbox),
'iflag=direct,fullblock',
'count=1'
]
self._outCmd = ['dd',
'of=' + str(self._outbox),
'oflag=direct',
'iflag=fullblock',
'conv=notrunc',
'count=1'
]
self._outLock = threading.Lock()
self._inLock = threading.Lock()
# Clear outgoing mail
self.log.debug("SPM_MailMonitor - clearing outgoing mail, command is: "
"%s", self._outCmd)
cmd = self._outCmd + ['bs=' + str(self._outMailLen)]
(rc, out, err) = _mboxExecCmd(cmd, data=self._outgoingMail)
if rc:
self.log.warning("SPM_MailMonitor couldn't clear outgoing mail, "
"dd failed")
self._thread = concurrent.thread(
self.run, name="mailbox-spm", log=self.log)
self._thread.start()
self.log.debug('SPM_MailMonitor created for pool %s' % self._poolID)
Author ID: EdDev, Project: vdsm, Lines of code: 59, Source file: mailbox.py
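Note that the tpSize expression in Example 3 relies on Python 2 integer division; under Python 3 the same expression would yield a float. A sketch of a version-safe variant (illustrative only):

from vdsm.config import config

# Floor division behaves the same on Python 2 and 3, so the SPM
# mailbox keeps an integer thread pool size either way.
tpSize = config.getint('irs', 'thread_pool_size') // 2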
Example 4: __init__
def __init__(self, irs, log, scheduler):
"""
Initialize the (single) clientIF instance
:param irs: a Dispatcher object to be used as this object's irs.
:type irs: :class:`storage.dispatcher.Dispatcher`
:param log: a log object to be used for this object's logging.
:type log: :class:`logging.Logger`
"""
self.vmContainerLock = threading.Lock()
self._networkSemaphore = threading.Semaphore()
self._shutdownSemaphore = threading.Semaphore()
self.irs = irs
if self.irs:
self._contEIOVmsCB = partial(clientIF.contEIOVms, proxy(self))
self.irs.registerDomainStateChangeCallback(self._contEIOVmsCB)
self.log = log
self._recovery = True
self.channelListener = Listener(self.log)
self._generationID = str(uuid.uuid4())
self.mom = None
self.bindings = {}
self._broker_client = None
self._subscriptions = defaultdict(list)
self._scheduler = scheduler
if _glusterEnabled:
self.gluster = gapi.GlusterApi(self, log)
else:
self.gluster = None
try:
self.vmContainer = {}
self._hostStats = sampling.HostStatsThread(
sampling.host_samples)
self._hostStats.start()
self.lastRemoteAccess = 0
self._enabled = True
self._netConfigDirty = False
self._prepareMOM()
secret.clear()
concurrent.thread(self._recoverThread, name='clientIFinit').start()
self.channelListener.settimeout(
config.getint('vars', 'guest_agent_timeout'))
self.channelListener.start()
self.threadLocal = threading.local()
self.threadLocal.client = ''
host = config.get('addresses', 'management_ip')
port = config.getint('addresses', 'management_port')
self._createAcceptor(host, port)
self._prepareXMLRPCBinding()
self._prepareJSONRPCBinding()
self._connectToBroker()
except:
self.log.error('failed to init clientIF, '
'shutting down storage dispatcher')
if self.irs:
self.irs.prepareForShutdown()
raise
Author ID: borisroman, Project: vdsm, Lines of code: 59, Source file: clientIF.py
Example 5: __init__
def __init__(self,
tpSize=config.getint('irs', 'thread_pool_size'),
waitTimeout=3,
maxTasks=config.getint('irs', 'max_tasks')):
self.storage_repository = config.get('irs', 'repository')
self.tp = ThreadPool(tpSize, waitTimeout, maxTasks)
self._tasks = {}
self._unqueuedTasks = []
Author ID: humblec, Project: vdsm, Lines of code: 8, Source file: taskManager.py
Example 6: __init__
def __init__(self,
tpSize=config.getint('irs', 'thread_pool_size'),
waitTimeout=3,
maxTasks=config.getint('irs', 'max_tasks')):
self.tp = ThreadPool("tasks", tpSize, waitTimeout, maxTasks)
self._tasks = {}
self._unqueuedTasks = []
self._insertTaskLock = threading.Lock()
Author ID: nirs, Project: vdsm, Lines of code: 8, Source file: taskManager.py
Example 7: _regular_run
def _regular_run(self):
self.log.debug("Starting migration source thread")
self._recovery = False
self._update_outgoing_limit()
try:
startTime = time.time()
machineParams = self._setupRemoteMachineParams()
self._setupVdsConnection()
self._prepareGuest()
while not self._started:
try:
self.log.info("Migration semaphore: acquiring")
with SourceThread.ongoingMigrations:
self.log.info("Migration semaphore: acquired")
timeout = config.getint(
'vars', 'guest_lifecycle_event_reply_timeout')
if self.hibernating:
self._vm.guestAgent.events.before_hibernation(
wait_timeout=timeout)
elif self._enableGuestEvents:
self._vm.guestAgent.events.before_migration(
wait_timeout=timeout)
if self._migrationCanceledEvt.is_set():
self._raiseAbortError()
self.log.debug("migration semaphore acquired "
"after %d seconds",
time.time() - startTime)
migrationParams = {
'dst': self._dst,
'mode': self._mode,
'method': METHOD_ONLINE,
'dstparams': self._dstparams,
'dstqemu': self._dstqemu,
}
self._startUnderlyingMigration(
time.time(), migrationParams, machineParams
)
self._finishSuccessfully(machineParams)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_OPERATION_ABORTED:
self.status = response.error(
'migCancelErr', message='Migration canceled')
raise
except MigrationLimitExceeded:
retry_timeout = config.getint('vars',
'migration_retry_timeout')
self.log.debug("Migration destination busy. Initiating "
"retry in %d seconds.", retry_timeout)
self._migrationCanceledEvt.wait(retry_timeout)
except MigrationDestinationSetupError as e:
self._recover(str(e))
# we know what happened, no need to dump hollow stack trace
except Exception as e:
self._recover(str(e))
self.log.exception("Failed to migrate")
Author ID: nirs, Project: vdsm, Lines of code: 56, Source file: migration.py
Example 8: get
def get():
caps = {}
caps['kvmEnabled'] = \
str(config.getboolean('vars', 'fake_kvm_support') or
os.path.exists('/dev/kvm')).lower()
cpuInfo = CpuInfo()
cpuTopology = CpuTopology()
if config.getboolean('vars', 'report_host_threads_as_cores'):
caps['cpuCores'] = str(cpuTopology.threads())
else:
caps['cpuCores'] = str(cpuTopology.cores())
caps['cpuThreads'] = str(cpuTopology.threads())
caps['cpuSockets'] = str(cpuTopology.sockets())
caps['cpuSpeed'] = cpuInfo.mhz()
if config.getboolean('vars', 'fake_kvm_support'):
caps['cpuModel'] = 'Intel(Fake) CPU'
flags = set(cpuInfo.flags() + ['vmx', 'sse2', 'nx'])
        caps['cpuFlags'] = ','.join(flags) + ',model_486,model_pentium,' \
'model_pentium2,model_pentium3,model_pentiumpro,model_qemu32,' \
'model_coreduo,model_core2duo,model_n270,model_Conroe,' \
'model_Penryn,model_Nehalem,model_Opteron_G1'
else:
caps['cpuModel'] = cpuInfo.model()
caps['cpuFlags'] = ','.join(cpuInfo.flags() +
_getCompatibleCpuModels())
caps.update(dsaversion.version_info)
caps.update(netinfo.get())
try:
caps['hooks'] = hooks.installed()
except:
logging.debug('not reporting hooks', exc_info=True)
caps['operatingSystem'] = osversion()
caps['uuid'] = utils.getHostUUID()
caps['packages2'] = _getKeyPackages()
caps['emulatedMachines'] = _getEmulatedMachines()
caps['ISCSIInitiatorName'] = _getIscsiIniName()
caps['HBAInventory'] = storage.hba.HBAInventory()
caps['vmTypes'] = ['kvm']
caps['memSize'] = str(utils.readMemInfo()['MemTotal'] / 1024)
caps['reservedMem'] = str(config.getint('vars', 'host_mem_reserve') +
config.getint('vars', 'extra_mem_reserve'))
caps['guestOverhead'] = config.get('vars', 'guest_ram_overhead')
return caps
Author ID: edwardbadboy, Project: vdsm-ubuntu, Lines of code: 51, Source file: caps.py
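The reservedMem value in Example 8 is simply the sum of two configured reserves, reported in MB as a string. A condensed sketch of just that step (assuming the same configuration keys):

from vdsm.config import config

# Host reserve plus extra reserve, reported as a string as in Example 8.
reserved = (config.getint('vars', 'host_mem_reserve') +
            config.getint('vars', 'extra_mem_reserve'))
caps = {'reservedMem': str(reserved)}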
Example 9: __init__
def __init__(self, vm, downtime):
super(DowntimeThread, self).__init__()
self.DOWNTIME_STEPS = config.getint('vars', 'migration_downtime_steps')
self._vm = vm
self._downtime = downtime
self._stop = threading.Event()
delay_per_gib = config.getint('vars', 'migration_downtime_delay')
memSize = int(vm.conf['memSize'])
self._wait = (delay_per_gib * max(memSize, 2048) + 1023) / 1024
self.daemon = True
self.start()
Author ID: mpavlase, Project: vdsm, Lines of code: 14, Source file: migration.py
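The _wait computation in Example 9 rounds up to whole seconds per GiB of guest memory, with a 2 GiB floor. A worked sketch with assumed numbers (a delay of 75 s/GiB and a 4096 MiB guest are hypothetical values):

delay_per_gib = 75   # assumed value of 'migration_downtime_delay'
memSize = 4096       # assumed VM memory size in MiB

# max(memSize, 2048) enforces the 2 GiB floor; adding 1023 makes the
# Python 2 integer division round up to whole seconds.
wait = (delay_per_gib * max(memSize, 2048) + 1023) / 1024
print(wait)  # 300 seconds for a 4 GiB guest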
Example 10: forceScsiScan
def forceScsiScan():
processes = []
minTimeout = config.getint('irs', 'scsi_rescan_minimal_timeout')
maxTimeout = config.getint('irs', 'scsi_rescan_maximal_timeout')
for hba in glob.glob(SCAN_PATTERN):
cmd = [constants.EXT_DD, 'of=' + hba]
p = misc.execCmd(cmd, sudo=False, sync=False)
try:
p.stdin.write("- - -")
p.stdin.flush()
p.stdin.close()
except OSError as e:
if p.wait(0) is False:
log.error("pid %s still running", p.pid)
log.warning("Error in rescan of hba:%s with returncode:%s and "
"error message: %s", hba, p.returncode,
p.stderr.read(1000))
if e.errno != errno.EPIPE:
raise
else:
log.warning("Ignoring error in rescan of hba %s: ",
hba, exc_info=True)
continue
processes.append((hba, p))
if (minTimeout > maxTimeout or minTimeout < 0):
minTimeout = 2
maxTimeout = 30
log.warning("One of the following configuration arguments has an "
"illegal value: scsi_rescan_minimal_timeout or "
"scsi_rescan_maximal_timeout. Set to %s and %s seconds "
"respectively.", minTimeout, maxTimeout)
log.debug("Performing SCSI scan, this will take up to %s seconds",
maxTimeout)
time.sleep(minTimeout)
for i in xrange(maxTimeout - minTimeout):
for p in processes[:]:
(hba, proc) = p
if proc.wait(0):
if proc.returncode != 0:
log.warning('returncode for: %s is: %s', hba,
proc.returncode)
processes.remove(p)
if not processes:
break
else:
time.sleep(1)
else:
log.warning("Still waiting for scsi scan of hbas: %s",
tuple(hba for p in processes))
Author ID: hackxay, Project: vdsm, Lines of code: 49, Source file: iscsi.py
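Example 10 silently repairs an invalid timeout pair read from the configuration. A condensed sketch of just that clamping step (the function wrapper is illustrative; the 2 and 30 second defaults come from the example itself):

from vdsm.config import config

def rescan_timeouts():
    minTimeout = config.getint('irs', 'scsi_rescan_minimal_timeout')
    maxTimeout = config.getint('irs', 'scsi_rescan_maximal_timeout')
    # Fall back to sane defaults when the configured pair is invalid.
    if minTimeout > maxTimeout or minTimeout < 0:
        minTimeout, maxTimeout = 2, 30
    return minTimeout, maxTimeout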
Example 11: shutdown
def shutdown(self, timeout, message):
try:
now = time.time()
if self.lastStatus == 'Down':
return
if self.guestAgent and self.guestAgent.isResponsive():
self._guestEventTime = now
self._guestEvent = 'Powering down'
self.log.debug('guestAgent shutdown called')
self.guestAgent.desktopShutdown(timeout, message)
agent_timeout = int(timeout) + config.getint('vars', 'sys_shutdown_timeout')
timer = threading.Timer(agent_timeout, self._timedShutdown)
timer.start()
elif self.conf['acpiEnable'].lower() == "true":
self._guestEventTime = now
self._guestEvent = 'Powering down'
self._acpiShutdown()
# No tools, no ACPI
else:
return {'status': {'code': errCode['exist']['status']['code'],
'message': 'VM without ACPI or active SolidICE tools. Try Forced Shutdown.'}}
except:
self.log.error("Shutdown failed", exc_info=True)
return {'status': {'code': doneCode['code'],
'message': 'Machine shut down'}}
Author ID: ekohl, Project: vdsm, Lines of code: 25, Source file: vm.py
Example 12: _lvExtend
def _lvExtend(self, block_dev, newsize=None):
volID = None
for d in self._devices[DISK_DEVICES]:
if not d.blockDev: continue
if d.name != block_dev: continue
if newsize is None:
newsize = config.getint('irs',
'volume_utilization_chunk_mb') + (d.apparentsize + 2**20
- 1) / 2**20
# TODO cap newsize by max volume size
volDict = {'poolID': d.poolID, 'domainID': d.domainID,
'imageID': d.imageID, 'volumeID': d.volumeID}
d.needExtend = True
d.reqsize = newsize
# sendExtendMsg expects size in bytes
self.cif.irs.sendExtendMsg(d.poolID, volDict, newsize * 2**20,
self._afterLvExtend)
self.log.debug('%s/%s (%s): apparentsize %s req %s', d.domainID,
d.volumeID, d.name, d.apparentsize / constants.MEGAB,
newsize) #in MiB
volID = d.volumeID
break
# store most recently requested size in conf, to be re-requested on
# migration destination
for dev in self.conf['devices']:
if dev['type'] == DISK_DEVICES and dev.get('volumeID') == volID:
dev['reqsize'] = str(newsize)
Author ID: ekohl, Project: vdsm, Lines of code: 29, Source file: vm.py
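The default newsize in Example 12 is the configured extension chunk plus the drive's apparent size rounded up to whole MiB. A worked sketch with assumed numbers (a 1024 MB chunk and a 10 GiB apparent size are hypothetical):

chunk_mb = 1024              # assumed 'volume_utilization_chunk_mb'
apparentsize = 10 * 2 ** 30  # assumed apparent size in bytes

# (x + 2**20 - 1) / 2**20 rounds bytes up to whole MiB under Python 2
# integer division; the extension chunk is then added on top.
newsize = chunk_mb + (apparentsize + 2 ** 20 - 1) / 2 ** 20
print(newsize)  # 1024 + 10240 = 11264 MiB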
Example 13: _wait_for_shutting_down_vms
def _wait_for_shutting_down_vms(self):
"""
Wait loop checking remaining VMs in vm container
    This helper method increases the probability that the engine
    properly acknowledges that all VMs were terminated by the host
    shutdown.
    The VMs are shut down by an external service: libvirt-guests.
    The service pauses the system shutdown on systemd shutdown
    and gracefully shuts down the running VMs.
If the host is running, the method ends immediately.
"""
    # how long to wait before releasing the shutdown
# we are waiting in whole seconds
# if config is not present, do not wait
timeout = config.getint('vars', 'timeout_engine_clear_vms')
# time to wait in the final phase in seconds
# it allows host to flush its final state to the engine
final_wait = 2
if not host_in_shutdown():
return
self.log.info('host in shutdown waiting')
for _ in range((timeout - final_wait) * 10):
if not self.vmContainer:
# once all VMs are cleared exit
break
time.sleep(0.1)
time.sleep(final_wait)
Author ID: nirs, Project: vdsm, Lines of code: 35, Source file: clientIF.py
Example 14: _recoverExistingVms
def _recoverExistingVms(self):
start_time = utils.monotonic_time()
try:
self.log.debug('recovery: started')
# Starting up libvirt might take long when host under high load,
# we prefer running this code in external thread to avoid blocking
# API response.
mog = min(config.getint('vars', 'max_outgoing_migrations'),
numa.cpu_topology().cores)
migration.SourceThread.setMaxOutgoingMigrations(mog)
recovery.all_vms(self)
# recover stage 3: waiting for domains to go up
self._waitForDomainsUp()
recovery.clean_vm_files(self)
self._recovery = False
# Now if we have VMs to restore we should wait pool connection
# and then prepare all volumes.
# Actually, we need it just to get the resources for future
# volumes manipulations
self._waitForStoragePool()
self._preparePathsForRecoveredVMs()
self.log.info('recovery: completed in %is',
utils.monotonic_time() - start_time)
except:
self.log.exception("recovery: failed")
raise
Author ID: kanalun, Project: vdsm, Lines of code: 35, Source file: clientIF.py
Example 15: _setupVdsConnection
def _setupVdsConnection(self):
if self.hibernating:
return
hostPort = vdscli.cannonizeHostPort(
self._dst,
config.getint('addresses', 'management_port'))
self.remoteHost, port = hostPort.rsplit(':', 1)
try:
client = self._createClient(port)
requestQueues = config.get('addresses', 'request_queues')
requestQueue = requestQueues.split(",")[0]
self._destServer = jsonrpcvdscli.connect(requestQueue, client)
self.log.debug('Initiating connection with destination')
self._destServer.ping()
except (JsonRpcBindingsError, JsonRpcNoResponseError):
if config.getboolean('vars', 'ssl'):
self._destServer = vdscli.connect(
hostPort,
useSSL=True,
TransportClass=kaxmlrpclib.TcpkeepSafeTransport)
else:
self._destServer = kaxmlrpclib.Server('http://' + hostPort)
self.log.debug('Destination server is: ' + hostPort)
Author ID: mykaul, Project: vdsm, Lines of code: 27, Source file: migration.py
Example 16: _loadBindingJsonRpc
def _loadBindingJsonRpc(self):
from BindingJsonRpc import BindingJsonRpc
from Bridge import DynamicBridge
ip = config.get('addresses', 'management_ip')
port = config.getint('addresses', 'json_port')
conf = [('tcp', {"ip": ip, "port": port})]
self.bindings['json'] = BindingJsonRpc(DynamicBridge(), conf)
Author ID: therealmik, Project: vdsm, Lines of code: 7, Source file: clientIF.py
Example 17: _mem_committed
def _mem_committed(mem_size_mb):
"""
Legacy algorithm found in oVirt <= 4.1
"""
memory = mem_size_mb
memory += config.getint('vars', 'guest_ram_overhead')
return 2 ** 20 * memory
Author ID: oVirt, Project: vdsm, Lines of code: 7, Source file: vmoperations_test.py
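A quick check of Example 17's arithmetic with an assumed overhead value (the real 'guest_ram_overhead' default may differ):

guest_ram_overhead = 64  # assumed config value, in MB
mem_size_mb = 1024
committed = 2 ** 20 * (mem_size_mb + guest_ram_overhead)
print(committed)  # 1140850688 bytes, i.e. 1088 MiB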
Example 18: _perform_migration
def _perform_migration(self, duri, muri):
if self._vm.hasSpice and self._vm.conf.get('clientIp'):
SPICE_MIGRATION_HANDOVER_TIME = 120
self._vm._reviveTicket(SPICE_MIGRATION_HANDOVER_TIME)
maxBandwidth = config.getint('vars', 'migration_max_bandwidth')
    # FIXME: there is still a race here with libvirt: if we call
    # stop() before libvirt's migrateToURI3 has started, we may
    # report the migration as stopped even though it then starts
    # on the libvirt side.
self._preparingMigrationEvt = False
if not self._migrationCanceledEvt:
# TODO: use libvirt constants when bz#1222795 is fixed
params = {VIR_MIGRATE_PARAM_URI: str(muri),
VIR_MIGRATE_PARAM_BANDWIDTH: maxBandwidth}
flags = (libvirt.VIR_MIGRATE_LIVE |
libvirt.VIR_MIGRATE_PEER2PEER |
(libvirt.VIR_MIGRATE_TUNNELLED if
self._tunneled else 0) |
(libvirt.VIR_MIGRATE_ABORT_ON_ERROR if
self._abortOnError else 0) |
(libvirt.VIR_MIGRATE_COMPRESSED if
self._compressed else 0) |
(libvirt.VIR_MIGRATE_AUTO_CONVERGE if
self._autoConverge else 0))
self._vm._dom.migrateToURI3(duri, params, flags)
else:
self._raiseAbortError()
Author ID: txomon, Project: vdsm, Lines of code: 30, Source file: migration.py
Example 19: calculate_volume_alloc_size
def calculate_volume_alloc_size(cls, preallocate, capacity, initial_size):
""" Calculate the allocation size in mb of the volume
'preallocate' - Sparse or Preallocated
'capacity' - the volume size in blocks
'initial_size' - optional, if provided the initial allocated
size in blocks for sparse volumes
"""
if initial_size and preallocate == sc.PREALLOCATED_VOL:
log.error("Initial size is not supported for preallocated volumes")
raise se.InvalidParameterException("initial size",
initial_size)
if initial_size:
capacity_bytes = capacity * sc.BLOCK_SIZE
initial_size_bytes = initial_size * sc.BLOCK_SIZE
max_size = cls.max_size(capacity_bytes, sc.COW_FORMAT)
if initial_size_bytes > max_size:
log.error("The requested initial %s is bigger "
"than the max size %s", initial_size_bytes, max_size)
raise se.InvalidParameterException("initial size",
initial_size)
if preallocate == sc.SPARSE_VOL:
if initial_size:
initial_size = int(initial_size * QCOW_OVERHEAD_FACTOR)
alloc_size = (utils.round(initial_size, BLOCKS_TO_MB) //
BLOCKS_TO_MB)
else:
alloc_size = config.getint("irs",
"volume_utilization_chunk_mb")
else:
alloc_size = utils.round(capacity, BLOCKS_TO_MB) // BLOCKS_TO_MB
return alloc_size
Author ID: nirs, Project: vdsm, Lines of code: 34, Source file: blockVolume.py
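For a sparse volume with an initial size, Example 19 pads the initial size by QCOW_OVERHEAD_FACTOR and rounds up to megabytes. A worked sketch with assumed constants (the 1.1 factor and 2048 512-byte blocks per MB are illustrative, and round_up stands in for utils.round):

QCOW_OVERHEAD_FACTOR = 1.1  # assumed value
BLOCKS_TO_MB = 2048         # 512-byte blocks per MB, assumed

def round_up(n, size):
    # Stand-in for utils.round: round n up to a multiple of size.
    return ((n + size - 1) // size) * size

initial_size = 4 * 1024 * 2048  # assumed 4 GB expressed in blocks
padded = int(initial_size * QCOW_OVERHEAD_FACTOR)
alloc_size = round_up(padded, BLOCKS_TO_MB) // BLOCKS_TO_MB
print(alloc_size)  # 4506 MB for a 4 GB initial size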
Example 20: _perform_migration
def _perform_migration(self, duri, muri):
if self._vm.hasSpice and self._vm.conf.get('clientIp'):
SPICE_MIGRATION_HANDOVER_TIME = 120
self._vm._reviveTicket(SPICE_MIGRATION_HANDOVER_TIME)
maxBandwidth = config.getint('vars', 'migration_max_bandwidth')
    # FIXME: there is still a race here with libvirt: if we call
    # stop() before libvirt's migrateToURI2 has started, we may
    # report the migration as stopped even though it then starts
    # on the libvirt side.
self._preparingMigrationEvt = False
if not self._migrationCanceledEvt:
self._vm._dom.migrateToURI2(
duri, muri, None,
libvirt.VIR_MIGRATE_LIVE |
libvirt.VIR_MIGRATE_PEER2PEER |
(libvirt.VIR_MIGRATE_TUNNELLED if
self._tunneled else 0) |
(libvirt.VIR_MIGRATE_ABORT_ON_ERROR if
self._abortOnError else 0) |
(libvirt.VIR_MIGRATE_COMPRESSED if
self._compressed else 0) |
(libvirt.VIR_MIGRATE_AUTO_CONVERGE if
self._autoConverge else 0),
None, maxBandwidth)
else:
self._raiseAbortError()
Author ID: HongweiBi, Project: vdsm, Lines of code: 27, Source file: migration.py
Note: the vdsm.config.config.getint function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not repost without permission.