This article collects typical usage examples of the lock function from the Python module neutron.openstack.common.lockutils. If you have been wondering what lock does, how to call it, and what real-world uses look like, the curated examples below should help.
Twenty code examples of the lock function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system surface better Python code examples.
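Before diving into the examples, here is a minimal sketch of the two common ways to take this lock. Note that neutron.openstack.common.lockutils was oslo-incubator code that later graduated to oslo_concurrency.lockutils; the sketch uses the successor module and assumes its call signatures match the incubated version shown in the examples.

from oslo_concurrency import lockutils

# In-process lock: a named semaphore shared by every thread (or
# greenthread) in this process that asks for the same name.
with lockutils.lock('db-access'):
    pass  # critical section

# Decorator form: serialize every call to the function under one name.
@lockutils.synchronized('db-access')
def critical_section():
    pass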
Example 1: delete_flowrule
def delete_flowrule(self, context, id):
    '''
    Called by a DELETE to /flowrules/<id>.
    Three methods are called here:
    delete_flowrule_precommit -> mechanism driver method (it should record
        in the "flowmods" DB table the delete flowmods to be instantiated)
    delete_flowrule -> calls delete_flowrule in db_base_plugin_v2.py (it
        deletes the flowrule from the "flowrules" table, plus some checks)
    delete_flowrule_postcommit -> mechanism driver method to sync all
        flowmods with ODL
    '''
# The id is the unique identifier that can be used to delete
# the row entry of your database.
    LOG.debug(_("delete_flowrule with id %s"), id)
    # no return value required
session = context.session
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
flowrule = self.get_flowrule(context, id)
mech_context = driver_context.FlowruleContext(self,
context,
flowrule)
self.mechanism_manager.delete_flowrule_precommit(mech_context)
        try:  # check if the flowrule exists in the DB
flowrule_db = (session.query(models_v2.Flowrule).
enable_eagerloads(False).
filter_by(id=id).with_lockmode('update').one())
except sa_exc.NoResultFound:
LOG.debug(_("The flowrule '%s' does not exist anymore"), id)
return
flowrule = self._make_flowrule_dict(flowrule_db)
super(Ml2Plugin, self).delete_flowrule(context, id)
        # TBD: move the postcommit call out of the transaction
        self.mechanism_manager.delete_flowrule_postcommit(mech_context)
Developer: netgroup-polito, Project: frog3, Lines: 34, Source: plugin.py
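A side note before the next example: example 1, and most of the Ml2 examples below, pair the lock with a DB transaction via contextlib.nested, which exists only on Python 2 and was removed in Python 3. A self-contained sketch of the modern equivalent, using stand-in context managers so it runs anywhere:

from contextlib import contextmanager

@contextmanager
def fake_cm(name):
    # stand-in for lockutils.lock(...) or session.begin(...)
    print('enter %s' % name)
    yield
    print('exit %s' % name)

# Python 2 only: with contextlib.nested(fake_cm('lock'), fake_cm('txn')):
# Python 3 spells the same thing as one multi-manager with statement:
with fake_cm('db-access'), fake_cm('transaction'):
    print('inside both context managers')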
Example 2: _txn_from_context
def _txn_from_context(self, context, tag="<unset>"):
"""Context manager: opens a DB transaction against the given context.
    If required, this also takes the Neutron-wide db-access semaphore.

    :return: a context manager for use in a with statement.
    """
session = context.session
conn_url = str(session.connection().engine.url).lower()
if (conn_url.startswith("mysql:") or
conn_url.startswith("mysql+mysqldb:")):
# Neutron is using the mysqldb driver for accessing the database.
# This has a known incompatibility with eventlet that leads to
# deadlock. Take the neutron-wide db-access lock as a workaround.
# See https://bugs.launchpad.net/oslo.db/+bug/1350149 for a
# description of the issue.
LOG.debug("Waiting for db-access lock tag=%s...", tag)
try:
with lockutils.lock('db-access'):
LOG.debug("...acquired db-access lock tag=%s", tag)
with context.session.begin(subtransactions=True) as txn:
yield txn
finally:
LOG.debug("Released db-access lock tag=%s", tag)
else:
# Liberty or later uses an eventlet-safe mysql library. (Or, we're
# not using mysql at all.)
LOG.debug("Not using mysqldb driver, skipping db-access lock")
with context.session.begin(subtransactions=True) as txn:
yield txn
Developer: nbartos, Project: networking-calico, Lines: 30, Source: mech_calico.py
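The driver check in example 2 distills to a tiny predicate. Here is a self-contained sketch; needs_db_access_lock is a hypothetical helper name, and the URL prefixes are exactly the ones the example tests for:

def needs_db_access_lock(conn_url):
    """True when the eventlet-unsafe mysqldb driver is in use
    (see https://bugs.launchpad.net/oslo.db/+bug/1350149)."""
    url = conn_url.lower()
    return url.startswith('mysql:') or url.startswith('mysql+mysqldb:')

assert needs_db_access_lock('mysql+mysqldb://user:pw@db-host/neutron')
assert not needs_db_access_lock('mysql+pymysql://user:pw@db-host/neutron')
assert not needs_db_access_lock('postgresql://user:pw@db-host/neutron')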
Example 3: update_port_status
def update_port_status(self, context, port_id, status, host=None):
"""
Returns port_id (non-truncated uuid) if the port exists.
Otherwise returns None.
"""
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
            LOG.warning(_("Port %(port)s updated by agent not found"),
                        {'port': port_id})
return None
if port.status != status:
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
mech_context = driver_context.PortContext(
self, context, updated_port, network, port.port_binding,
original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
return port['id']
Developer: aignatov, Project: neutron, Lines: 34, Source: plugin.py
Example 4: update_port_status
def update_port_status(self, context, port_id, status):
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to prevent
# undesired eventlet yields leading to 'lock wait timeout' errors
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
            LOG.warning(_("Port %(port)s updated by agent not found"),
                        {'port': port_id})
return False
if port.status != status:
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
mech_context = driver_context.PortContext(
self, context, updated_port, network,
original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
return True
Developer: ArifovicH, Project: neutron, Lines: 28, Source: plugin.py
Example 5: _set
def _set(self, key, value, ttl=0, not_exists=False):
with lockutils.lock(key):
# NOTE(flaper87): This is needed just in `set`
# calls, hence it's not in `_set_unlocked`
if not_exists and self._exists_unlocked(key):
return False
self._set_unlocked(key, value, ttl)
return True
Developer: CiscoSystems, Project: neutron, Lines: 10, Source: memory.py
Example 6: _incr_append
def _incr_append(self, key, other):
with lockutils.lock(key):
timeout, value = self._get_unlocked(key)
if value is None:
return None
ttl = timeutils.utcnow_ts() - timeout
new_value = value + other
self._set_unlocked(key, new_value, ttl)
return new_value
Developer: CiscoSystems, Project: neutron, Lines: 11, Source: memory.py
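Examples 5 and 6 name the lock after the cache key, so operations on different keys never serialize each other. A stand-alone restatement of that per-key pattern with plain threading locks (TinyStore and its methods are hypothetical; the real backend also tracks TTLs):

import threading

class TinyStore(object):
    def __init__(self):
        self._data = {}
        self._locks = {}
        self._guard = threading.Lock()  # protects the lock table itself

    def _lock_for(self, key):
        with self._guard:
            return self._locks.setdefault(key, threading.Lock())

    def set(self, key, value, not_exists=False):
        with self._lock_for(key):
            if not_exists and key in self._data:
                return False  # same contract as example 5
            self._data[key] = value
            return True

store = TinyStore()
assert store.set('a', 1, not_exists=True)
assert not store.set('a', 2, not_exists=True)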
Example 7: _apply
def _apply(self):
lock_name = "iptables"
if self.namespace:
lock_name += "-" + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug('Got semaphore / lock "%s"', lock_name)
return self._apply_synchronized()
finally:
LOG.debug('Semaphore / lock released "%s"', lock_name)
Developer: asadoughi, Project: neutron, Lines: 11, Source: iptables_manager.py
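The third positional argument in example 7 (True) is external, assuming the incubated signature lock(name, lock_file_prefix=None, external=False, lock_path=None): the lock is backed by a lock file and serializes across processes, not just threads, which matters because several agents may rewrite the same iptables state. A sketch with the oslo.concurrency successor module, keywords spelled out (lock name and path are illustrative):

from oslo_concurrency import lockutils

with lockutils.lock('iptables-qrouter-1', lock_file_prefix='neutron-',
                    external=True, lock_path='/var/lock/neutron'):
    # exactly one process at a time executes this block
    pass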
Example 8: update_port
def update_port(self, context, id, port):
attrs = port['port']
need_port_update_notify = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
raise exc.PortNotFound(port_id=id)
original_port = self._make_port_dict(port_db)
updated_port = super(Ml2Plugin, self).update_port(context, id,
port)
if addr_pair.ADDRESS_PAIRS in port['port']:
need_port_update_notify |= (
self.update_address_pairs_on_port(context, id, port,
original_port,
updated_port))
need_port_update_notify |= self.update_security_group_on_port(
context, id, port, original_port, updated_port)
network = self.get_network(context, original_port['network_id'])
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
mech_context = driver_context.PortContext(
self, context, updated_port, network, binding,
original_port=original_port)
need_port_update_notify |= self._process_port_binding(
mech_context, attrs)
self.mechanism_manager.update_port_precommit(mech_context)
    # TODO(apech) - handle errors raised by update_port, potentially
    # by re-calling update_port with the previous attributes. For
    # now the error is propagated to the caller, which is expected to
    # either undo/retry the operation or delete the resource.
self.mechanism_manager.update_port_postcommit(mech_context)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
bound_port = self._bind_port_if_needed(
mech_context,
allow_notify=True,
need_notify=need_port_update_notify)
return bound_port._port
Developer: aignatov, Project: neutron, Lines: 52, Source: plugin.py
Example 9: instance
def instance(cls, l3_agent):
"""Creates instance (singleton) of service.
    Do not call this directly on the base class. Instead, it should be
    called by a child class that represents a specific service type.
    This ensures that only one instance is created for all agents of a
    specific service type.
    """
if not cls._instance:
with lockutils.lock('instance'):
if not cls._instance:
cls._instance = cls(l3_agent)
return cls._instance
Developer: CiscoSystems, Project: neutron, Lines: 15, Source: advanced_service.py
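Example 9 is the classic double-checked locking idiom: the first check skips the lock on the hot path, and the second check, made while holding the lock, guards against a racing thread having created the instance in between. A self-contained restatement with a plain threading.Lock (class and parameter names hypothetical):

import threading

class SingletonService(object):
    _instance = None
    _lock = threading.Lock()

    @classmethod
    def instance(cls, agent):
        if cls._instance is None:          # cheap unlocked check
            with cls._lock:
                if cls._instance is None:  # re-check under the lock
                    cls._instance = cls(agent)
        return cls._instance

    def __init__(self, agent):
        self.agent = agent

a = SingletonService.instance('agent-1')
b = SingletonService.instance('agent-2')
assert a is b  # the second call returns the first instance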
Example 10: get_client
def get_client(context, admin=False):
if admin or (context.is_admin and not context.auth_token):
with lockutils.lock('neutron_admin_auth_token_lock'):
orig_token = AdminTokenStore.get().admin_auth_token
client = _get_client(orig_token, admin=True)
return ClientWrapper(client)
# We got a user token that we can use as-is
if context.auth_token:
token = context.auth_token
return _get_client(token=token)
    # We did not get a user token and we should not be using
    # an admin token, so raise an error
raise neutron_client_exc.Unauthorized()
Developer: philpraxis, Project: group-based-policy, Lines: 15, Source: client.py
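Example 10 takes a process-wide named lock just to read the shared admin token, so a concurrent refresh elsewhere cannot hand back a half-updated value. The shape of that pattern, with hypothetical names:

import threading

_token_lock = threading.Lock()
_token_store = {'admin_auth_token': None}

def read_admin_token():
    # Serialize access to the shared token, mirroring
    # lockutils.lock('neutron_admin_auth_token_lock') in example 10.
    with _token_lock:
        return _token_store['admin_auth_token']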
Example 11: delete_port
def delete_port(self, context, id, l3_port_check=True):
LOG.debug(_("Deleting port %s"), id)
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin and l3_port_check:
l3plugin.prevent_l3_port_deletion(context, id)
session = context.session
# REVISIT: Serialize this operation with a semaphore to prevent
# undesired eventlet yields leading to 'lock wait timeout' errors
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
try:
port_db = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(id=id).with_lockmode('update').one())
except sa_exc.NoResultFound:
# the port existed when l3plugin.prevent_l3_port_deletion
# was called but now is already gone
LOG.debug(_("The port '%s' was deleted"), id)
return
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
mech_context = driver_context.PortContext(self, context, port,
network)
self.mechanism_manager.delete_port_precommit(mech_context)
self._delete_port_security_group_bindings(context, id)
LOG.debug(_("Calling base delete_port"))
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
super(Ml2Plugin, self).delete_port(context, id)
    # now that we've left the db transaction, we are safe to notify
if l3plugin:
l3plugin.notify_routers_updated(context, router_ids)
try:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
        # TODO(apech) - One or more mechanism drivers failed to
        # delete the port. Ideally we'd notify the caller of the
        # fact that an error occurred.
LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
self.notify_security_groups_member_updated(context, port)
Developer: leejian0612, Project: ryu-ovsnetwork, Lines: 47, Source: plugin.py
Example 12: delete_port
def delete_port(self, context, id, l3_port_check=True):
LOG.debug(_("Deleting port %s"), id)
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin and l3_port_check:
l3plugin.prevent_l3_port_deletion(context, id)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
# the port existed when l3plugin.prevent_l3_port_deletion
# was called but now is already gone
LOG.debug(_("The port '%s' was deleted"), id)
return
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
mech_context = driver_context.PortContext(self, context, port,
network, binding)
self.mechanism_manager.delete_port_precommit(mech_context)
self._delete_port_security_group_bindings(context, id)
LOG.debug(_("Calling base delete_port"))
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
super(Ml2Plugin, self).delete_port(context, id)
    # now that we've left the db transaction, we are safe to notify
if l3plugin:
l3plugin.notify_routers_updated(context, router_ids)
try:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
        # TODO(apech) - One or more mechanism drivers failed to
        # delete the port. Ideally we'd notify the caller of the
        # fact that an error occurred.
LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
self.notify_security_groups_member_updated(context, port)
Developer: aignatov, Project: neutron, Lines: 46, Source: plugin.py
Example 13: _run_commands
def _run_commands(self, port, commands):
num_tries = 0
max_tries = 1 + self._config.auth_failure_retries
sleep_time = self._config.auth_failure_retry_interval
while True:
num_tries += 1
try:
# we must lock during switch communication here because we run
# the save commands in a separate greenthread.
with lockutils.lock('CiscoDriver-%s' % (port.switch_host),
lock_file_prefix='neutron-'):
return self._run_commands_inner(port, commands)
except CiscoException as err:
if (num_tries == max_tries or not self._retryable_error(err)):
raise
            LOG.warning("Received retryable failure: %s", err)
time.sleep(sleep_time)
Developer: naototty, Project: ironic-neutron-plugin, Lines: 18, Source: driver.py
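The retry loop in example 13 generalizes cleanly: attempt the locked operation, and retry only on errors the caller deems transient. A distilled version (function and parameter names hypothetical):

import time

def run_with_retries(fn, max_tries, sleep_time, retryable):
    num_tries = 0
    while True:
        num_tries += 1
        try:
            return fn()
        except Exception as err:
            # give up on the last attempt or on non-transient errors
            if num_tries == max_tries or not retryable(err):
                raise
            time.sleep(sleep_time)

# e.g. retry a flaky call up to 3 times, treating ValueError as transient
attempts = []
def flaky():
    attempts.append(1)
    if len(attempts) < 3:
        raise ValueError('transient')
    return 'ok'

assert run_with_retries(flaky, 3, 0,
                        lambda e: isinstance(e, ValueError)) == 'ok'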
Example 14: delete_listener
def delete_listener(self, context, id):
with contextlib.nested(lockutils.lock('db-access'),
context.session.begin(subtransactions=True)):
listener_db_entry = self._get_resource(context, models.Listener, id)
#if listener_db_entry.admin_state_up:
# filters = {'loadbalancer_id': [listener_db_entry.loadbalancer_id],
# 'admin_state_up': [True]}
# all_filters = {'loadbalancer_id': [listener_db_entry.loadbalancer_id]}
# all_listeners = self._get_resources(context,
# models.Listener,
# filters=all_filters)
# if len(all_listeners)>1:
# up_listeners = self._get_resources(context,
# models.Listener,
# filters=filters)
# if len(up_listeners)<=1:
# raise loadbalancerv2.OneListenerAdminStateUpAtLeast(
# lb_id=listener_db_entry.loadbalancer_id)
context.session.delete(listener_db_entry)
Developer: CingHu, Project: neutron-ustack, Lines: 19, Source: loadbalancer_dbv2.py
Example 15: __init__
def __init__(self, name, lock_file_prefix=None):
self.mgr = lockutils.lock(name, lock_file_prefix, True)
Developer: 50infivedays, Project: neutron, Lines: 2, Source: lockutils.py
Example 16: __contains__
def __contains__(self, key):
with lockutils.lock(key):
return self._exists_unlocked(key)
Developer: CiscoSystems, Project: neutron, Lines: 3, Source: memory.py
Example 17: _get
def _get(self, key, default=None):
with lockutils.lock(key):
return self._get_unlocked(key, default)[1]
Developer: CiscoSystems, Project: neutron, Lines: 3, Source: memory.py
Example 18: update_port_status
def update_port_status(self, context, port_id, status, host=None):
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to prevent
# undesired eventlet yields leading to 'lock wait timeout' errors
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
            LOG.warning(_("Port %(port)s updated by agent not found"),
                        {'port': port_id})
return False
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(port_id=port['id'],
host=host,
session=session)
if not binding:
LOG.error(_("Binding info for port %s not found"),
port_id)
return False
binding['status'] = status
binding.update(binding)
# binding already updated
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
            LOG.warning(_("Port %(port)s updated by agent not found"),
                        {'port': port_id})
return False
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
original_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
port.status = self._generate_dvr_port_status(session,
port['id'])
updated_port = self._make_port_dict(port)
mech_context = (driver_context.PortContext(
self, context, updated_port, network,
original_port=original_port,
binding=binding))
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
elif port.status != status:
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
mech_context = driver_context.PortContext(
self, context, updated_port, network,
original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
self._check_and_delete_dvr_port_binding(mech_context, context)
return True
Developer: joey5678, Project: tricircle, Lines: 63, Source: plugin.py
Example 19: delete_port
def delete_port(self, context, id, l3_port_check=True):
LOG.debug(_("Deleting port %s"), id)
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin and l3_port_check:
l3plugin.prevent_l3_port_deletion(context, id)
session = context.session
mech_context = None
# REVISIT: Serialize this operation with a semaphore to prevent
# undesired eventlet yields leading to 'lock wait timeout' errors
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
try:
port_db = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(id=id).with_lockmode('update').one())
except sa_exc.NoResultFound:
# the port existed when l3plugin.prevent_l3_port_deletion
# was called but now is already gone
LOG.debug(_("The port '%s' was deleted"), id)
return
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(id)
for bind in bindings:
mech_context = driver_context.PortContext(self, context,
port, network,
binding=bind)
self.mechanism_manager.delete_port_precommit(mech_context)
LOG.debug("Calling base delete_port %s for DVR", id)
super(Ml2Plugin, self).delete_port(context, id)
else:
mech_context = driver_context.PortContext(self, context, port,
network)
if "compute:" in port['device_owner']:
self.dvr_deletens_ifnovm(context, id)
self.mechanism_manager.delete_port_precommit(mech_context)
self._delete_port_security_group_bindings(context, id)
LOG.debug(_("Calling base delete_port"))
if l3plugin:
l3plugin.disassociate_floatingips(context, id)
super(Ml2Plugin, self).delete_port(context, id)
try:
# for both normal and DVR Interface ports, only one invocation of
# delete_port_postcommit
if mech_context:
self.mechanism_manager.delete_port_postcommit(mech_context)
else:
LOG.error(_("Unable to invoke delete_port_postcommit,"
" mech_context NULL for port %s"), id)
except ml2_exc.MechanismDriverError:
        # TODO(apech) - One or more mechanism drivers failed to
        # delete the port. Ideally we'd notify the caller of the
        # fact that an error occurred.
LOG.error(_("mechanism_manager.delete_port_postcommit failed for"
" port %s"), id)
self.notify_security_groups_member_updated(context, port)
Developer: joey5678, Project: tricircle, Lines: 61, Source: plugin.py
Example 20: delete_network
def delete_network(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
# function is not used because it auto-deletes ports and
# subnets from the DB without invoking the derived class's
# delete_port() or delete_subnet(), preventing mechanism
# drivers from being called. This approach should be revisited
# when the API layer is reworked during icehouse.
LOG.debug(_("Deleting network %s"), id)
session = context.session
while True:
try:
            # REVISIT(rkukura): It's not clear that
# with_lockmode('update') is really needed in this
# transaction, and if not, the semaphore can also be
# removed.
#
# REVISIT: Serialize this operation with a semaphore
# to prevent deadlock waiting to acquire a DB lock
# held by another thread in the same process, leading
# to 'lock wait timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
self._process_l3_delete(context, id)
# Get ports to auto-delete.
ports = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(network_id=id).
with_lockmode('update').all())
LOG.debug(_("Ports to auto-delete: %s"), ports)
only_auto_del = all(p.device_owner
in db_base_plugin_v2.
AUTO_DELETE_PORT_OWNERS
for p in ports)
if not only_auto_del:
LOG.debug(_("Tenant-owned ports exist"))
raise exc.NetworkInUse(net_id=id)
# Get subnets to auto-delete.
subnets = (session.query(models_v2.Subnet).
enable_eagerloads(False).
filter_by(network_id=id).
with_lockmode('update').all())
LOG.debug(_("Subnets to auto-delete: %s"), subnets)
if not (ports or subnets):
network = self.get_network(context, id)
mech_context = driver_context.NetworkContext(self,
context,
network)
self.mechanism_manager.delete_network_precommit(
mech_context)
record = self._get_network(context, id)
LOG.debug(_("Deleting network record %s"), record)
session.delete(record)
for segment in mech_context.network_segments:
self.type_manager.release_segment(session, segment)
# The segment records are deleted via cascade from the
# network record, so explicit removal is not necessary.
LOG.debug(_("Committing transaction"))
break
except os_db_exception.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
msg = _("A concurrent port creation has occurred")
LOG.warning(msg)
continue
for port in ports:
try:
self.delete_port(context, port.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Exception auto-deleting port %s"),
port.id)
for subnet in subnets:
try:
self.delete_subnet(context, subnet.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Exception auto-deleting subnet %s"),
subnet.id)
try:
self.mechanism_manager.delete_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
        # TODO(apech) - One or more mechanism drivers failed to
        # delete the network. Ideally we'd notify the caller of
        # the fact that an error occurred.
LOG.error(_("mechanism_manager.delete_network_postcommit failed"))
self.notifier.network_delete(context, id)
Developer: aignatov, Project: neutron, Lines: 96, Source: plugin.py
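Example 20 wraps the whole lock-plus-transaction in a while True loop and retries from scratch when a concurrent port creation trips an integrity error. The control flow, boiled down (names hypothetical):

def delete_with_retry(run_transaction, is_concurrency_error):
    # Hypothetical distillation of example 20's control flow.
    while True:
        try:
            run_transaction()  # take the lock, run the DB transaction
            break              # committed cleanly, stop retrying
        except Exception as err:
            if not is_concurrency_error(err):
                raise          # unexpected failure: propagate
            # a concurrent creation raced us; retry the whole transaction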
Note: the neutron.openstack.common.lockutils.lock examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not repost without permission.