This page collects typical usage examples of the Python function portage.locks.unlockfile. If you are wondering what unlockfile does, how to call it, or what real-world usage looks like, the curated examples below should help.
Seventeen code examples of the unlockfile function are shown below, sorted by popularity by default.
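Before the examples, here is a minimal sketch of the lockfile/unlockfile pairing that all of the snippets below rely on. The lock path and the work inside the try block are placeholders for illustration only; the point is that lockfile() returns a lock token which must be handed back to unlockfile(), normally from a finally block so the lock is released even if the work raises.

from portage.locks import lockfile, unlockfile

my_lock = None
try:
    # Acquire (or create) the lock file; the returned token identifies the lock.
    my_lock = lockfile("/tmp/example.lock", wantnewlockfile=1)
    # ... do work while holding the lock ...
finally:
    # Always release the lock, even if the work above raised an exception.
    if my_lock:
        unlockfile(my_lock)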
Example 1: getUnreadItems
def getUnreadItems(self, repoid, update=False):
    """
    Determine if there are unread relevant items in news.repoid.unread.
    If there are unread items return their number.
    If update is specified, updateNewsItems( repoid ) will be called to
    check for new items.
    """
    if update:
        self.updateItems(repoid)

    unread_filename = self._unread_filename(repoid)
    unread_lock = None
    try:
        unread_lock = lockfile(unread_filename, wantnewlockfile=1)
    except (InvalidLocation, OperationNotPermitted, PermissionDenied):
        pass
    try:
        try:
            return len(grabfile(unread_filename))
        except PermissionDenied:
            return 0
    finally:
        if unread_lock:
            unlockfile(unread_lock)
Author: nullishzero | Project: Portage | Lines: 25 | Source: news.py
Example 2: unlock
def unlock(self):
    if self._lock_obj is None:
        raise AssertionError('not locked')
    if self.returncode is None:
        raise AssertionError('lock not acquired yet')
    unlockfile(self._lock_obj)
    self._lock_obj = None
Author: devurandom | Project: portage | Lines: 7 | Source: AsynchronousLock.py
Example 3: _unlock
def _unlock(self):
    if self._lock_obj is None:
        raise AssertionError('not locked')
    if self.returncode is None:
        raise AssertionError('lock not acquired yet')
    if self._unlock_future is not None:
        raise AssertionError("already unlocked")
    self._unlock_future = self.scheduler.create_future()
    unlockfile(self._lock_obj)
    self._lock_obj = None
Author: dol-sen | Project: portage | Lines: 10 | Source: AsynchronousLock.py
Example 4: unlock
def unlock(self):
    """
    This method is deprecated in favor of async_unlock, since waiting
    for the child process to respond can trigger event loop recursion
    which is incompatible with asyncio.
    """
    if self._imp is None:
        raise AssertionError('not locked')
    if isinstance(self._imp, (_LockProcess, _LockThread)):
        self._imp.unlock()
    else:
        unlockfile(self._imp)
    self._imp = None
Author: dol-sen | Project: portage | Lines: 13 | Source: AsynchronousLock.py
Example 5: populate
def populate(self, getbinpkgs=0):
    "populates the binarytree"
    if self._populating:
        return
    from portage.locks import lockfile, unlockfile
    pkgindex_lock = None
    try:
        if os.access(self.pkgdir, os.W_OK):
            pkgindex_lock = lockfile(self._pkgindex_file,
                wantnewlockfile=1)
        self._populating = True
        self._populate(getbinpkgs)
    finally:
        if pkgindex_lock:
            unlockfile(pkgindex_lock)
        self._populating = False
Author: Neuvoo | Project: legacy-portage | Lines: 17 | Source: bintree.py
Example 6: async_unlock
def async_unlock(self):
    """
    Release the lock asynchronously. Release notification is available
    via the add_done_callback method of the returned Future instance.

    @returns: Future, result is None
    """
    if self._imp is None:
        raise AssertionError('not locked')
    if self._unlock_future is not None:
        raise AssertionError("already unlocked")
    if isinstance(self._imp, (_LockProcess, _LockThread)):
        unlock_future = self._imp.async_unlock()
    else:
        unlockfile(self._imp)
        unlock_future = self.scheduler.create_future()
        self.scheduler.call_soon(unlock_future.set_result, None)
    self._imp = None
    self._unlock_future = unlock_future
    return unlock_future
Author: dol-sen | Project: portage | Lines: 20 | Source: AsynchronousLock.py
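As a side note, a caller of async_unlock might consume the returned Future along these lines. This is a hedged sketch, not code from the project above: the lock variable, the helper name release_lock_async, and the callback are illustrative assumptions.

def release_lock_async(lock):
    # `lock` is assumed to be an already-acquired AsynchronousLock instance.
    unlock_future = lock.async_unlock()
    # Release notification arrives through the Future, per the docstring above.
    unlock_future.add_done_callback(lambda future: print("lock released"))
    return unlock_future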
Example 7: populate
def populate(self, getbinpkgs=0, getbinpkgsonly=None):
    "populates the binarytree"
    if getbinpkgsonly is not None:
        warnings.warn(
            "portage.dbapi.bintree.binarytree.populate(): " + \
            "getbinpkgsonly parameter is deprecated",
            DeprecationWarning, stacklevel=2)
    if self._populating:
        return
    from portage.locks import lockfile, unlockfile
    pkgindex_lock = None
    try:
        if os.access(self.pkgdir, os.W_OK):
            pkgindex_lock = lockfile(self._pkgindex_file,
                wantnewlockfile=1)
        self._populating = True
        self._populate(getbinpkgs)
    finally:
        if pkgindex_lock:
            unlockfile(pkgindex_lock)
        self._populating = False
Author: TommyD | Project: gentoo-portage-multilib | Lines: 23 | Source: bintree.py
Example 8: fetch
# ... (part of the code omitted here) ...
                                                continue
                                            except (IOError, OSError):
                                                pass
                                    fetched = 1
                                    continue
                                if True:
                                    # File is the correct size--check the checksums for the fetched
                                    # file NOW, for those users who don't have a stable/continuous
                                    # net connection. This way we have a chance to try to download
                                    # from another mirror...
                                    digests = _filter_unaccelarated_hashes(mydigests[myfile])
                                    if hash_filter is not None:
                                        digests = _apply_hash_filter(digests, hash_filter)
                                    verified_ok, reason = verify_all(myfile_path, digests)
                                    if not verified_ok:
                                        writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
                                            noiselevel=-1)
                                        writemsg(_("!!! Reason: %s\n") % reason[0],
                                            noiselevel=-1)
                                        writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
                                            (reason[1], reason[2]), noiselevel=-1)
                                        if reason[0] == _("Insufficient data for checksum verification"):
                                            return 0
                                        temp_filename = \
                                            _checksum_failure_temp_file(
                                            mysettings["DISTDIR"], myfile)
                                        writemsg_stdout(_("Refetching... "
                                            "File renamed to '%s'\n\n") % \
                                            temp_filename, noiselevel=-1)
                                        fetched=0
                                        checksum_failure_count += 1
                                        if checksum_failure_count == \
                                            checksum_failure_primaryuri:
                                            # Switch to "primaryuri" mode in order
                                            # to increase the probablility of
                                            # of success.
                                            primaryuris = \
                                                primaryuri_dict.get(myfile)
                                            if primaryuris:
                                                uri_list.extend(
                                                    reversed(primaryuris))
                                        if checksum_failure_count >= \
                                            checksum_failure_max_tries:
                                            break
                                    else:
                                        eout = EOutput()
                                        eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
                                        if digests:
                                            eout.ebegin("%s %s ;-)" % \
                                                (myfile, " ".join(sorted(digests))))
                                            eout.eend(0)
                                        fetched=2
                                        break
                        else:
                            if not myret:
                                fetched=2
                                break
                            elif mydigests!=None:
                                writemsg(_("No digest file available and download failed.\n\n"),
                                    noiselevel=-1)
                    finally:
                        if use_locks and file_lock:
                            unlockfile(file_lock)
                            file_lock = None

        if listonly:
            writemsg_stdout("\n", noiselevel=-1)
        if fetched != 2:
            if restrict_fetch and not restrict_fetch_msg:
                restrict_fetch_msg = True
                msg = _("\n!!! %s/%s"
                    " has fetch restriction turned on.\n"
                    "!!! This probably means that this "
                    "ebuild's files must be downloaded\n"
                    "!!! manually. See the comments in"
                    " the ebuild for more information.\n\n") % \
                    (mysettings["CATEGORY"], mysettings["PF"])
                writemsg_level(msg,
                    level=logging.ERROR, noiselevel=-1)
            elif restrict_fetch:
                pass
            elif listonly:
                pass
            elif not filedict[myfile]:
                writemsg(_("Warning: No mirrors available for file"
                    " '%s'\n") % (myfile), noiselevel=-1)
            else:
                writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
                    noiselevel=-1)
            if listonly:
                failed_files.add(myfile)
                continue
            elif fetchonly:
                failed_files.add(myfile)
                continue
            return 0
    if failed_files:
        return 0
    return 1
Author: clickbeetle | Project: portage-cb | Lines: 101 | Source: fetch.py
Example 9: unlock
def unlock(self):
    """Release our exclusive lock on the preserved libs registry."""
    if self._lock is None:
        raise AssertionError("not locked")
    unlockfile(self._lock)
    self._lock = None
Author: zy-sunshine | Project: easymgc | Lines: 6 | Source: PreservedLibsRegistry.py
Example 10: _input_handler
def _input_handler(self):
    # Read the whole pickle in a single atomic read() call.
    data = self._read_buf(self._files.pipe_in)
    if data is None:
        pass  # EAGAIN
    elif data:
        try:
            obj = pickle.loads(data)
        except SystemExit:
            raise
        except Exception:
            # The pickle module can raise practically
            # any exception when given corrupt data.
            pass
        else:
            self._reopen_input()

            cmd_key = obj[0]
            cmd_handler = self.commands[cmd_key]
            reply = cmd_handler(obj)
            try:
                self._send_reply(reply)
            except OSError as e:
                if e.errno == errno.ENXIO:
                    # This happens if the client side has been killed.
                    pass
                else:
                    raise

            # Allow the command to execute hooks after its reply
            # has been sent. This hook is used by the 'exit'
            # command to kill the ebuild process. For some
            # reason, the ebuild-ipc helper hangs up the
            # ebuild process if it is waiting for a reply
            # when we try to kill the ebuild process.
            reply_hook = getattr(cmd_handler,
                'reply_hook', None)
            if reply_hook is not None:
                reply_hook()

    else:  # EIO/POLLHUP
        # This can be triggered due to a race condition which happens when
        # the previous _reopen_input() call occurs before the writer has
        # closed the pipe (see bug #401919). It's not safe to re-open
        # without a lock here, since it's possible that another writer will
        # write something to the pipe just before we close it, and in that
        # case the write will be lost. Therefore, try for a non-blocking
        # lock, and only re-open the pipe if the lock is acquired.
        lock_filename = os.path.join(
            os.path.dirname(self.input_fifo), '.ipc_lock')
        try:
            lock_obj = lockfile(lock_filename, unlinkfile=True,
                flags=os.O_NONBLOCK)
        except TryAgain:
            # We'll try again when another IO_HUP event arrives.
            pass
        else:
            try:
                self._reopen_input()
            finally:
                unlockfile(lock_obj)
Author: gentoo | Project: portage | Lines: 62 | Source: EbuildIpcDaemon.py
Example 11: fix
def fix(self, **kwargs):
    onProgress = kwargs.get('onProgress', None)
    bintree = self._bintree
    _instance_key = bintree.dbapi._instance_key
    cpv_all = self._bintree.dbapi.cpv_all()
    cpv_all.sort()
    missing = []
    maxval = 0
    if onProgress:
        onProgress(maxval, 0)
    pkgindex = self._pkgindex
    missing = []
    stale = []
    metadata = {}
    for d in pkgindex.packages:
        cpv = _pkg_str(d["CPV"], metadata=d,
            settings=bintree.settings)
        d["CPV"] = cpv
        metadata[_instance_key(cpv)] = d
        if not bintree.dbapi.cpv_exists(cpv):
            stale.append(cpv)

    for cpv in cpv_all:
        d = metadata.get(_instance_key(cpv))
        if not d or self._need_update(cpv, d):
            missing.append(cpv)

    if missing or stale:
        from portage import locks
        pkgindex_lock = locks.lockfile(
            self._pkgindex_file, wantnewlockfile=1)
        try:
            # Repopulate with lock held.
            bintree._populate()
            cpv_all = self._bintree.dbapi.cpv_all()
            cpv_all.sort()

            pkgindex = bintree._load_pkgindex()
            self._pkgindex = pkgindex

            # Recount stale/missing packages, with lock held.
            missing = []
            stale = []
            metadata = {}
            for d in pkgindex.packages:
                cpv = _pkg_str(d["CPV"], metadata=d,
                    settings=bintree.settings)
                d["CPV"] = cpv
                metadata[_instance_key(cpv)] = d
                if not bintree.dbapi.cpv_exists(cpv):
                    stale.append(cpv)

            for cpv in cpv_all:
                d = metadata.get(_instance_key(cpv))
                if not d or self._need_update(cpv, d):
                    missing.append(cpv)

            maxval = len(missing)
            for i, cpv in enumerate(missing):
                d = bintree._pkgindex_entry(cpv)
                try:
                    bintree._eval_use_flags(cpv, d)
                except portage.exception.InvalidDependString:
                    writemsg("!!! Invalid binary package: '%s'\n" % \
                        bintree.getname(cpv), noiselevel=-1)
                else:
                    metadata[_instance_key(cpv)] = d
                if onProgress:
                    onProgress(maxval, i+1)

            for cpv in stale:
                del metadata[_instance_key(cpv)]

            # We've updated the pkgindex, so set it to
            # repopulate when necessary.
            bintree.populated = False

            del pkgindex.packages[:]
            pkgindex.packages.extend(metadata.values())
            bintree._pkgindex_write(self._pkgindex)
        finally:
            locks.unlockfile(pkgindex_lock)

    if onProgress:
        if maxval == 0:
            maxval = 1
        onProgress(maxval, maxval)
    return (True, None)
Author: dol-sen | Project: portage | Lines: 90 | Source: binhost.py
Example 12: fix
def fix(self, **kwargs):
    onProgress = kwargs.get('onProgress', None)
    bintree = self._bintree
    cpv_all = self._bintree.dbapi.cpv_all()
    cpv_all.sort()
    missing = []
    maxval = 0
    if onProgress:
        onProgress(maxval, 0)
    pkgindex = self._pkgindex
    missing = []
    metadata = {}
    for d in pkgindex.packages:
        metadata[d["CPV"]] = d

    for i, cpv in enumerate(cpv_all):
        d = metadata.get(cpv)
        if not d or self._need_update(cpv, d):
            missing.append(cpv)

    stale = set(metadata).difference(cpv_all)
    if missing or stale:
        from portage import locks
        pkgindex_lock = locks.lockfile(
            self._pkgindex_file, wantnewlockfile=1)
        try:
            # Repopulate with lock held.
            bintree._populate()
            cpv_all = self._bintree.dbapi.cpv_all()
            cpv_all.sort()

            pkgindex = bintree._load_pkgindex()
            self._pkgindex = pkgindex

            metadata = {}
            for d in pkgindex.packages:
                metadata[d["CPV"]] = d

            # Recount missing packages, with lock held.
            del missing[:]
            for i, cpv in enumerate(cpv_all):
                d = metadata.get(cpv)
                if not d or self._need_update(cpv, d):
                    missing.append(cpv)

            maxval = len(missing)
            for i, cpv in enumerate(missing):
                try:
                    metadata[cpv] = bintree._pkgindex_entry(cpv)
                except portage.exception.InvalidDependString:
                    writemsg("!!! Invalid binary package: '%s'\n" % \
                        bintree.getname(cpv), noiselevel=-1)
                if onProgress:
                    onProgress(maxval, i+1)

            for cpv in set(metadata).difference(
                self._bintree.dbapi.cpv_all()):
                del metadata[cpv]

            # We've updated the pkgindex, so set it to
            # repopulate when necessary.
            bintree.populated = False

            del pkgindex.packages[:]
            pkgindex.packages.extend(metadata.values())
            bintree._pkgindex_write(self._pkgindex)
        finally:
            locks.unlockfile(pkgindex_lock)

    if onProgress:
        if maxval == 0:
            maxval = 1
        onProgress(maxval, maxval)
    return None
Author: clickbeetle | Project: portage-cb | Lines: 76 | Source: binhost.py
Example 13: _input_handler
def _input_handler(self, fd, event):
    # Read the whole pickle in a single atomic read() call.
    data = None
    if event & self.scheduler.IO_IN:
        # For maximum portability, use os.read() here since
        # array.fromfile() and file.read() are both known to
        # erroneously return an empty string from this
        # non-blocking fifo stream on FreeBSD (bug #337465).
        try:
            data = os.read(fd, self._bufsize)
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
            # Assume that another event will be generated
            # if there's any relevant data.

    if data:
        try:
            obj = pickle.loads(data)
        except SystemExit:
            raise
        except Exception:
            # The pickle module can raise practically
            # any exception when given corrupt data.
            pass
        else:
            self._reopen_input()

            cmd_key = obj[0]
            cmd_handler = self.commands[cmd_key]
            reply = cmd_handler(obj)
            try:
                self._send_reply(reply)
            except OSError as e:
                if e.errno == errno.ENXIO:
                    # This happens if the client side has been killed.
                    pass
                else:
                    raise

            # Allow the command to execute hooks after its reply
            # has been sent. This hook is used by the 'exit'
            # command to kill the ebuild process. For some
            # reason, the ebuild-ipc helper hangs up the
            # ebuild process if it is waiting for a reply
            # when we try to kill the ebuild process.
            reply_hook = getattr(cmd_handler,
                'reply_hook', None)
            if reply_hook is not None:
                reply_hook()

    elif event & self.scheduler.IO_HUP:
        # This can be triggered due to a race condition which happens when
        # the previous _reopen_input() call occurs before the writer has
        # closed the pipe (see bug #401919). It's not safe to re-open
        # without a lock here, since it's possible that another writer will
        # write something to the pipe just before we close it, and in that
        # case the write will be lost. Therefore, try for a non-blocking
        # lock, and only re-open the pipe if the lock is acquired.
        lock_filename = os.path.join(
            os.path.dirname(self.input_fifo), '.ipc_lock')
        try:
            lock_obj = lockfile(lock_filename, unlinkfile=True,
                flags=os.O_NONBLOCK)
        except TryAgain:
            # We'll try again when another IO_HUP event arrives.
            pass
        else:
            try:
                self._reopen_input()
            finally:
                unlockfile(lock_obj)

    return True
Author: Spencerx | Project: portage | Lines: 76 | Source: EbuildIpcDaemon.py
Example 14: inject
def inject(self, cpv, filename=None):
    """Add a freshly built package to the database. This updates
    $PKGDIR/Packages with the new package metadata (including MD5).

    @param cpv: The cpv of the new package to inject
    @type cpv: string
    @param filename: File path of the package to inject, or None if it's
        already in the location returned by getname()
    @type filename: string
    @rtype: None
    """
    mycat, mypkg = catsplit(cpv)
    if not self.populated:
        self.populate()
    if filename is None:
        full_path = self.getname(cpv)
    else:
        full_path = filename
    try:
        s = os.stat(full_path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        del e
        writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
            noiselevel=-1)
        return
    mytbz2 = portage.xpak.tbz2(full_path)
    slot = mytbz2.getfile("SLOT")
    if slot is None:
        writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
            noiselevel=-1)
        return
    slot = slot.strip()
    self.dbapi.cpv_inject(cpv)

    # Reread the Packages index (in case it's been changed by another
    # process) and then updated it, all while holding a lock.
    from portage.locks import lockfile, unlockfile
    pkgindex_lock = None
    created_symlink = False
    try:
        pkgindex_lock = lockfile(self._pkgindex_file,
            wantnewlockfile=1)
        if filename is not None:
            new_filename = self.getname(cpv)
            self._ensure_dir(os.path.dirname(new_filename))
            _movefile(filename, new_filename, mysettings=self.settings)
        if self._all_directory and \
            self.getname(cpv).split(os.path.sep)[-2] == "All":
            self._create_symlink(cpv)
            created_symlink = True
        pkgindex = self._load_pkgindex()

        if not self._pkgindex_version_supported(pkgindex):
            pkgindex = self._new_pkgindex()

        try:
            d = self._pkgindex_entry(cpv)
        except portage.exception.InvalidDependString:
            writemsg(_("!!! Invalid binary package: '%s'\n") % \
                self.getname(cpv), noiselevel=-1)
            self.dbapi.cpv_remove(cpv)
            del self._pkg_paths[cpv]
            return

        # If found, remove package(s) with duplicate path.
        path = d.get("PATH", "")
        for i in range(len(pkgindex.packages) - 1, -1, -1):
            d2 = pkgindex.packages[i]
            if path and path == d2.get("PATH"):
                # Handle path collisions in $PKGDIR/All
                # when CPV is not identical.
                del pkgindex.packages[i]
            elif cpv == d2.get("CPV"):
                if path == d2.get("PATH", ""):
                    del pkgindex.packages[i]
                elif created_symlink and not d2.get("PATH", ""):
                    # Delete entry for the package that was just
                    # overwritten by a symlink to this package.
                    del pkgindex.packages[i]

        pkgindex.packages.append(d)

        self._update_pkgindex_header(pkgindex.header)
        from portage.util import atomic_ofstream
        f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
        try:
            pkgindex.write(f)
        finally:
            f.close()
    finally:
        if pkgindex_lock:
            unlockfile(pkgindex_lock)

    if self._remotepkgs is not None:
        # When a remote package is downloaded and injected,
        # update state so self.isremote() returns False.
        self._remotepkgs.pop(cpv, None)
Author: TommyD | Project: gentoo-portage-multilib | Lines: 98 | Source: bintree.py
Example 15: updateItems
def updateItems(self, repoid):
    """
    Figure out which news items from NEWS_PATH are both unread and relevant to
    the user (according to the GLEP 42 standards of relevancy). Then add these
    items into the news.repoid.unread file.
    """

    # Ensure that the unread path exists and is writable.
    try:
        ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
            mode=self._dir_mode, mask=self._mode_mask)
    except (OperationNotPermitted, PermissionDenied):
        return

    if not os.access(self.unread_path, os.W_OK):
        return

    news_dir = self._news_dir(repoid)
    try:
        news = _os.listdir(_unicode_encode(news_dir,
            encoding=_encodings['fs'], errors='strict'))
    except OSError:
        return

    skip_filename = self._skip_filename(repoid)
    unread_filename = self._unread_filename(repoid)
    unread_lock = lockfile(unread_filename, wantnewlockfile=1)
    try:
        try:
            unread = set(grabfile(unread_filename))
            unread_orig = unread.copy()
            skip = set(grabfile(skip_filename))
            skip_orig = skip.copy()
        except PermissionDenied:
            return

        for itemid in news:
            try:
                itemid = _unicode_decode(itemid,
                    encoding=_encodings['fs'], errors='strict')
            except UnicodeDecodeError:
                itemid = _unicode_decode(itemid,
                    encoding=_encodings['fs'], errors='replace')
                writemsg_level(
                    _("!!! Invalid encoding in news item name: '%s'\n") % \
                    itemid, level=logging.ERROR, noiselevel=-1)
                continue

            if itemid in skip:
                continue
            filename = os.path.join(news_dir, itemid,
                itemid + "." + self.language_id + ".txt")
            if not os.path.isfile(filename):
                continue
            item = NewsItem(filename, itemid)
            if not item.isValid():
                continue
            if item.isRelevant(profile=self._profile_path,
                config=self.config, vardb=self.vdb):
                unread.add(item.name)
                skip.add(item.name)

        if unread != unread_orig:
            write_atomic(unread_filename,
                "".join("%s\n" % x for x in sorted(unread)))
            apply_secpass_permissions(unread_filename,
                uid=self._uid, gid=self._gid,
                mode=self._file_mode, mask=self._mode_mask)

        if skip != skip_orig:
            write_atomic(skip_filename,
                "".join("%s\n" % x for x in sorted(skip)))
            apply_secpass_permissions(skip_filename,
                uid=self._uid, gid=self._gid,
                mode=self._file_mode, mask=self._mode_mask)
    finally:
        unlockfile(unread_lock)
Author: nullishzero | Project: Portage | Lines: 79 | Source: news.py
Example 16: unlock
def unlock(self):
    unlockfile(self._lock)
    self._lock = None
Author: devurandom | Project: portage | Lines: 3 | Source: files.py
Example 17: unlock
def unlock(self):
    if self._lock is None:
        raise AssertionError("not locked")
    unlockfile(self._lock)
    self._lock = None
Author: jonasstein | Project: portage | Lines: 5 | Source: files.py
Note: the portage.locks.unlockfile examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult each project's License before distributing or reusing the code, and do not reproduce this article without permission.