Python util.filechunkiter Function Code Examples


This article collects typical usage examples of the Python function mercurial.util.filechunkiter. If you have been wondering how exactly filechunkiter is called, what its arguments look like in practice, or where to find real-world examples of its use, the curated code samples below should help.



A total of 20 code examples of the filechunkiter function are shown below, sorted by popularity by default.
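Before diving into the examples, the pattern they all share is worth stating once: util.filechunkiter wraps a file-like object and yields its contents as fixed-size byte chunks, optionally stopping after a given number of bytes (the examples below pass a chunk size such as 128 * 1024 and a limit= keyword). The following minimal sketch is illustrative only: the file path and chunk size are made-up values, and it assumes a Python 2 era Mercurial installation where mercurial.util is importable.

import hashlib

from mercurial import util

# Illustrative sketch: hash a large file by streaming it in chunks
# rather than reading it into memory in one call. The path and the
# 128 KiB chunk size are placeholder values, not taken from the
# examples below.
fp = open('some-large-file.bin', 'rb')
try:
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(fp, 128 * 1024):
        hasher.update(chunk)
    print(hasher.hexdigest())
finally:
    fp.close()

Several of the examples below (for instance hexsha1 and hashfile from lfutil.py) are essentially this same loop embedded in Mercurial's largefiles extension.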

Example 1: snapshot_wdir

def snapshot_wdir(ui, repo, files, tmproot):
    '''snapshot files from working directory.
    if not using snapshot, -I/-X does not work and recursive diff
    in tools like kdiff3 and meld displays too many files.'''
    repo_root = repo.root

    dirname = os.path.basename(repo_root)
    if dirname == "":
        dirname = "root"
    base = os.path.join(tmproot, dirname)
    os.mkdir(base)
    ui.note(_('making snapshot of %d files from working dir\n') %
            (len(files)))

    fns_and_mtime = []

    for fn in files:
        wfn = util.pconvert(fn)
        ui.note('  %s\n' % wfn)
        dest = os.path.join(base, wfn)
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            os.makedirs(destdir)

        fp = open(dest, 'wb')
        for chunk in util.filechunkiter(repo.wopener(wfn)):
            fp.write(chunk)
        fp.close()

        fns_and_mtime.append((dest, os.path.join(repo_root, fn),
            os.path.getmtime(dest)))


    return dirname, fns_and_mtime
Developer: c0ns0le, Project: cygwin, Lines: 34, Source file: extdiff.py


Example 2: drain

 def drain(self):
     '''
     Need to read all data from request, httplib is half-duplex
     '''
     length = int(self.env.get('CONTENT_LENGTH', 0))
     for s in util.filechunkiter(self.inp, limit=length):
         pass
Developer: IanLewis, Project: django-hgwebproxy, Lines: 7, Source file: proxy.py


Example 3: hexsha1

def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
Developer: ZanderZhang, Project: Andriod-Learning, Lines: 7, Source file: lfutil.py


Example 4: consumestreamclone

def consumestreamclone(repo, fp):
    """Apply the contents from a streaming clone file.

    This code is copied from Mercurial. Until Mercurial 3.5, this code was
    a closure in wireproto.py and not consumeable by extensions.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
Developer: Nephyrin, Project: bzexport, Lines: 59, Source file: __init__.py


Example 5: _getfile

 def _getfile(self, tmpfile, filename, hash):
     path = lfutil.findfile(self.remote, hash)
     if not path:
         raise basestore.StoreError(filename, hash, self.url,
             _("can't get file locally"))
     with open(path, 'rb') as fd:
         return lfutil.copyandhash(
             util.filechunkiter(fd), tmpfile)
Developer: motlin, Project: cyg, Lines: 8, Source file: localstore.py


Example 6: hashfile

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = hashlib.sha1('')
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd):
            hasher.update(data)
    return hasher.hexdigest()
Developer: motlin, Project: cyg, Lines: 8, Source file: lfutil.py


Example 7: hashfile

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in util.filechunkiter(fd, 128 * 1024):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()
Developer: ZanderZhang, Project: Andriod-Learning, Lines: 9, Source file: lfutil.py


Example 8: link

def link(src, dest):
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
Developer: sandeepprasanna, Project: ODOO, Lines: 10, Source file: lfutil.py


Example 9: copytostoreabsolute

def copytostoreabsolute(repo, file, hash):
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = util.atomictempfile(storepath(repo, hash))
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        util.copymode(file, storepath(repo, hash))
        linktousercache(repo, hash)
Developer: sandeepprasanna, Project: ODOO, Lines: 11, Source file: lfutil.py


Example 10: consumev1

def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "streamout" and applies it to the specified
    repository.

    Like "streamout," the status line added by the wire protocol is not handled
    by this function.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(bytecount)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=bytecount)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(filecount):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes, total=bytecount)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(bytecount), elapsed,
                        util.bytecount(bytecount / elapsed)))
    finally:
        lock.release()
Developer: djmitche, Project: build-puppet, Lines: 54, Source file: bundleclone.py


Example 11: copytostoreabsolute

def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
Developer: ZanderZhang, Project: Andriod-Learning, Lines: 11, Source file: lfutil.py


Example 12: copytostoreabsolute

def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
Developer: motlin, Project: cyg, Lines: 11, Source file: lfutil.py


Example 13: bail

    def bail(response, headers={}):
        length = int(req.env.get('CONTENT_LENGTH', 0))
        for s in util.filechunkiter(req, limit=length):
            # drain incoming bundle, else client will not see
            # response when run outside cgi script
            pass

        status = headers.pop('status', HTTP_OK)
        req.header(headers.items())
        req.respond(status, HGTYPE)
        req.write('0\n')
        req.write(response)
Developer: c0ns0le, Project: cygwin, Lines: 12, Source file: protocol.py


Example 14: link

def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
Developer: raymundviloria, Project: android-app, Lines: 12, Source file: lfutil.py


Example 15: generatestreamclone

def generatestreamclone(repo):
    """Emit content for a streaming clone.

    This is a generator of raw chunks that constitute a streaming clone.

    This code is copied from Mercurial. Until Mercurial 3.5, this code was
    a closure in wireproto.py and not consumeable by extensions.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    lock = repo.lock()
    try:
        repo.ui.debug('scanning\n')
        for name, ename, size in repo.store.walk():
            if size:
                entries.append((name, size))
                total_bytes += size
    finally:
        lock.release()

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    yield '%d %d\n' % (len(entries), total_bytes)

    sopener = repo.svfs
    oldaudit = sopener.mustaudit
    debugflag = repo.ui.debugflag
    sopener.mustaudit = False

    try:
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            if size <= 65536:
                fp = sopener(name)
                try:
                    data = fp.read(size)
                finally:
                    fp.close()
                yield data
            else:
                for chunk in util.filechunkiter(sopener(name), limit=size):
                    yield chunk
    finally:
        sopener.mustaudit = oldaudit
Developer: Nephyrin, Project: bzexport, Lines: 48, Source file: __init__.py


Example 16: overridecat

def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded')  % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
Developer: leetaizhu, Project: Odoo_ENV_MAC_OS, Lines: 47, Source file: overrides.py


Example 17: getlfile

        def getlfile(self, sha):
            """returns an iterable with the chunks of the file with sha sha"""
            stream = self._callstream("getlfile", sha=sha)
            length = stream.readline()
            try:
                length = int(length)
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"), length))

            # SSH streams will block if reading more than length
            for chunk in util.filechunkiter(stream, 128 * 1024, length):
                yield chunk
            # HTTP streams must hit the end to process the last empty
            # chunk of Chunked-Encoding so the connection can be reused.
            if issubclass(self.__class__, httppeer.httppeer):
                chunk = stream.read(1)
                if chunk:
                    self._abort(error.ResponseError(_("unexpected response:"), chunk))
Developer: nixiValor, Project: Waterfox, Lines: 18, Source file: proto.py


Example 18: emitrevlogdata

 def emitrevlogdata():
     try:
         for name, size in entries:
             if debugflag:
                 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
             # partially encode name over the wire for backwards compat
             yield '%s\0%d\n' % (store.encodedir(name), size)
             if size <= 65536:
                 fp = svfs(name)
                 try:
                     data = fp.read(size)
                 finally:
                     fp.close()
                 yield data
             else:
                 for chunk in util.filechunkiter(svfs(name), limit=size):
                     yield chunk
     finally:
         svfs.mustaudit = oldaudit
Developer: djmitche, Project: build-puppet, Lines: 19, Source file: bundleclone.py


Example 19: copyfromcache

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(
                util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
Developer: motlin, Project: cyg, Lines: 23, Source file: lfutil.py


Example 20: censor

def censor(ui, repo, path, rev='', tombstone='', **opts):
    if not path:
        raise util.Abort(_('must specify file path to censor'))
    if not rev:
        raise util.Abort(_('must specify revision to censor'))

    wctx = repo[None]

    m = scmutil.match(wctx, (path,))
    if m.anypats() or len(m.files()) != 1:
        raise util.Abort(_('can only specify an explicit filename'))
    path = m.files()[0]
    flog = repo.file(path)
    if not len(flog):
        raise util.Abort(_('cannot censor file with no history'))

    rev = scmutil.revsingle(repo, rev, rev).rev()
    try:
        ctx = repo[rev]
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)

    try:
        fctx = ctx.filectx(path)
    except error.LookupError:
        raise util.Abort(_('file does not exist at revision %s') % rev)

    fnode = fctx.filenode()
    headctxs = [repo[c] for c in repo.heads()]
    heads = [c for c in headctxs if path in c and c.filenode(path) == fnode]
    if heads:
        headlist = ', '.join([short(c.node()) for c in heads])
        raise util.Abort(_('cannot censor file in heads (%s)') % headlist,
            hint=_('clean/delete and commit first'))

    wp = wctx.parents()
    if ctx.node() in [p.node() for p in wp]:
        raise util.Abort(_('cannot censor working directory'),
            hint=_('clean/delete/update first'))

    flogv = flog.version & 0xFFFF
    if flogv != revlog.REVLOGNG:
        raise util.Abort(
            _('censor does not support revlog version %d') % (flogv,))

    tombstone = filelog.packmeta({"censored": tombstone}, "")

    crev = fctx.filerev()

    if len(tombstone) > flog.rawsize(crev):
        raise util.Abort(_(
            'censor tombstone must be no longer than censored data'))

    # Using two files instead of one makes it easy to rewrite entry-by-entry
    idxread = repo.svfs(flog.indexfile, 'r')
    idxwrite = repo.svfs(flog.indexfile, 'wb', atomictemp=True)
    if flog.version & revlog.REVLOGNGINLINEDATA:
        dataread, datawrite = idxread, idxwrite
    else:
        dataread = repo.svfs(flog.datafile, 'r')
        datawrite = repo.svfs(flog.datafile, 'wb', atomictemp=True)

    # Copy all revlog data up to the entry to be censored.
    rio = revlog.revlogio()
    offset = flog.start(crev)

    for chunk in util.filechunkiter(idxread, limit=crev * rio.size):
        idxwrite.write(chunk)
    for chunk in util.filechunkiter(dataread, limit=offset):
        datawrite.write(chunk)

    def rewriteindex(r, newoffs, newdata=None):
        """Rewrite the index entry with a new data offset and optional new data.

        The newdata argument, if given, is a tuple of three positive integers:
        (new compressed, new uncompressed, added flag bits).
        """
        offlags, comp, uncomp, base, link, p1, p2, nodeid = flog.index[r]
        flags = revlog.gettype(offlags)
        if newdata:
            comp, uncomp, nflags = newdata
            flags |= nflags
        offlags = revlog.offset_type(newoffs, flags)
        e = (offlags, comp, uncomp, r, link, p1, p2, nodeid)
        idxwrite.write(rio.packentry(e, None, flog.version, r))
        idxread.seek(rio.size, 1)

    def rewrite(r, offs, data, nflags=revlog.REVIDX_DEFAULT_FLAGS):
        """Write the given full text to the filelog with the given data offset.

        Returns:
            The integer number of data bytes written, for tracking data offsets.
        """
        flag, compdata = flog.compress(data)
        newcomp = len(flag) + len(compdata)
        rewriteindex(r, offs, (newcomp, len(data), nflags))
        datawrite.write(flag)
        datawrite.write(compdata)
        dataread.seek(flog.length(r), 1)
        return newcomp
#......... the rest of the code is omitted here .........
Developer: CSCI-362-02-2015, Project: RedTeam, Lines: 101, Source file: censor.py



Note: The mercurial.util.filechunkiter examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright over the code; consult each project's license before redistributing or reusing it. Do not reproduce this article without permission.

