
Python hdfs.rmr Function Code Examples


This article collects typical usage examples of the pydoop.hdfs.rmr function in Python. If you have been wondering how rmr is used, what its call pattern looks like, or what real-world code calling it looks like, the curated examples below should help.



Twenty code examples of the rmr function are shown below, ordered by popularity by default.
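
Before the extracted examples, a minimal usage sketch may help orient readers. It mirrors Example 3 below and assumes a working pydoop/HDFS setup; the path name is made up for illustration, and, as several of the examples suggest, failures during removal surface as IOError.

    import pydoop.hdfs as hdfs

    path = "rmr_demo"                  # hypothetical path, relative to the HDFS home directory
    hdfs.dump("foo\n", path)           # create a small test file
    print(hdfs.path.exists(path))      # True
    try:
        hdfs.rmr(path)                 # recursively remove the file or directory tree
    except IOError as e:
        print("could not remove %s: %s" % (path, e))
    print(hdfs.path.exists(path))      # False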

Example 1: stat_on_local

 def stat_on_local(self):
     wd_ = tempfile.mkdtemp(prefix='pydoop_', suffix=UNI_CHR)
     p_ = os.path.join(wd_, make_random_str())
     if hdfs.default_is_local():
         wd, p = wd_, p_
         host = "default"
     else:
         wd, p = ('file:%s' % _ for _ in (wd_, p_))
         host = ""
     fs = hdfs.hdfs(host, 0)
     with fs.open_file(p_, 'w') as fo:
         fo.write(make_random_str())
     info = fs.get_path_info(p_)
     fs.close()
     s = hdfs.path.stat(p)
     os_s = os.stat(p_)
     for n in dir(s):
         if n.startswith('st_'):
             try:
                 exp_v = getattr(os_s, n)
             except AttributeError:
                 try:
                     exp_v = info[self.NMAP[n]]
                 except KeyError:
                     continue
             self.assertEqual(getattr(s, n), exp_v)
     self.__check_extra_args(s, info)
     self.__check_wrapper_funcs(p)
     hdfs.rmr(wd)
Developer: kikkomep, Project: pydoop, Lines: 29, Source: test_path.py


Example 2: samefile_link

 def samefile_link(self):
     wd_ = tempfile.mkdtemp(prefix='pydoop_', suffix=UNI_CHR)
     wd = 'file:%s' % wd_
     link = os.path.join(wd_, make_random_str())
     os.symlink(wd_, link)
     self.assertTrue(hdfs.path.samefile('file:%s' % link, 'file:%s' % wd_))
     hdfs.rmr(wd)
Developer: kikkomep, Project: pydoop, Lines: 7, Source: test_path.py


Example 3: good

 def good(self):
     base_path = make_random_str()
     for path in base_path, base_path + UNI_CHR:
         hdfs.dump("foo\n", path)
         self.assertTrue(hdfs.path.exists(path))
         hdfs.rmr(path)
         self.assertFalse(hdfs.path.exists(path))
Developer: kikkomep, Project: pydoop, Lines: 7, Source: test_path.py


Example 4: _clean_up

 def _clean_up(*paths):
     for p in paths:
         try:
             log.debug("Removing path: %s", p)
             phdfs.rmr(p)
         except StandardError as e:
             log.warning("Error deleting path %s", p)
             log.exception(e)
Developer: crs4, Project: hadoop-galaxy, Lines: 8, Source: dist_cat_paths.py


Example 5: realpath

 def realpath(self):
     wd_ = tempfile.mkdtemp(prefix='pydoop_', suffix=UNI_CHR)
     wd = 'file:%s' % wd_
     link = os.path.join(wd_, make_random_str())
     os.symlink(wd_, link)
     expected_path = 'file:%s' % os.path.realpath(wd_)
     self.assertEqual(hdfs.path.realpath('file:%s' % link), expected_path)
     hdfs.rmr(wd)
Developer: crs4, Project: pydoop, Lines: 8, Source: test_path.py


Example 6: cleanup

def cleanup(out_pathset):
  # clean-up job output
  for path in out_pathset:
    try:
      print >> sys.stderr, "Deleting output path", path
      phdfs.rmr(path)
    except StandardError as e:
      print >> sys.stderr, "Error!", str(e)
Developer: crs4, Project: seal-galaxy, Lines: 8, Source: recab_table_galaxy.py


Example 7: _try_remove_hdfs_dir

def _try_remove_hdfs_dir(path):
    try:
        phdfs.rmr(path)
        return True
    except StandardError as e:
        logger.error("Error while trying to remove directory %s", path)
        logger.exception(e)
    return False
Developer: ilveroluca, Project: flink-pipeline, Lines: 8, Source: workflow.py


Example 8: __clean_wd

 def __clean_wd(self):
   if self.remote_wd:
     try:
       self.logger.debug(
         "Removing temporary working directory %s", self.remote_wd
         )
       hdfs.rmr(self.remote_wd)
     except IOError:
       pass
Developer: ilveroluca, Project: pydoop, Lines: 9, Source: script.py


Example 9: realpath

 def realpath(self):
     wd_ = tempfile.mkdtemp(prefix='pydoop_', suffix=UNI_CHR)
     wd = 'file:%s' % wd_
     link = os.path.join(wd_, make_random_str())
     os.symlink(wd_, link)
     expected_path = ('file:%s%s' % ("/private", wd_)
                      if sys.platform == "darwin"
                      else 'file:%s' % wd_)
     self.assertEqual(hdfs.path.realpath('file:%s' % link), expected_path)
     hdfs.rmr(wd)
Developer: kikkomep, Project: pydoop, Lines: 10, Source: test_path.py


Example 10: delete_files

def delete_files(remote_basedir, retention):
    inodes = walk_remotely(remote_basedir)
    now = time.time()
    deleted_files = []
    for inode in inodes:
        if now - inode['last_mod'] > retention and inode['kind'] == 'file':
            LOGGER.debug("Deleting file {0}".format(inode['path']))
            hdfs.rmr(inode['path'])
            deleted_files.append(inode['path'])
    return deleted_files
Developer: hopshadoop, Project: hops-hadoop-chef, Lines: 10, Source: hadoop_logs_mgm.py


Example 11: runTest

 def runTest(self):
     path = make_random_str() + UNI_CHR
     hdfs.dump("foo\n", path)
     st = hdfs.path.stat(path)
     atime, mtime = [getattr(st, 'st_%stime' % _) for _ in 'am']
     new_atime, new_mtime = atime + 100, mtime + 200
     hdfs.path.utime(path, (new_atime, new_mtime))
     st = hdfs.path.stat(path)
     self.assertEqual(st.st_atime, new_atime)
     self.assertEqual(st.st_mtime, new_mtime)
     hdfs.rmr(path)
Developer: kikkomep, Project: pydoop, Lines: 11, Source: test_path.py


Example 12: _tear_down_flink_session

def _tear_down_flink_session(app_id):
    if not app_id:
        raise ValueError("_tear_down_flink_session: empty app id!")

    cmd = [ 'yarn', 'application', '-kill', app_id ]
    logger.info("Killing flink session with app id '%s'", app_id)
    logger.debug("Command: %s", cmd)
    subprocess.check_call(cmd)
    # clean up temporary yarn session files, if any
    path = ".flink/" + app_id
    if phdfs.path.exists(path):
        logger.info("Also removing the session's temporary files in %s", path)
        phdfs.rmr(path)
Developer: ilveroluca, Project: flink-pipeline, Lines: 13, Source: workflow.py


Example 13: test_isdir

 def test_isdir(self):
     for path in self.path, self.u_path:
         self.assertFalse(hdfs.path.isdir(path))
         try:
             hdfs.dump("foo\n", path)
             self.assertFalse(hdfs.path.isdir(path))
             hdfs.rmr(path)
             hdfs.mkdir(path)
             self.assertTrue(hdfs.path.isdir(path))
         finally:
             try:
                 hdfs.rmr(path)
             except IOError:
                 pass
Developer: kikkomep, Project: pydoop, Lines: 14, Source: test_path.py


Example 14: test_kind

 def test_kind(self):
     for path in self.path, self.u_path:
         self.assertTrue(hdfs.path.kind(path) is None)
         try:
             hdfs.dump("foo\n", path)
             self.assertEqual('file', hdfs.path.kind(path))
             hdfs.rmr(path)
             hdfs.mkdir(path)
             self.assertEqual('directory', hdfs.path.kind(path))
         finally:
             try:
                 hdfs.rmr(path)
             except IOError:
                 pass
Developer: kikkomep, Project: pydoop, Lines: 14, Source: test_path.py


Example 15: test_isdir

 def test_isdir(self):
   path = utils.make_random_str()
   self.assertFalse(hdfs.path.isdir(path))
   try:
     hdfs.dump("foo\n", path)
     self.assertFalse(hdfs.path.isdir(path))
     hdfs.rmr(path)
     hdfs.mkdir(path)
     self.assertTrue(hdfs.path.isdir(path))
   finally:
     try:
       hdfs.rmr(path)
     except IOError:
       pass
Developer: ilveroluca, Project: pydoop, Lines: 14, Source: test_path.py


Example 16: test_kind

 def test_kind(self):
   path = utils.make_random_str()
   self.assertTrue(hdfs.path.kind(path) is None)
   try:
     hdfs.dump("foo\n", path)
     self.assertEqual('file', hdfs.path.kind(path))
     hdfs.rmr(path)
     hdfs.mkdir(path)
     self.assertEqual('directory', hdfs.path.kind(path))
   finally:
     try:
       hdfs.rmr(path)
     except IOError:
       pass
Developer: ilveroluca, Project: pydoop, Lines: 14, Source: test_path.py


Example 17: main

def main(argv):
  logger = logging.getLogger("main")
  logger.setLevel(logging.INFO)
  local_input = argv[1]
  with open(MR_SCRIPT) as f:
    pipes_code = pts.add_sys_path(f.read())
  runner = hadut.PipesRunner(prefix=PREFIX, logger=logger)
  runner.set_input(local_input, put=True)
  runner.set_exe(pipes_code)
  runner.run()
  res = runner.collect_output()
  runner.clean()
  hdfs.rmr(HDFS_WD)
  logger.info("checking results")
  expected_res = local_vc(local_input)
  logger.info(check(res, expected_res))
Developer: ZEMUSHKA, Project: pydoop, Lines: 16, Source: self_contained.py


Example 18: execute

    def execute(self):
        """
        Execute workflow in dedicated directory
        """
        hdfs_output_dir = "workflow_output_{}".format(time.time())
        logger.debug("Setting up workflow")
        logger.debug("CWD: %s", os.getcwd())
        logger.debug("workflow output directory: %s", hdfs_output_dir)
        cmd = [ self._program ] + [ str(arg) for arg in  self._args ]
        cmd.append(self._input_dir)
        cmd.append(hdfs_output_dir)
        logger.debug("workflow command: %s", cmd)
        wf_logfile = os.path.abspath(GlobalConf['workflow_logfile'])
        logger.info("Executing worflow")
        logger.info("Writing workflow log to %s", wf_logfile)

        self._clear_caches()

        try:
            with open(wf_logfile, 'a') as f:
                logger.info("Starting workflow")
                start_time = time.time()
                retcode = subprocess.call(cmd, stdout=f, stderr=subprocess.STDOUT)
            end_time = time.time()
            run_time = end_time - start_time

            attempt_info = AttemptInfo(cmd, retcode, wf_logfile, run_time)

            if retcode == 0:
                logger.info("Workflow finished")
                logger.info("Attempt took %0.2f seconds", run_time)
                bcl, align = self._get_part_times_from_log(wf_logfile)
                attempt_info.bcl_secs = bcl
                attempt_info.align_secs = align
            else:
                logger.info("Workflow FAILED with exit code %s", retcode)
            return attempt_info
        finally:
            try:
                if phdfs.path.exists(hdfs_output_dir):
                    logger.debug("Removing workflow's temporary output directory %s", hdfs_output_dir)
                    phdfs.rmr(hdfs_output_dir)
            except StandardError as e:
                logger.error("Failed to clean up workflow's output directory  %s", hdfs_output_dir)
                logger.exception(e)
Developer: ilveroluca, Project: flink-pipeline, Lines: 45, Source: experiment.py


Example 19: stat

 def stat(self):
     if hdfs.default_is_local():
         return
     bn = '%s%s' % (make_random_str(), UNI_CHR)
     fn = '/user/%s/%s' % (DEFAULT_USER, bn)
     fs = hdfs.hdfs("default", 0)
     p = "hdfs://%s:%s%s" % (fs.host, fs.port, fn)
     with fs.open_file(fn, 'w') as fo:
         fo.write(make_random_str())
     info = fs.get_path_info(fn)
     fs.close()
     s = hdfs.path.stat(p)
     for n1, n2 in self.NMAP.iteritems():
         attr = getattr(s, n1, None)
         self.assertFalse(attr is None)
         self.assertEqual(attr, info[n2])
     self.__check_extra_args(s, info)
     self.__check_wrapper_funcs(p)
     hdfs.rmr(p)
Developer: kikkomep, Project: pydoop, Lines: 19, Source: test_path.py


Example 20: run

 def run(self):
     exit_code = 1
     with tempfile.NamedTemporaryFile() as f:
         self.log.debug("opened scratch MR job input file %s", f.name)
         # We write the files to be compressed to a temporary file.  Later we'll re-read
         # this temporary file to rename the files as well.  I've opted not to keep the
         # table in memory in the hope of scaling better to jobs with a large number of
         # files (we reduce memory requirements).
         num_files = self.__write_mr_input(f)
         f.flush()
         self.log.debug("Finished writing temp input file")
         input_filename = tempfile.mktemp(dir=os.path.dirname(self.output_path), prefix="dist_txt_zipper_input")
         tmpfile_uri = "file://%s" % f.name
         try:
             self.log.debug("copying input from %s to %s", tmpfile_uri, input_filename)
             hdfs.cp(tmpfile_uri, input_filename)
             self.log.info("Run analyzed.  Launching distributed job")
             # launch mr task
             pydoop_args = \
                 [ 'script', '--num-reducers', '0','--kv-separator', '',
                   '-Dmapred.map.tasks=%d' % num_files,
                   '-Dmapred.input.format.class=org.apache.hadoop.mapred.lib.NLineInputFormat',
                   '-Dmapred.line.input.format.linespermap=1',
                   '-Dmapred.output.compress=true',
                   '-Dmapred.output.compression.codec=%s' % 'org.apache.hadoop.io.compress.GzipCodec',
                   text_zipper_mr.__file__,
                   input_filename,
                   self.output_path]
             self.log.debug("pydoop_args: %s", pydoop_args)
             self.log.info("Compressing %s files", num_files)
             pydoop_app.main(pydoop_args)
             self.log.info("Distributed job complete")
             self.rename_compressed_files(f)
             self.log.info("finished")
             exit_code = 0
         finally:
             try:
                 self.log.debug("Removing temporary input file %s", input_filename)
                 hdfs.rmr(input_filename)
             except IOError as e:
                 self.log.warning("Problem cleaning up.  Error deleting temporary input file %s", input_filename)
                 self.log.exception(str(e))
         return exit_code
Developer: crs4, Project: hadoop-galaxy, Lines: 43, Source: dist_text_zipper.py



Note: the pydoop.hdfs.rmr examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.



扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap