Python hdfs.hdfs Function Code Examples


This article collects typical usage examples of the pydoop.hdfs.hdfs function in Python. If you are wondering what the hdfs function does, how to call it, or what real-world uses look like, the curated code examples below should help.



The sections below show 20 code examples of the hdfs function, sorted by popularity by default.
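
Before diving into the examples, here is a minimal usage sketch to orient the reader. It assumes a reachable HDFS instance configured as Hadoop's default file system; every call it uses (hdfs.hdfs, working_directory, list_directory, close) also appears in the examples below, and the listed path "/" is just an illustration.

import pydoop.hdfs as hdfs

# hdfs.hdfs(host, port): "default" and 0 take the namenode from the Hadoop
# configuration; an empty host string ("") targets the local file system.
fs = hdfs.hdfs("default", 0)
try:
    print(fs.working_directory())           # typically the user's HDFS home
    for info in fs.list_directory("/"):     # one metadata dict per entry
        print(info["name"], info["kind"], info["size"])
finally:
    fs.close()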

Example 1: write

def write(writeFlag):
    if writeFlag:
        # instantiate hadoop
        hdfs.hdfs()

        targetPath = config.targetPath
        targetDirectory = config.targetDirectory
        sourceFile = config.sourceFile

        print("Target Path: " + targetPath)
        print("Target Directory: " + targetDirectory)
        print("Source Path: " + sourceFile)

        with open(sourceFile, "r") as dumpFile:
            fullText = dumpFile.read()

        # write to hadoop
        #hdfs.mkdir(targetDirectory)
        hdfs.dump(fullText, targetPath)
#hdfs.cp(sourceFile, targetPath)

#print (hdfs.ls("test4"))
#files = hdfs.ls("test4")

# read from hadoop
#hdfs.get("test4/hello.txt", "/tmp/hello.txt")
#with open("/tmp/hello.txt") as f:
#	print f.read()

#print(hdfs.ls("test", "hduser1"))
#text = hdfs.load("test/hello.txt")
#print text
Developer: davedwards | Project: beautiful-data | Lines: 33 | Source file: hadoopWriter.py


Example 2: tearDown

 def tearDown(self):
   fs = hdfs.hdfs("", 0)
   fs.delete(self.local_wd)
   fs.close()
   fs = hdfs.hdfs("default", 0)
   fs.delete(self.hdfs_wd)
   fs.close()
Developer: ZEMUSHKA | Project: pydoop | Lines: 7 | Source file: test_hdfs.py


Example 3: capacity

 def capacity(self):
     fs = hdfs.hdfs("", 0)
     self.assertRaises(RuntimeError, fs.capacity)
     fs.close()
     if not hdfs.default_is_local():
         fs = hdfs.hdfs("default", 0)
         cap = fs.capacity()
         self.assertGreaterEqual(cap, 0)
Developer: kikkomep | Project: pydoop | Lines: 8 | Source file: test_hdfs.py


Example 4: cache

 def cache(self):
     orig_fs = hdfs.hdfs(*self.hp_cases[0])
     for host, port in self.hp_cases[1:]:
         fs = hdfs.hdfs(host, port)
         self.assertTrue(fs.fs is orig_fs.fs)
         fs.close()
         self.assertFalse(orig_fs.closed)
     orig_fs.close()
     self.assertTrue(orig_fs.closed)
Developer: jkahn | Project: pydoop-code | Lines: 9 | Source file: test_hdfs_fs.py


Example 5: cache

 def cache(self):
     for (h1, p1), (h2, p2) in product(self.hp_cases, repeat=2):
         hdfs.hdfs._CACHE.clear()
         hdfs.hdfs._ALIASES = {"host": {}, "port": {}, "user": {}}  # FIXME
         with hdfs.hdfs(h1, p1) as fs1:
             with hdfs.hdfs(h2, p2) as fs2:
                 print ' * %r vs %r' % ((h1, p1), (h2, p2))
                 self.assertTrue(fs2.fs is fs1.fs)
             for fs in fs1, fs2:
                 self.assertFalse(fs.closed)
         for fs in fs1, fs2:
             self.assertTrue(fs.closed)
Developer: CynthiaYiqingHuang | Project: pydoop | Lines: 12 | Source file: test_hdfs_fs.py


Example 6: _hdfs_filesystem

def _hdfs_filesystem():
    """Retrieve references to the local and HDFS file system.

    Need to be able to specify host/port. For now, works off defaults.
    """
    fs = hdfs("default", 0)
    lfs = hdfs("", 0)
    try:
        yield fs, lfs
    finally:
        fs.close()
        lfs.close()
Developer: CosteaPaul | Project: bcbb | Lines: 12 | Source file: hadoop_run.py
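
The yield inside a try/finally in Example 6 suggests the function is meant to be used as a context manager. The excerpt shows no decorator, so wrapping it with contextlib.contextmanager, as sketched below, is an assumption on my part rather than something confirmed by the source.

from contextlib import contextmanager
from pydoop.hdfs import hdfs

@contextmanager  # assumed decorator; not shown in the excerpt above
def _hdfs_filesystem():
    """Yield (HDFS, local) file system handles and close both on exit."""
    fs = hdfs("default", 0)   # HDFS from the default Hadoop configuration
    lfs = hdfs("", 0)         # local file system
    try:
        yield fs, lfs
    finally:
        fs.close()
        lfs.close()

# Usage: both handles are closed automatically when the block exits.
with _hdfs_filesystem() as (fs, lfs):
    print(fs.working_directory())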


Example 7: read

def read(readFlag):
    print(readFlag)
    if readFlag:
        targetFile = config.targetFile.strip()
        targetDirectory = config.targetDirectory.strip()
        targetPath = config.targetPath
        
        print(targetPath)
        
        # instantiate hadoop
        hdfs.hdfs()
        
        # read from hadoop
        fileToRead = hdfs.open(targetPath)
        print(fileToRead.read())
Developer: davedwards | Project: beautiful-data | Lines: 15 | Source file: hadoopReader.py


Example 8: __warn_user_if_wd_maybe_unreadable

  def __warn_user_if_wd_maybe_unreadable(self, abs_remote_path):
    """
    Check directories above the remote module and issue a warning if
    they are not traversable by all users.

    This check is mainly aimed at set-ups with a centralized Hadoop
    cluster, accessed by all users, where the Hadoop task tracker user is
    not a superuser; an example is a shared Hadoop installation without
    HDFS (using only a POSIX shared file system).  The task tracker
    correctly switches to the job requester's user for most operations,
    but not when initializing the distributed cache, so jobs that try to
    place files not accessible by the Hadoop user into the distributed
    cache fail.
    """
    host, port, path = hdfs.path.split(abs_remote_path)
    if host == '' and port == 0: # local file system
      host_port = "file:///"
    else:
      # FIXME: this won't work with any scheme other than hdfs:// (e.g., s3)
      host_port = "hdfs://%s:%s/" % (host, port)
    path_pieces = path.strip('/').split(os.path.sep)
    fs = hdfs.hdfs(host, port)
    for i in xrange(0, len(path_pieces)):
      part = os.path.join(host_port, os.path.sep.join(path_pieces[0:i+1]))
      permissions = fs.get_path_info(part)['permissions']
      if permissions & 0111 != 0111:
        self.logger.warning(
          "the remote module %s may not be readable\n" +
          "by the task tracker when initializing the distributed cache.\n" +
          "Permissions on path %s: %s", abs_remote_path, part, oct(permissions))
        break
Developer: ilveroluca | Project: pydoop | Lines: 30 | Source file: script.py
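
A side note on the permission test in Example 8: permissions & 0111 uses Python 2 octal syntax and checks that the execute (directory traversal) bit is set for owner, group and other. The small snippet below is my own illustration of the same check in Python 3 syntax, not code from the source project.

# 0o111 is the Python 3 spelling of octal 0111: the execute/traverse bit
# for owner, group and other (--x--x--x).
def traversable_by_all(permissions):
    return (permissions & 0o111) == 0o111

assert traversable_by_all(0o755)      # rwxr-xr-x: everyone can traverse
assert not traversable_by_all(0o750)  # rwxr-x---: "other" cannot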


Example 9: setUp

 def setUp(self):
   if hdfs.default_is_local():
     self.root = "file:"
   else:
     fs = hdfs.hdfs("default", 0)
     self.root = "hdfs://%s:%s" % (fs.host, fs.port)
     fs.close()
Developer: ilveroluca | Project: pydoop | Lines: 7 | Source file: test_path.py


Example 10: connect

 def connect(self):
     for host, port in self.hp_cases:
         for user in self.u_cases:
             expected_user = user or CURRENT_USER
             fs = hdfs.hdfs(host, port, user=user)
             self.assertEqual(fs.user, expected_user)
             fs.close()
Developer: kmatzen | Project: pydoop | Lines: 7 | Source file: test_hdfs_fs.py


Example 11: run

    def run(self):
        if self.options is None:
            raise RuntimeError("You must call parse_cmd_line before run")

        if self.logger.isEnabledFor(logging.DEBUG):
            self.logger.debug("Running Seqal")
            self.logger.debug("Properties:\n%s", "\n".join( sorted([ "%s = %s" % (str(k), str(v)) for k,v in self.properties.iteritems() ]) ))
        self.logger.info("Input: %s; Output: %s; reference: %s", self.options.input, self.options.output, self.options.reference)

        try:
            self.hdfs = phdfs.hdfs('default', 0)
            self.__validate()

            self.remote_bin_name = tempfile.mktemp(prefix='seqal_bin.', suffix=str(random.random()), dir='')
            try:
                with self.hdfs.open_file(self.remote_bin_name, 'w') as script:
                    self.__write_pipes_script(script)

                full_name = self.hdfs.get_path_info(self.remote_bin_name)['name']

                return seal_utilities.run_pipes(full_name, self.options.input, self.options.output,
                    properties=self.properties, args_list=self.left_over_args)
            finally:
                try:
                    self.hdfs.delete(self.remote_bin_name) # delete the temporary pipes script from HDFS
                    self.logger.debug("pipes script %s deleted", self.remote_bin_name)
                except:
                    self.logger.error("Error deleting the temporary pipes script %s from HDFS", self.remote_bin_name)
                    ## don't re-raise the exception.  We're on our way out
        finally:
            if self.hdfs:
                tmp = self.hdfs
                self.hdfs = None
                tmp.close()
                self.logger.debug("HDFS closed")
Developer: pinno | Project: seal | Lines: 35 | Source file: seqal_run.py


Example 12: stat_on_local

 def stat_on_local(self):
     wd_ = tempfile.mkdtemp(prefix='pydoop_', suffix=UNI_CHR)
     p_ = os.path.join(wd_, make_random_str())
     if hdfs.default_is_local():
         wd, p = wd_, p_
         host = "default"
     else:
         wd, p = ('file:%s' % _ for _ in (wd_, p_))
         host = ""
     fs = hdfs.hdfs(host, 0)
     with fs.open_file(p_, 'w') as fo:
         fo.write(make_random_str())
     info = fs.get_path_info(p_)
     fs.close()
     s = hdfs.path.stat(p)
     os_s = os.stat(p_)
     for n in dir(s):
         if n.startswith('st_'):
             try:
                 exp_v = getattr(os_s, n)
             except AttributeError:
                 try:
                     exp_v = info[self.NMAP[n]]
                 except KeyError:
                     continue
             self.assertEqual(getattr(s, n), exp_v)
     self.__check_extra_args(s, info)
     self.__check_wrapper_funcs(p)
     hdfs.rmr(wd)
Developer: kikkomep | Project: pydoop | Lines: 29 | Source file: test_path.py


Example 13: get_res

def get_res(output_dir):
    fs = hdfs()
    data = []
    for x in fs.list_directory(output_dir):
        if os.path.split(x['path'])[-1].startswith('part-'):
            with fs.open_file(x['path']) as f:
                data.append(f.read())
    all_data = ''.join(data)
    return pts.parse_mr_output(all_data, vtype=int)
Developer: kikkomep | Project: pydoop | Lines: 9 | Source file: check_results.py


Example 14: __init__

 def __init__(self, context):
     super(FastaReader, self).__init__()
     self.logger = logging.getLogger(self.__class__.__name__)
     self.isplit = InputSplit(context.getInputSplit())
     self.host, self.port, self.fpath = split_hdfs_path(self.isplit.filename)
     self.fs = hdfs(self.host, self.port)
     self.file = self.fs.open_file(self.fpath, os.O_RDONLY)
     self._iterator = (SeqIO.parse(self.file, "fasta") if
                       self.isplit.offset == 0 else None)
Developer: 16NWallace | Project: bcbb | Lines: 9 | Source file: distblast_pipes.py


Example 15: compute_vc

def compute_vc(input_dir):
    fs = hdfs()
    data = []
    for x in fs.list_directory(input_dir):
        with fs.open_file(x['path']) as f:
            data.append(f.read())
    all_data = ''.join(data)
    vowels = re.findall('[AEIOUY]', all_data.upper())
    return Counter(vowels)
Developer: kikkomep | Project: pydoop | Lines: 9 | Source file: check_results.py


Example 16: runTest

 def runTest(self):
     current_user = getpass.getuser()
     cwd = os.getcwd()
     os.chdir(tempfile.gettempdir())
     for user in None, current_user, "nobody":
         expected_user = current_user
         fs = hdfs.hdfs("", 0, user=user)
         self.assertEqual(fs.user, expected_user)
         fs.close()
     os.chdir(cwd)
Developer: CynthiaYiqingHuang | Project: pydoop | Lines: 10 | Source file: test_local_fs.py


Example 17: main

def main():
    fs = hdfs.hdfs()
    try:
        root = "%s/%s" % (fs.working_directory(), TEST_ROOT)
        if not isdir(fs, root):
            sys.exit("%r does not exist" % root)
        print "BS(MB)\tBYTES"
        for k, v in usage_by_bs(fs, root).iteritems():
            print "%.1f\t%d" % (k / float(MB), v)
    finally:
        fs.close()
Developer: CynthiaYiqingHuang | Project: pydoop | Lines: 11 | Source file: treewalk.py


Example 18: get_hosts

 def get_hosts(self):
     if hdfs.default_is_local():
         # only run on HDFS
         return
     hdfs.dump(self.data, self.hdfs_paths[0])
     fs = hdfs.hdfs("default", 0)
     hs = fs.get_hosts(self.hdfs_paths[0], 0, 10)
     self.assertTrue(len(hs) > 0)
     self.assertRaises(
         ValueError, fs.get_hosts, self.hdfs_paths[0], -10, 10
     )
     self.assertRaises(ValueError, fs.get_hosts, self.hdfs_paths[0], 0, -10)
Developer: kikkomep | Project: pydoop | Lines: 12 | Source file: test_hdfs.py


Example 19: copy

 def copy(self):
   local_fs = hdfs.hdfs('', 0)
   local_wd = make_wd(local_fs)
   from_path = os.path.join(local_wd, uuid.uuid4().hex)
   content = uuid.uuid4().hex
   with open(from_path, "w") as f:
     f.write(content)
   to_path = self._make_random_file()
   local_fs.copy(from_path, self.fs, to_path)
   local_fs.close()
   with self.fs.open_file(to_path) as f:
     self.assertEqual(f.read(), content)
   shutil.rmtree(local_wd)
Developer: kmatzen | Project: pydoop | Lines: 13 | Source file: common_hdfs_tests.py


Example 20: main

def main(directory, topic, byline):
    #get a hdfs object
    myHdfs = hdfs.hdfs()
    myPath = myHdfs.walk(directory)
    
    # a global variable
    global producer 

    # Get a producer object
    producer = KafkaProducer(bootstrap_servers=["node4:6667"], compression_type='gzip', acks=1, retries=2)
    
    for myfile in myPath:
        # Skip directories (do not recurse into them)
        if myfile["kind"] == "directory":
            logger.debug("ignoring %s" %(myfile))
            continue
        
        elif myfile["kind"] == "file":
            pass
        
        else:
            raise Exception, "Unknown kind %s for %s" %(myfile["kind"], myfile["name"])
            
        # Skip particular file names
        if "_SUCCESS" in myfile["name"] or "_temporary" in myfile["name"]:
            logger.debug("ignoring %s" %(myfile))
            continue
        
        #Skip 0 size files
        if myfile["size"] == 0:
            logger.debug("ignoring %s" %(myfile))
            continue
        
        logger.info("Working on %s" %(myfile["name"]))

        #call processChunk if I want to submit chunk
        if byline is False:
            processChunk(myfile, topic)
            
        else:
            #Otherwise submit line by line
            processLine(myfile, topic)

        #with file open
        logger.info("Completed %s" %(myfile["name"]))
        
        #sleep some time
        time.sleep(1)
                    
    # for all files in HDFS
    producer.close()
Developer: bunop | Project: ccc-capstone | Lines: 51 | Source file: kafka-producer.py



Note: the pydoop.hdfs.hdfs function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not republish without permission.

