Python pyhs2.connect Function Code Examples


This article collects typical usage examples of the pyhs2.connect function in Python. If you have been wondering how connect is used in practice, what its arguments look like, or what real calls to it look like, the curated examples below may help.



Twenty code examples of the connect function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
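
Before the individual examples, here is a minimal sketch of the pattern nearly all of them share: connect, open a cursor, execute a statement, fetch, and clean up. The host, port, credentials, and query below are placeholder assumptions, not values taken from any example.

import pyhs2

# Minimal connect/query/cleanup pattern; every connection value here is a placeholder.
with pyhs2.connect(host='localhost',        # HiveServer2 host (assumed)
                   port=10000,              # default HiveServer2 port
                   authMechanism="PLAIN",   # the examples below also use NOSASL and KERBEROS
                   user='hive',
                   password='',
                   database='default') as conn:
    with conn.cursor() as cur:
        cur.execute("show tables")          # any HiveQL statement
        for row in cur.fetch():             # fetch() returns the result rows as lists
            print(row)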

Example 1: listen

def listen(id, listen_type):
    """Returning the number of playevents longer than 30 sec with ok flag for certain ids"""

    # testing if listen type is correct
    if listen_type not in ('release', 'track', 'playlist'):
        raise AttributeError("provide valid listen_type: 'release', 'playlist' or 'track'")

    #splitting ids
    cid = id_split(id)

    # uncomment the lines below to print the generated Hive query
    # print ("select count(1), src_id, src_type from playevent "
    #     "where (%s) and src_type = '%s' and play_duration > 30 and ok_flag "
    #     "group by src_id, src_type" % (cid, listen_type))

    # connecting to Hive, sending query, returning results of query
    conn = pyhs2.connect(host='nif-nif.zvq.me', port=10000, authMechanism="NOSASL", user='hive', password='test',
                         database='default')
    cur = conn.cursor()
    cur.execute(
        "select count(1), src_id, src_type from playevent"
        "where (%s) and src_type = '%s' and play_duration > 30 and ok_flag"
        "group by src_id, src_type" % (cid, listen_type))
    results = cur.fetch()
    cur.close()
    conn.close()
    return results
Developer: ToxaZ, Project: my_code, Lines: 27, Source: check.py
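
A hypothetical invocation of listen (the id format expected by the project-specific id_split helper is an assumption):

# Hypothetical call; the shape of the first argument depends on the project's id_split helper.
rows = listen('1234,5678', 'track')
# rows holds [count, src_id, src_type] entries for playevents longer than 30 seconds.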


Example 2: processQuery

    def processQuery(self, querylist, flag):

        propertyObj = ConfigProperties()
        hostname = propertyObj.localhivehost()
        portnumber = propertyObj.localhiveport()
        authentication = propertyObj.localhiveauthentication()
        username = propertyObj.localhiveuser()
        userpassword = propertyObj.localuserpassword()
        databasename = propertyObj.localhivedatabase()


        conn = pyhs2.connect(host=hostname, port=portnumber, authMechanism=authentication,
                             user=username, password=userpassword, database=databasename)
        cur = conn.cursor()

        cur.execute(querylist['createdb'])
        cur.execute(querylist['workdb'])
        cur.execute(querylist['droptable1'])
        cur.execute(querylist['createtable1'])
        cur.execute(querylist['testcode'])
        cur.execute(querylist['droptable2'])
        cur.execute(querylist['createtable2'])

        if flag == 0:
            cur.close()
            conn.close()
Developer: rahmanwaliur, Project: ApiClient, Lines: 25, Source: workflowengine.py


Example 3: process

    def process(self, tup):
        movie_id = tup.values[0]
        # [{'field_name' : 'field_value', ...}, ....]
        tweets = json.loads(tup.values[1])
        # For debugging.
        #movie_id = tup[0]
        #tweets = json.loads(tup[1])
        self.log('Received %d tweets for movie %s' % (len(tweets), movie_id))

        tweets_bow = []
        for t in tweets:
            text = self.processTweet(t['text'])
            bag_of_words = self.getWordsSet(text)
            tweets_bow.append(bag_of_words)

        features = self.build_features(tweets_bow)

        pred = self.classifier.predict(features)
        pos_count = sum(pred)
        neg_count = len(pred) - pos_count

        self.log('SE: %s   +ve:%d   -ve:%d' % (movie_id, pos_count, neg_count))

        with pyhs2.connect(host = get_config('host'), port = get_config('port'), authMechanism = get_config('auth'),
                           user = str(get_config('user')), database = get_config('database')) as conn:
            with conn.cursor() as cur:
                query = ("INSERT INTO " + get_config('se_score_tablename') + " VALUES (" + str(int(time.time()))
                         + ", '" + movie_id + "', " + str(pos_count) + ", " + str(neg_count) + ")")
                if self.verbose:
                    self.log(query)
                cur.execute(query)
Developer: gregce, Project: MIDS, Lines: 31, Source: sentiment_analysis.py


Example 4: execute_sql

  def execute_sql (self, database_name, sql, fetch_result = False):
    import pyhs2
    conn = pyhs2.connect(host=self.host, port=self.port, authMechanism="PLAIN", user="hdfs", password="", database='default', timeout=5000)
    print "Connected to hiverserver2"
    # turn on tez and add serde jar
    c = conn.cursor()
    c.execute("set hive.execution.engine=tez")
    c.execute("set hive.cache.expr.evaluation=false")
    # c.execute("add jar %s" % self.hive_serdes_path)

    if database_name is not None:
      c.execute("use %s" % database_name)

    # run the actual command
    print "Executing HiveQL: %s" % (sql)
    c.execute(sql)

    output = []
    if fetch_result:
      rows = c.fetchall()
      for row in rows:
        output.append(row)

    c.close()
    conn.close()

    return output
Developer: baskin, Project: mongo-hive, Lines: 27, Source: dw_util.py


Example 5: aggregate

def aggregate():
    with pyhs2.connect(host='hive.athena.we7.local',
                       port=10000,
                       authMechanism="KERBEROS",
                       user='',
                       password='',
                       database='davec_sandbox'
                      ) as conn:

        with conn.cursor() as cur:

            cur.execute('''add file hdfs://athena/user/davec/agg_segment_daily_reducer.py''')

            # Hive chooses only one reducer by default (28 minutes). Force 15 (2.5 mins).
            cur.execute('''set mapred.reduce.tasks=15''')
            cur.execute('''create table if not exists davec_sandbox.agg_segment_daily (
                              segment_date    string,
                              segment_type    string,
                              user_segment    string,
                              users           int
                            )
                        ''')
            cur.execute('''
                            insert overwrite table davec_sandbox.agg_segment_daily
                            select segment_date,
                                   segment_type,
                                   user_segment,
                                   sum(cast(users as int))
                            from (
                                  select transform(*)
                                  using 'agg_segment_daily_reducer.py'
                                     as ( segment_date,
                                          segment_type,
                                          user_segment,
                                          users
                                        )
                                  from (
                                        select user_id,
                                               segment_type,
                                               user_segment,
                                               fact_year,
                                               fact_month,
                                               fact_day
                                        from events_super_mart.fact_user_segment
                                        --test--where segment_type = 'Value Segment'
                                        --test--and fact_year = 2014
                                        --test--and fact_month = 11
                                        distribute by user_id,
                                                      segment_type
                                        sort by user_id,
                                                segment_type,
                                                fact_year,
                                                cast(fact_month as int),
                                                cast(fact_day as int)
                                  ) user_segment
                            ) segment_by_date
                            group by segment_date,
                                     segment_type,
                                     user_segment
                        ''')
Developer: elisabeta-voicu, Project: fact_user_segment_analysis, Lines: 60, Source: agg_segment_daily_batch.py


Example 6: connect

    def connect(self):
      print "I'm running but will hang some time. Please be patient..."
      
  
      with pyhs2.connect(host='cosmos.lab.fi-ware.org',
                         port=10000,
                         authMechanism="PLAIN",
                         user='',
                         password='',
                         database='default') as conn:
          with conn.cursor() as self.cur:
              #Show databases
              #print cur.getDatabases()

              #Execute query
              self.cur.execute("select * from andre_silva_fresh_serv_fresh_servpath_sensor_9_sensor_column")
              self.db_zone1_cols = []
              for i in self.cur.getSchema():
                  if("_md" not in i['columnName'].split('.')[1] and
                     "recv" not in i['columnName'].split('.')[1]):
                      self.db_zone1_cols.append(i['columnName'].split('.')[1])
              self.db_zone1_rows = self.cur.fetch()
              
              
              self.cur.execute("select * from andre_silva_fresh_serv_fresh_servpath_sensor_10_sensor_column")
              self.db_zone2_cols = []
              for i in self.cur.getSchema():
                  if("_md" not in i['columnName'].split('.')[1] and
                     "recv" not in i['columnName'].split('.')[1]):
                      self.db_zone2_cols.append(i['columnName'].split('.')[1])
              self.db_zone2_rows = self.cur.fetch()
              

      print "Whoa! I have a database!"
Developer: Introsys, Project: fresh, Lines: 34, Source: Report.py


Example 7: __init__

    def __init__(self,using=''):
        """
        @param cursor_hander:数据库句柄
        """
        self.cursor = None
        self.cursor_hander = using
        self.connections = None
        
        if str(self.cursor_hander).rstrip() == '':
            print('please write Custom_Hive`s using param')
            exit(0)

                        
        databases = {
            'ares_dw': {'host': '10.0.0.2', 'user': 'hive', 'password': '', 'database': 'test', 'port': 10000, 'authMechanism': 'NOSASL'},
            'hadoops2': {'host': '10.0.0.2', 'user': 'hive', 'password': '', 'database': 'test', 'port': 10000, 'authMechanism': 'NOSASL'},
            'basic_data': {'host': '10.0.0.2', 'user': 'hive', 'password': '', 'database': 'basic_data', 'port': 10000, 'authMechanism': 'NOSASL'}
        }

        database = databases[self.cursor_hander]

        self.connections = pyhs2.connect(host=database['host'],
                                         port=int(database['port']),
                                         authMechanism=database['authMechanism'],
                                         user=database['user'],
                                         password=database['password'],
                                         database=database['database'])

        self.cursor = self.connections.cursor()
Developer: jksd3344, Project: Analoglogin, Lines: 30, Source: hive.py


Example 8: getSchema

def getSchema():
    settings = get_settings_from_file("spec.json")
    print(settings)

    conn = pyhs2.connect(host=settings.Param.HiveServer2_Host,
            port=int(settings.Param.HiveServer2_Port),
            authMechanism="PLAIN",
            user="hive",
            password="",
            database="default")
    query_sql = "DESCRIBE    %s" % settings.Input.table_a.val
    cur = conn.cursor()
    cur.execute(query_sql)

    a_schema = []
    for row in cur.fetch():
       a_schema.append(("a.%s AS a_%s") %(row[0],row[0]))
  
    query_sql = "DESCRIBE    %s" % settings.Input.table_b.val
    cur = conn.cursor()
    cur.execute(query_sql)

    b_schema = []
    for row in cur.fetch():
       b_schema.append(("b.%s AS b_%s")%(row[0],row[0]))
             
    cur.close()
    conn.close()
    return [a_schema,b_schema]
Developer: DataCanvasIO, Project: example-modules, Lines: 29, Source: main.py


Example 9: getHiveData

def getHiveData(loadsql, createsql, dropsql, selectsql):

    with pyhs2.connect(host='localhost',
                       port=10000,
                       authMechanism="PLAIN",
                       user='root',
                       password='test',
                       database='default') as conn:
        with conn.cursor() as cur:
            # Show databases
            # print cur.getDatabases()
            print("--------")

            cur.execute(dropsql)
            print("--------")
            cur.execute(createsql)
            print("--------")
            cur.execute(loadsql)
            print("--------")
            cur.execute(selectsql)
            print("--------")

            # Return column info from query
            # print cur.getSchema()

            # Fetch table results
            result = []
            for i in cur.fetch():
                result.append(i)
                yield i
Developer: decibel, Project: rebataur, Lines: 29, Source: hive_util.py


Example 10: connect

    def connect(self, config):
        """
        connect hiveServer2 with your config

        Parameters:
        -----------
        config : dict.
          | config['host'] The host ip of hive server 2
          | config['port'] The port of hive server 2
          | config['authMechanism'] Most of the time the value is "NOSASL"
          | config['user'] The connect user
          | config['password'] The connect password
          | config['database'] The database which you want to connect

        Returns:
        --------
        None
        """

        self.conn = pyhs2.connect(host=config['host'],
                                  port=config['port'],
                                  authMechanism=config['authMechanism'],
                                  user=config['user'],
                                  password=config['password'],
                                  database=config['database'])
        self.cur = self.conn.cursor()
Developer: neutronest, Project: hiveserver2-helper, Lines: 26, Source: hiveserver2_helper.py
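
A hypothetical way to drive this helper, using placeholder values for the config keys described in the docstring:

# Placeholder config; the keys follow the docstring above, the values are assumptions.
config = {
    'host': '127.0.0.1',
    'port': 10000,
    'authMechanism': 'NOSASL',
    'user': 'hive',
    'password': '',
    'database': 'default',
}
helper.connect(config)             # `helper` is assumed to be an instance of the surrounding class
helper.cur.execute('show tables')  # the cursor is ready once connect() returns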


Example 11: purchase

def purchase(id, purchase_type):
    """Returning the number of purchases for certain ids. Type of purchase (release or track) is needed"""

    # testing if purchase type is correct
    if purchase_type == 'release':
        ctype = 'release_id'
    elif purchase_type == 'track':
        ctype = 'track_id'
    else:
        raise AttributeError("provide valid purchase_type: 'release' or 'track'")

    #splitting ids
    cid = id_split(id)
    cid = cid.replace('src_id', ctype)

    # uncomment the line below to print created Hive query
    # print ('select count(transaction_id), %s from purchase where (%s) group by %s' % (ctype, cid, ctype))

    # connecting to Hive, sending query, returning results of query
    conn = pyhs2.connect(host='nif-nif.zvq.me', port=10000, authMechanism="NOSASL", user='hive', password='test',
                         database='default')
    cur = conn.cursor()
    cur.execute('select count(transaction_id), %s from purchase where (%s) group by %s' % (ctype, cid, ctype))
    results = cur.fetch()
    cur.close()
    conn.close()
    return results
Developer: ToxaZ, Project: my_code, Lines: 27, Source: check.py


Example 12: load_data_orc

    def load_data_orc(self, table_name):
        with pyhs2.connect(host=self.env['HIVE_HOST'], port=10000, authMechanism="PLAIN",
                           user=self.env['USER_NAME'], password='', database=self.config['HIVE_DB']) as conn:
            with conn.cursor() as cur:
                # Optional merge-tuning session settings, kept commented out as in the original:
                # set_hive = "SET hive.mergejob.maponly=true"
                # set_hive = "SET hive.merge.mapredfiles=true"
                # set_hive = "SET hive.merge.mapfiles=true"
                # set_hive = "SET hive.merge.size.per.task=256000000"
                # set_hive = "SET hive.merge.smallfiles.avgsize=16000000000"
                # self.logger.info("RUNNING at HIVE: %s", set_hive)
                # cur.execute(set_hive)

                hive_sql = "INSERT INTO TABLE " + self.config['HIVE_DB'] + "." + table_name
                # + " PARTITION (" + primary_id + "=" + primary_value + ")"
                hive_sql = hive_sql + " SELECT " + self.table.get_table_column(table_name) + \
                           " from " + self.config['HIVE_DB'] + "." + table_name + "_text"
                print hive_sql
                self.logger.info("RUNNING at HIVE: %s", hive_sql)
                cur.execute(hive_sql)
Developer: atuldata, Project: Vertica-Hadoop-Integration-, Lines: 32, Source: sqoop_pyhive.py


Example 13: get_connection

def get_connection(db_conn_conf, use_default_db):

    # Set up which database to use
    if use_default_db:
        db_name = "default"
    else:  # Set it to be the one specified in the config file
        db_name = db_conn_conf['database']

    # Create the connection
    if db_conn_conf['connector'] == 'hive':
        conn = pyhs2.connect(host=db_conn_conf['host'],
                 port=int(db_conn_conf['port']), 
                 authMechanism="PLAIN",
                 user=db_conn_conf['user'],
                 database=db_name)
        return conn
    elif db_conn_conf['connector'] == 'impala':
        conn = impala.dbapi.connect(host=db_conn_conf['host'],
                 port=db_conn_conf['port'], 
                 user=db_conn_conf['user'],
                 database=db_name)
        return conn
    else:
        print "Can't find connector"
        sys.exit(0)
Developer: Bekterra, Project: data-formats, Lines: 25, Source: df_run_module.py


Example 14: get_cursor

def get_cursor():
        conn = pyhs2.connect(host='',
               port=10000,
               authMechanism="PLAIN",
               user='hadoop',
               password='',
               database='test')
        return conn.cursor()
Developer: abhaystoic, Project: datamining, Lines: 8, Source: data_mining.py


Example 15: get_conn

 def get_conn(self):
     db = self.get_connection(self.hiveserver2_conn_id)
     return pyhs2.connect(
         host=db.host,
         port=db.port,
         authMechanism=db.extra_dejson.get('authMechanism', 'NOSASL'),
         user=db.login,
         database=db.schema or 'default')
Developer: patrickleotardif, Project: airflow, Lines: 8, Source: hive_hooks.py


Example 16: __init__

 def __init__(self, **hive_cfg):
     self.args = self.create_hive_config(**hive_cfg)
     self.columns = []
     try:
         self.conn = pyhs2.connect(**self.args)
         self.cursor = self.conn.cursor()
     except Exception as e:
         logging.error("Could not get a cursor. Reason:\n %s", e)
Developer: mariusmilea, Project: hiveweb, Lines: 8, Source: hive.py


Example 17: get_conn

    def get_conn(self):
        db = self.get_connection(self.hiveserver2_conn_id)
        auth_mechanism = db.extra_dejson.get("authMechanism", "NOSASL")
        if conf.get("core", "security") == "kerberos":
            auth_mechanism = db.extra_dejson.get("authMechanism", "KERBEROS")

        return pyhs2.connect(
            host=db.host, port=db.port, authMechanism=auth_mechanism, user=db.login, database=db.schema or "default"
        )
Developer: russelldurrett, Project: airflow, Lines: 9, Source: hive_hooks.py


Example 18: run

    def run(self, connection, date):
        target_date = (date - datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        raw_adi_logs_pathname = os.path.join(
            tempfile.gettempdir(),
            "%s.raw_adi_logs.TEMPORARY%s" % (
                target_date,
                '.txt'
            )
        )
        try:
            with open(raw_adi_logs_pathname, 'w') as f:
                hive = pyhs2.connect(
                    host=self.config.hive_host,
                    port=self.config.hive_port,
                    authMechanism=self.config.hive_auth_mechanism,
                    user=self.config.hive_user,
                    password=self.config.hive_password,
                    database=self.config.hive_database
                )

                cur = hive.cursor()
                query = self.config.query % target_date
                cur.execute(query)
                for row in cur:
                    if None not in row:
                        f.write(
                            "\t"
                            .join(
                                urllib2.unquote(str(v)).replace('\\', '\\\\')
                                for v in row
                            )
                        )
                        f.write("\n")

            with open(raw_adi_logs_pathname, 'r') as f:
                pgcursor = connection.cursor()
                pgcursor.copy_from(
                    f,
                    'raw_adi_logs',
                    null='None',
                    columns=[
                        'report_date',
                        'product_name',
                        'product_os_platform',
                        'product_os_version',
                        'product_version',
                        'build',
                        'build_channel',
                        'product_guid',
                        'count'
                    ]
                )
                pgcursor.execute(_RAW_ADI_QUERY, (target_date,))
        finally:
            if os.path.isfile(raw_adi_logs_pathname):
                os.remove(raw_adi_logs_pathname)
Developer: bramwelt, Project: socorro, Lines: 57, Source: fetch_adi_from_hive.py


Example 19: __init__

	def __init__(self, host, port, user, passwd, db):
		self._host = host
		self._port = port
		self._user = user
		self._passwd = passwd
		self._db = db

		self._conn = pyhs2.connect(host = self._host, port = self._port, authMechanism = "PLAIN", user = self._user, password = self._passwd, database = self._db)

		self._cur = self._conn.cursor()
Developer: Leaderman, Project: dip, Lines: 10, Source: hiveserver2.py


Example 20: get_conn

    def get_conn(self):
        db = self.get_connection(self.hiveserver2_conn_id)
        auth_mechanism = db.extra_dejson.get('authMechanism', 'NOSASL')
        if conf.get('core', 'security') == 'kerberos':
            auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')

        return pyhs2.connect(
            host=db.host,
            port=db.port,
            authMechanism=auth_mechanism,
            user=db.login,
            database=db.schema or 'default')
Developer: rmoorman, Project: airflow, Lines: 12, Source: hive_hooks.py



Note: The pyhs2.connect function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.

