This article collects typical usage examples of the Python twitter__login.login function. If you have been wondering how the login function is used in practice, or looking for real-world calls to it, the hand-picked code examples below should help.
Eleven code examples of the login function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
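
All eleven examples assume a twitter__login helper module in the style of Mining the Social Web: its login() reads cached OAuth tokens (or walks the user through the OAuth dance on first run) and returns an authenticated twitter.Twitter REST client. For reference, here is a minimal sketch of such a module, assuming the sixohsix twitter package; the app name and credential values are placeholders, not taken from any of the projects below:

# twitter__login.py -- minimal sketch in the style of the Mining the
# Social Web helper; credential values below are placeholders.
import os
import twitter
from twitter.oauth import read_token_file, write_token_file
from twitter.oauth_dance import oauth_dance

APP_NAME = 'YourAppName'
CONSUMER_KEY = 'YOUR_CONSUMER_KEY'
CONSUMER_SECRET = 'YOUR_CONSUMER_SECRET'
TOKEN_FILE = 'out/twitter.oauth'

def login():
    try:
        # Reuse tokens cached from a previous run
        (access_token, access_token_secret) = read_token_file(TOKEN_FILE)
    except IOError:
        # First run: perform the OAuth dance and cache the tokens
        (access_token, access_token_secret) = oauth_dance(
            APP_NAME, CONSUMER_KEY, CONSUMER_SECRET)
        if not os.path.isdir('out'):
            os.mkdir('out')
        write_token_file(TOKEN_FILE, access_token, access_token_secret)
    return twitter.Twitter(auth=twitter.oauth.OAuth(
        access_token, access_token_secret, CONSUMER_KEY, CONSUMER_SECRET))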
Example 1: __init__

def __init__(self):
    self.counter = 0
    self.fprefix = ""
    self.output = None
    self.twitter_rest = twitter__login.login()
    # access_token, access_token_secret, consumer_key and consumer_secret
    # are module-level credentials defined elsewhere in the project
    self.twitter_stream = twitter.TwitterStream(
        auth=twitter.oauth.OAuth(access_token, access_token_secret,
                                 consumer_key, consumer_secret))
    self.fname = None
    self.ufname = None

Developer: weikengary | Project: CS4242AS3 | Lines: 8 | Source: StreamCrawler.py
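
This constructor holds two handles: a REST client from twitter__login.login() and a streaming client built directly from OAuth credentials. A hypothetical usage sketch (not from the original project) that pulls a few messages from the public sample stream through the streaming handle:

# Hypothetical driver -- assumes the four credential variables are
# defined at module level before StreamCrawler() is instantiated.
crawler = StreamCrawler()
for i, msg in enumerate(crawler.twitter_stream.statuses.sample()):
    print msg.get('text', '')   # delete notices etc. carry no 'text'
    if i >= 9:                  # stop after ten messages
        break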
Example 2: __init__

def __init__(self):
    self.listOfFiles = None
    self.listOfUsers = []
    self.listOfUserList = []  # (id, title, des)
    self.twitter_rest = twitter__login.login()
    self.listOfListsMetaInfo = []
    self.aspect = None
    self.u = Utils.Utils()

Developer: weikengary | Project: CS4242AS3 | Lines: 9 | Source: UserListCrawler.py
Example 3: get_stream

def get_stream(self, TIMELINE_NAME, MAX_PAGES):
    USER = None
    KW = {  # For the Twitter API call
        'count': 200,
        'trim_user': 'true',
        'include_rts': 'true',
        'since_id': 1,
    }
    if TIMELINE_NAME == 'user':
        USER = sys.argv[3]
        KW['screen_name'] = USER
    if TIMELINE_NAME == 'home' and MAX_PAGES > 4:
        MAX_PAGES = 4
    if TIMELINE_NAME == 'user' and MAX_PAGES > 16:
        MAX_PAGES = 16

    t = login()
    client = MongoClient('localhost', 27017)
    db = client.test_database
    posts = db.tw_data  # Collection name
    posts.drop()

    # makeTwitterRequest and getNextQueryMaxIdParam are helper functions
    # defined elsewhere in the project
    api_call = getattr(t.statuses, TIMELINE_NAME + '_timeline')
    tweets = makeTwitterRequest(api_call, **KW)
    for tweet in tweets:
        if tweet['lang'] == 'en':
            post_id = posts.insert(tweet)

    page_num = 1
    while page_num < MAX_PAGES and len(tweets) > 0:
        # Necessary for traversing the timeline in Twitter's v1.1 API.
        # See https://dev.twitter.com/docs/working-with-timelines
        KW['max_id'] = getNextQueryMaxIdParam(tweets)
        api_call = getattr(t.statuses, TIMELINE_NAME + '_timeline')
        tweets = makeTwitterRequest(api_call, **KW)
        for tweet in tweets:
            if tweet['lang'] == 'en':
                post_id = posts.insert(tweet)
        page_num += 1

Developer: elferrus7 | Project: QMBlogging | Lines: 56 | Source: network.py
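
The pagination hinges on getNextQueryMaxIdParam, which asks the next request for everything strictly older than the oldest tweet already fetched. Assuming it works as the twitter__util helper does in Mining the Social Web, it boils down to:

def getNextQueryMaxIdParam(statuses):
    # Request only tweets older than the oldest one already seen,
    # per Twitter's v1.1 timeline-paging recommendation.
    return min([status['id'] for status in statuses]) - 1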
Example 4: load_followers

def load_followers():
    # For each user extracted by the getusers method, load followers from Twitter
    t = twitter__login.login()
    in_file = open('users.txt', 'rb')
    count = 1
    count2 = 1
    # Twitter only allows fetching followers for 15 users every 30 minutes,
    # so this method is re-run with adjustments to the count variable
    for row in in_file:
        name = row.strip()
        if count >= 47:
            followers = t.followers.ids(screen_name=name)
            # For every user, obtain the list of followers and store it in a
            # text file (the user name identifies the file)
            out_file = name + ".txt"
            out = open(out_file, 'w')
            pickle.dump(followers['ids'], out)
            out.close()
            count2 += 1
        count += 1
        # Tried adding a sleep after every 15 users; worked sometimes
        if count2 % 15 == 0:
            time.sleep(180)
            count2 = 1

Developer: lynnUg | Project: TwitterAnalysis | Lines: 22 | Source: Mediaseige2.py
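
A more robust alternative to hand-counting requests (a hypothetical sketch, not from the original project) is to react to the rate-limit error itself and sleep out the 15-minute window:

import time
from twitter.api import TwitterHTTPError

def fetch_followers(t, name):
    # Hypothetical helper: retry a followers/ids call after the
    # rate-limit window instead of tracking request counts by hand.
    while True:
        try:
            return t.followers.ids(screen_name=name)
        except TwitterHTTPError, e:
            if e.e.code == 429:      # Too Many Requests
                time.sleep(15 * 60)  # wait out the window, then retry
            else:
                raise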
Example 5: login

# -*- coding: utf-8 -*-
import sys
import json
import redis

from twitter__login import login

# A makeTwitterRequest call through to the /users/lookup
# resource, which accepts a comma-separated list of up
# to 100 screen names. Details are fairly uninteresting.
# See also http://dev.twitter.com/doc/get/users/lookup
from twitter__util import getUserInfo

if __name__ == "__main__":
    screen_names = sys.argv[1:]
    t = login()
    r = redis.Redis()
    print json.dumps(
        getUserInfo(t, r, screen_names=screen_names),
        indent=4
    )

Developer: ANB2 | Project: Mining-the-Social-Web | Lines: 23 | Source: friends_followers__get_user_info.py
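
getUserInfo comes from the book's twitter__util module; the gist is to batch screen names into groups of 100 (the /users/lookup maximum) and cache each profile in Redis. A simplified sketch of that idea, with caching keyed by screen name and no error handling:

import json

def get_user_info(t, r, screen_names):
    # Simplified sketch of the twitter__util helper: look up profiles
    # 100 at a time and cache each JSON blob in Redis.
    infos = []
    while screen_names:
        batch, screen_names = screen_names[:100], screen_names[100:]
        for info in t.users.lookup(screen_name=','.join(batch)):
            r.set('screen_name$' + info['screen_name'], json.dumps(info))
            infos.append(info)
    return infos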
Example 6: auth_to_twitter

def auth_to_twitter():
    return twitter__login.login()

Developer: lynnUg | Project: TwitterAnalysis | Lines: 3 | Source: Ugtraffic.py
Example 7: login

KW = {  # For the Twitter API call
    'count': 200,
    'trim_user': 'true',
    'include_rts': 'true',
    'since_id': 1,
}

if TIMELINE_NAME == 'user':
    USER = sys.argv[3]
    KW['screen_name'] = USER
if TIMELINE_NAME == 'home' and MAX_PAGES > 4:
    MAX_PAGES = 4
if TIMELINE_NAME == 'user' and MAX_PAGES > 16:
    MAX_PAGES = 16

t = login()

# Establish a connection to a CouchDB database
server = couchdb.Server('http://localhost:5984')
DB = 'tweets-%s-timeline' % (TIMELINE_NAME, )
if USER:
    DB = '%s-%s' % (DB, USER)
try:
    db = server.create(DB)
except couchdb.http.PreconditionFailed, e:
    # Already exists, so append to it, keeping in mind that duplicates could occur
    db = server[DB]

Developer: farquasar1 | Project: Mining-the-Social-Web | Lines: 31 | Source: Chapter5.py
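
The snippet ends once the database handle is acquired; in the book's Chapter 5 script a harvesting loop then bulk-writes each page of tweets into CouchDB. A hedged sketch of that follow-on step, assuming the same makeTwitterRequest and getNextQueryMaxIdParam helpers as in Example 3:

# Sketch of the harvesting loop that typically follows the code above.
api_call = getattr(t.statuses, TIMELINE_NAME + '_timeline')
tweets = makeTwitterRequest(api_call, **KW)
db.update(tweets, all_or_nothing=True)  # bulk insert into CouchDB
page_num = 1
while page_num < MAX_PAGES and len(tweets) > 0:
    KW['max_id'] = getNextQueryMaxIdParam(tweets)
    tweets = makeTwitterRequest(api_call, **KW)
    db.update(tweets, all_or_nothing=True)
    page_num += 1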
Example 8: GetIds

def GetIds():
    # Obtain the id for every user; these are used to create nodes and edges
    t = twitter__login.login()
    file_1 = open('users.txt', 'rb')
    file_2 = open('user_id.txt', 'w')

Developer: lynnUg | Project: TwitterAnalysis | Lines: 5 | Source: Mediaseige2.py
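
The listing is truncated at five lines. A hypothetical continuation (not from the original project) would resolve each screen name to a numeric id via /users/show and write it out:

    # Hypothetical continuation of GetIds -- the original listing stops
    # after the files are opened.
    for row in file_1:
        name = row.strip()
        user = t.users.show(screen_name=name)  # one lookup per name
        file_2.write('%s,%s\n' % (name, user['id']))
    file_1.close()
    file_2.close()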
Example 9: login

def login(self):  # we add nothing here
    return login()

Developer: elferrus7 | Project: QMBlogging | Lines: 2 | Source: network.py
Example 10: on_stall_warning

    # (tail of the previous handler in CustomStreamListener)
    return True  # Don't kill the stream

def on_stall_warning(self, status):
    print "Got Stall Warning message", str(status)
    return True  # Don't kill the stream

try:
    # my config is hard-coded
    fn = os.path.join(os.environ['HOME'], 'conf', 'twitter_mining.cfg')
    config = ConfigParser.RawConfigParser()
    config.read(fn)
    while True:
        try:
            # oauth dance
            auth = login(config)
            # Create a streaming API and set a timeout value of 1 minute
            streaming_api = tweepy.streaming.Stream(auth, CustomStreamListener(), timeout=60, secure=True)
            Q = sys.argv[2:]
            print "Track parameters", str(Q)
            streaming_api.filter(follow=None, track=Q)
        except Exception, ex:
            # dbname and get_trace are defined elsewhere in the project
            err = "'%s' '%s' Error '%s' '%s'" % (dbname, str(datetime.now()), str(ex), get_trace())
            print err
            file('errors.txt', 'a').write(err + '\n')
        finally:
            print "disconnecting..."
            streaming_api.disconnect()
            # time.sleep(60)
except KeyboardInterrupt:
    print "got keyboardinterrupt"

Developer: 3gon | Project: twitter_mining | Lines: 31 | Source: filter_tweets_streaming_api.py
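
Only the tail of CustomStreamListener is shown above. A minimal hypothetical sketch of what such a listener commonly looks like under the old tweepy 2.x API this script uses:

class CustomStreamListener(tweepy.StreamListener):
    # Hypothetical minimal listener -- the original class is not shown
    # in full. Returning True keeps the stream alive.
    def on_status(self, status):
        print status.text.encode('utf-8')
        return True

    def on_error(self, status_code):
        print "Got error code", status_code
        return True  # don't kill the stream on HTTP errors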
Example 11: getFile

def getFile(fname):
    try:
        f = open(fname)
        d = json.load(f)
        f.close()
        return d
    except:
        return None

def saveFile(d):
    print "Saving!"
    f = open(d["fname"], 'w')
    json.dump(d, f)
    f.close()

t = twitter__login.login()
screen_name = 'AdventureSauce1'
response = t.users.show(screen_name=screen_name)
user_id = response['id']
dname = screen_name + '_net.json'
d = getFile(dname)
if d is None:
    d = dict()
    d["fname"] = dname
specialcases = []
# crawlUser and handleSpecialCases are defined elsewhere in socialnetwork.py
crawlUser(user_id, d, t)
handleSpecialCases(t, d, specialcases)
saveFile(d)

Developer: PyEatingContest | Project: twittersauce | Lines: 31 | Source: socialnetwork.py
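
crawlUser and handleSpecialCases live elsewhere in the project; purely as a hypothetical sketch of the kind of follower crawl crawlUser might perform:

def crawlUser(user_id, d, t):
    # Hypothetical sketch only -- the project's real crawlUser is not
    # shown. Records the user's follower ids in the shared dict d.
    followers = t.followers.ids(user_id=user_id)
    d[str(user_id)] = followers['ids']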
Note: the twitter__login.login function examples above were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs and similar platforms; the snippets were selected from projects contributed by open-source developers. Copyright in the code remains with the original authors; consult each project's license before redistributing or reusing it, and do not repost without permission.