In the tutorial you can see "Created 3 years ago", so it is simply outdated. For example, you now use await instead of yield from.
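For comparison, a minimal sketch of the two styles (the old generator-based form used the asyncio.coroutine decorator, which was removed in Python 3.11, so the first half only runs on older versions):

import asyncio

# old, pre-3.5 style shown in outdated tutorials (decorator removed in 3.11)
@asyncio.coroutine
def fetch_old():
    yield from asyncio.sleep(1)

# modern style, Python 3.5+
async def fetch_new():
    await asyncio.sleep(1)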
Better to read the official documentation for aiohttp. In The aiohttp Request Lifecycle you can see a similar example with fetch() and main():
import aiohttp
import asyncio

async def fetch(session, url):
    async with session.get(url) as response:
        return await response.text()

async def main():
    async with aiohttp.ClientSession() as session:
        html = await fetch(session, 'http://python.org')
        print(html)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
And it could look like this with your example:
import aiohttp
import asyncio

async def fetch(url, idx):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            if response.status == 200:
                print("data fetched successfully for:", idx)
                #print(await response.text(), response.status)
            else:
                print("data fetch failed for:", idx)
                print(await response.text(), response.status)

async def main():
    url = 'https://yahoo.com'
    urls = [url] * 10

    for idx, url in enumerate(urls):
        await fetch(url, idx)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
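The for loop above awaits each request one at a time. If you want the requests to run concurrently, you could combine a single shared session (which the documentation recommends anyway) with asyncio.gather; a minimal sketch, not part of the original question:

import aiohttp
import asyncio

async def fetch(session, url, idx):
    async with session.get(url) as response:
        if response.status == 200:
            print("data fetched successfully for:", idx)
        else:
            print("data fetch failed for:", idx)
            print(await response.text(), response.status)

async def main():
    url = 'https://yahoo.com'
    urls = [url] * 10

    async with aiohttp.ClientSession() as session:
        # schedule all requests at once instead of awaiting them one by one
        tasks = [fetch(session, url, idx) for idx, url in enumerate(urls)]
        await asyncio.gather(*tasks)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())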
Or without creating a ClientSession, using aiohttp.request():
import asyncio
import aiohttp

async def fetch_page(url, idx):
    async with aiohttp.request('GET', url) as response:
        if response.status == 200:
            print("data fetched successfully for:", idx)
        else:
            print("data fetch failed for:", idx)
            # response.content is a StreamReader object; text() gives the body
            print(await response.text(), response.status)

async def main():
    url = 'https://yahoo.com'
    urls = [url] * 100

    for idx, url in enumerate(urls):
        await fetch_page(url, idx)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
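By the way, asyncio.get_event_loop() shows the same aging problem; since Python 3.7 the preferred entry point is asyncio.run(), so the last two lines in each example can be replaced with:

if __name__ == '__main__':
    asyncio.run(main())  # creates, runs and closes the event loop for you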