# Crawler goal: scrape high-quality, highly-starred Python projects from GitHub search
# coding=utf-8
import requests
from bs4 import BeautifulSoup
def get_effect_data(data):
    """Parse one GitHub search-results page and extract project details."""
    results = list()
    soup = BeautifulSoup(data, 'html.parser')
    # Each search hit is one of these rows; the string below matches the full
    # class attribute used by GitHub's (older) search-page markup
    projects = soup.find_all('div', class_='repo-list-item d-flex flex-justify-start py-4 public source')
    for project in projects:
        try:
            writer_project = project.find('a', attrs={'class': 'v-align-middle'})['href'].strip()
            project_language = project.find('div', attrs={'class': 'd-table-cell col-2 text-gray pt-2'}).get_text().strip()
            project_stars = project.find('a', attrs={'class': 'muted-link'}).get_text().strip()
            update_desc = project.find('p', attrs={'class': 'f6 text-gray mr-3 mb-0 mt-2'}).get_text().strip()
            # href looks like '/<owner>/<repo>', so split('/') yields ['', owner, repo]
            result = (writer_project.split('/')[1], writer_project.split('/')[2], project_language, project_stars, update_desc)
            results.append(result)
        except Exception:
            # Skip rows that are missing any of the expected elements
            pass
    return results
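
# A quick sanity check for get_effect_data on a minimal, hand-written HTML
# fragment that mimics the markup the selectors above expect (the fragment is
# invented for illustration, not real GitHub output):
_sample = '''
<div class="repo-list-item d-flex flex-justify-start py-4 public source">
  <a class="v-align-middle" href="/owner/repo">owner/repo</a>
  <div class="d-table-cell col-2 text-gray pt-2">Python</div>
  <a class="muted-link">1,234</a>
  <p class="f6 text-gray mr-3 mb-0 mt-2">Updated yesterday</p>
</div>'''
# get_effect_data(_sample) == [('owner', 'repo', 'Python', '1,234', 'Updated yesterday')]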
def get_response_data(page):
    """Fetch one page of GitHub search results for Python repositories."""
    request_url = 'https://github.com/search'
    # q=python, sorted by stars in descending order; p is the 1-indexed page number
    params = {'o': 'desc', 'q': 'python', 's': 'stars', 'type': 'Repositories', 'p': page}
    resp = requests.get(request_url, params=params)
    return resp.text
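
# HTML scraping is fragile: the class names above break whenever GitHub changes
# its page layout. The same query can also be made against GitHub's official
# search API, which returns stable JSON. A minimal sketch follows (this
# function is an optional alternative and is not called from the main block):
def get_api_data(page):
    api_url = 'https://api.github.com/search/repositories'
    params = {'q': 'language:python', 'sort': 'stars', 'order': 'desc', 'page': page}
    resp = requests.get(api_url, params=params, headers={'Accept': 'application/vnd.github+json'})
    results = list()
    for item in resp.json().get('items', []):
        owner, name = item['full_name'].split('/')
        # Same tuple shape as get_effect_data: (owner, repo, language, stars, description)
        results.append((owner, name, item.get('language'), item['stargazers_count'], item.get('description')))
    return results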
if __name__ == '__main__':
    total_page = 1  # number of search-result pages to scrape
    datas = list()
    for page in range(total_page):
        res_data = get_response_data(page + 1)  # GitHub pages are 1-indexed
        data = get_effect_data(res_data)
        datas += data
    for i in datas:
        print(i)
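
    # Optional follow-up, a minimal sketch of persisting the scraped tuples
    # with the standard csv module (the filename is an arbitrary choice):
    import csv
    with open('python_projects.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(('owner', 'repo', 'language', 'stars', 'updated'))
        writer.writerows(datas)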