My first crawler from the book 《python网络爬虫实战》 (Python Web Crawlers in Action): scraping Sina News

2018-09-10 11:07:40

Install Anaconda first; the bundled Spyder IDE makes it convenient to inspect variables after a run.

1. Open a cmd console and run pip install beautifulsoup4 and pip install requests. (The code below also uses the lxml parser; it ships with Anaconda, otherwise install it with pip install lxml.)
2. Write the code. It is already quite clear, runs as-is without errors, and produces the expected results.
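As a quick sanity check after installing, you can confirm the packages import cleanly (a minimal snippet; the version numbers will vary with your install):

Code language: python
# Quick sanity check: all three packages import without errors
import requests
import bs4
import lxml  # parser backend handed to BeautifulSoup below
print('requests', requests.__version__)
print('beautifulsoup4', bs4.__version__)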

Code language: python
import requests
import json
from bs4 import BeautifulSoup
from datetime import datetime

def getNewsDetail(newsUrl):
    # Fetch a single article page and pull out the fields we want
    newsWeb = requests.get(newsUrl)
    newsWeb.encoding = 'utf-8'
    soup = BeautifulSoup(newsWeb.text, 'lxml')
    result = {}
    result['title'] = soup.select('.main-title')[0].text
    result['newsSource'] = soup.select('.source')[0].text
    timeSource = soup.select('.date')[0].text
    result['datetime'] = datetime.strptime(timeSource, '%Y年%m月%d日 %H:%M')
    result['article'] = soup.select('.article')[0].text
    # str.strip() treats its argument as a character set, not a prefix,
    # so remove the '责任编辑:' label explicitly instead
    result['editor'] = soup.select('.show_author')[0].text.replace('责任编辑:', '').strip()
    result['comment'] = soup.select('.num')[0].text  # comment count
    return result

def parseListLinks(url):
    # The roll API returns JSONP: the JSON payload is wrapped in
    # newsloadercallback( ... ); so slice it out between the outermost
    # parentheses. lstrip/rstrip treat their arguments as character
    # sets and can eat too much, so slicing is safer.
    res = requests.get(url)
    text = res.text.strip()
    jsonLoad = json.loads(text[text.find('(') + 1 : text.rfind(')')])
    newsUrls = [item['url'] for item in jsonLoad['result']['data']]
    newsDetails = []
    for newsUrl in newsUrls:
        newsDetails.append(getNewsDetail(newsUrl))
    return newsDetails
      
if __name__ == '__main__':
    # Fetch the details of a single news page
    newsUrl = 'http://news.sina.com.cn/s/wh/2018-01-08/doc-ifyqkarr7830426.shtml'
    newsDetail = getNewsDetail(newsUrl)
    # Fetch every news page listed on one page of the roll list
    rollUrl = 'http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page=23&callback=newsloadercallback&_=1515911333929'
    newsDetails = parseListLinks(rollUrl)
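
The roll API paginates through the page parameter (page=23 above), so looping over that value collects several list pages in one run. A minimal sketch under that assumption; the rollUrlTemplate string and the one-second pause are additions for illustration, not part of the original post:

Code language: python
import time

# rollUrl from above with the page number left as a placeholder
rollUrlTemplate = ('http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw'
                   '&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1'
                   '&show_all=1&show_num=22&tag=1&format=json&page={}'
                   '&callback=newsloadercallback&_=1515911333929')

allNews = []
for page in range(1, 4):    # first three list pages
    allNews.extend(parseListLinks(rollUrlTemplate.format(page)))
    time.sleep(1)           # brief pause so we do not hammer the server
print(len(allNews), 'articles collected')

Since Anaconda ships with pandas, pandas.DataFrame(allNews).to_csv('news.csv', index=False, encoding='utf-8-sig') is one convenient way to persist the result.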
