Python Crawler: Source Code for Scraping Works Information from the eccoid Website!

2024-01-03 15:42:45

This is a fairly simple crawler, well suited for practice and learning. It scrapes works information from the site, including the title, text content, and images; the images are downloaded with multiple threads, and the request headers are randomized by picking a User-Agent at random. If you are looking for a site to practice on, you might try scraping and downloading its data.
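For the multithreaded image download, the full source below starts one threading.Thread per image. As a point of comparison, here is a minimal sketch of the same idea using concurrent.futures.ThreadPoolExecutor, which caps how many downloads run at once; the fetch_image helper and the worker count are assumptions for illustration, not part of the original script.

Code language: Python

from concurrent.futures import ThreadPoolExecutor

import requests


def fetch_image(path, img_url):
    # Hypothetical helper: download one image and save it under path
    r = requests.get(img_url, timeout=10)
    name = img_url.split('/')[-1]
    with open(f'{path}{name}', 'wb') as f:
        f.write(r.content)


def down_imgs_pooled(path, imgs):
    # At most 5 downloads in flight at once (the worker count is an assumption)
    with ThreadPoolExecutor(max_workers=5) as pool:
        for img in imgs:
            pool.submit(fetch_image, path, img)
    # Leaving the with-block waits for all submitted downloads to finish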

Because the site is hosted overseas, requests may time out or fail, so the crawler retries a failed request up to three times; detail-page scraping is additionally wrapped in error handling so that a failed page is logged and skipped rather than aborting the whole run. That keeps the crawl moving and makes the script a good case for newcomers to learn from.

Code language: Python
    for href in hrefs:
        try:
            get_detail(href)
        except Exception as e:
            print(f">> Error scraping {href},\nerror: {e}")
        time.sleep(3)
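The retry loop in the full source (get_resp below) is hand-rolled. As an alternative, requests can delegate retries to urllib3 through a mounted HTTPAdapter; here is a minimal sketch, where the retry count, back-off factor, and status-code list are illustrative choices rather than values taken from the original.

Code language: Python

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retry = Retry(
    total=3,  # up to 3 retries, matching the original's limit
    backoff_factor=2,  # exponential back-off between attempts
    status_forcelist=[429, 500, 502, 503, 504],  # also retry on these HTTP codes
)
session.mount('https://', HTTPAdapter(max_retries=retry))
session.mount('http://', HTTPAdapter(max_retries=retry))

response = session.get('https://www.eccoid.com/works', timeout=10)

One difference worth noting: the adapter retries transparently inside a single session.get call, while the original loop also rotates the User-Agent on every attempt.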

If you are interested, try scraping and downloading the data yourself; just remember to limit the timing and frequency of your requests.
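For example, a randomized pause between detail pages is an easy way to throttle; the 3-6 second range here is an arbitrary choice, not a requirement of the site.

Code language: Python

import random
import time

# Sleep a random 3-6 seconds between requests; the range is an assumption
time.sleep(random.uniform(3, 6))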

The complete source code is attached below, for reference and learning purposes only.

Code language: Python
# -*- coding: UTF-8 -*-
# beccoid @WeChat official account: eryeji
# https://www.eccoid.com/works

import requests
from lxml import etree
import time
import random
import re
import threading
import os



# Return a random User-Agent so requests don't all share one fingerprint
def get_ua():
    ua_list = [
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
    ]
    return random.choice(ua_list)


# Fetch the works listing page and walk every detail-page link
def get_hrefs():
    url = 'https://www.eccoid.com/works'
    headers = {
        "User-Agent": get_ua()
    }
    response = requests.get(url=url, headers=headers, timeout=6)
    print(response.status_code)
    html = response.content.decode('utf-8')
    tree = etree.HTML(html)
    hrefs = tree.xpath('//p[@class="gallery-caption-content"]/a/@href')
    print(len(hrefs))
    print(hrefs)
    for href in hrefs:
        try:
            get_detail(href)
        except Exception as e:
            # Log and skip a failed detail page instead of aborting the crawl
            print(f">> Error scraping {href},\nerror: {e}")
        time.sleep(3)




# Scrape one detail page: save its text to a .txt file, then fetch its images
def get_detail(href):
    print(f">> Scraping: {href}")
    response = get_resp(url=href)
    print(response.status_code)
    html = response.content.decode('utf-8')
    tree = etree.HTML(html)
    h1 = tree.xpath('//h1/strong/text()')[0]
    pattern = r'[\\/:*?"<>|]'
    h1 = re.sub(pattern, "_", h1)  # replace characters illegal in file names with underscores
    print(h1)
    path = f'{h1}/'
    os.makedirs(path, exist_ok=True)
    print(f">> Created save directory {h1}!")
    ptexts = tree.xpath('//div[@class="sqs-block-content"]//text()')
    ptext = '\n'.join(ptexts)
    print(ptext)
    with open(f'{path}{h1}.txt', 'w', encoding='utf-8') as f:
        f.write(f'{h1}\n{ptext}')
    print(f">> Saved {h1}.txt!")
    # The banner image sits outside the content block, so grab it separately
    img = tree.xpath('//div[@class="section-background-content"]/img/@data-src')[0]
    imgs = tree.xpath('//div[@class="content"]//img/@data-src')
    imgs.insert(0, img)
    print(len(imgs))
    print(imgs)
    down_imgs(path, imgs)





# Fetch a URL, retrying up to 3 times on request errors with growing delays
def get_resp(url):
    i = 0
    while i < 4:
        try:
            headers = {
                "User-Agent": get_ua()
            }
            response = requests.get(url, headers=headers, timeout=10)
            print(response.status_code)
            return response
        except requests.exceptions.RequestException:
            i += 1
            print(f">> Request failed, retry {i} in {i * 2}s")
            time.sleep(i * 2)
    # All retries exhausted; raise so the caller's error handling can skip this page
    raise requests.exceptions.RequestException(f"Failed to fetch {url} after retries")



# Download all images for one work concurrently, one thread per image
def down_imgs(path, imgs):
    threadings = []
    for img in imgs:
        t = threading.Thread(target=get_img, args=(path, img))
        threadings.append(t)
        t.start()

    for x in threadings:
        x.join()  # wait for every download thread to finish

    print(">> Multithreaded image download complete!")


# Download a single image, naming it after the last URL path segment
def get_img(path, img_url):
    img_name = img_url.split('/')[-1]
    r = get_resp(img_url)
    time.sleep(1)
    with open(f'{path}{img_name}', 'wb') as f:
        f.write(r.content)
    print(f">> Downloaded {img_name}")


def main():
    get_hrefs()




if __name__ == '__main__':
    main()
