[DeadPool Crawler] Basic PearVideo Scraping
In the previous tutorial we gave a brief introduction to the requests library. But how do you put it to work in an actual crawler? In this installment we'll combine what we learned about requests and write a small crawler from scratch. Over the coming posts we'll gradually expand this little project until it plugs into my Deadpool framework~
PS: If you already know the basics of web scraping, you can skip ahead to my framework, the Deadpool project.
Hands-on: scraping pearvideo.com
Without further ado, here's the final code~
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import urllib.request

import requests


def download_pearvideo():
    # Create the storage directory if it doesn't already exist
    storage = 'video'
    if storage not in os.listdir():
        os.mkdir(storage)
    seed_url = "https://www.pearvideo.com/category_8"
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69'
    }
    # Fetch the category page and collect the video detail-page links
    content = requests.get(seed_url, headers=headers).text
    regex = r'<a href="(.*?)" class="vervideo-lilink actplay">'
    vids = re.findall(regex, content)
    videos = []
    prefix_url = "https://www.pearvideo.com/"
    for _ in vids:
        _url = prefix_url + _
        videos.append(_url)
    # Visit each detail page, extract the mp4 url and the title, then download
    for _ in videos:
        print(_)
        content = requests.get(_, headers=headers).text
        regex = r'ldUrl="",srcUrl="(.*?)",vdoUrl='
        video_url = re.findall(regex, content)
        regex = r'<h1 class="video-tt">(.*?)</h1>'
        video_name = re.findall(regex, content)
        print(f'Downloading video: {video_name[0]}')
        urllib.request.urlretrieve(video_url[0], os.path.join(storage, f"{video_name[0]}.mp4"))


if __name__ == "__main__":
    download_pearvideo()
Now that you have a feel for the code as a whole, let's walk through it in detail, tying it back to the first lesson.
===
This part creates the storage path. Very simple: check whether the directory already exists, and create it if not.
storage = 'video'
if storage not in os.listdir():
    os.mkdir(storage)
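A quick aside: os.listdir() only inspects the current working directory, so this check depends on where you launch the script. A sturdier one-liner (my suggestion, not part of the original code) is os.makedirs with exist_ok=True:

import os

# exist_ok=True makes the call a no-op when the directory already exists,
# and makedirs also handles nested paths like 'data/video'
os.makedirs('video', exist_ok=True)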
===
This part builds the headers for our request, i.e. the requests headers covered in the previous post.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69'
}
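Why bother with a User-Agent at all? By default requests identifies itself as python-requests, and some sites serve different content to (or outright block) obvious script traffic, so we borrow a browser's UA string. A quick way to see what actually gets sent (httpbin.org is just an echo service, used here purely for illustration):

import requests

# httpbin echoes back the request headers it received;
# without a custom header the UA is something like python-requests/2.x
r = requests.get('https://httpbin.org/headers')
print(r.request.headers['User-Agent'])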
===
This part requests the page content. What comes back here is exactly one of the common response types we discussed: the HTML body.
content = requests.get(seed_url, headers=headers).text
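Note that .text is used directly here. With a little more defensive plumbing (my addition, not in the original), you would check the status code and the detected encoding first; a minimal sketch:

resp = requests.get(seed_url, headers=headers)
resp.raise_for_status()                   # fail fast on 4xx/5xx instead of parsing an error page
resp.encoding = resp.apparent_encoding    # guard against a mis-detected charset
content = resp.text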
===
This part runs a regular expression over the downloaded page to match the content we want, and builds the list of links for the next round of requests.
regex = r'<a href="(.*?)" class="vervideo-lilink actplay">'
vids = re.findall(regex, content)
videos = []
prefix_url = "https://www.pearvideo.com/"
for _ in vids:
    _url = prefix_url + _
    videos.append(_url)
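String concatenation works here because the captured hrefs are simple relative paths. If you want the joining to survive leading slashes or absolute URLs, urllib.parse.urljoin is the safer tool; a sketch of the same loop:

from urllib.parse import urljoin

# urljoin normalizes each relative href against the site root
videos = [urljoin("https://www.pearvideo.com/", vid) for vid in vids]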
===
This part loops over our target links and requests each one in turn.
for _ in videos:
    print(_)
    content = requests.get(_, headers=headers).text
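One courtesy worth adding in practice (my suggestion, not in the original code) is a short pause between requests so we don't hammer the site:

import time

for _ in videos:
    print(_)
    content = requests.get(_, headers=headers).text
    time.sleep(1)  # politeness delay between consecutive requests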
===
This part again uses regular expressions to match what we want out of the downloaded page (the video link and the video title).
regex = r'ldUrl="",srcUrl="(.*?)",vdoUrl='
video_url = re.findall(regex, content)
regex = r'<h1 class="video-tt">(.*?)</h1>'
video_name = re.findall(regex, content)
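Because re.findall returns an empty list when nothing matches, video_url[0] will raise an IndexError if the page layout ever changes. A defensive variant using a hypothetical first_match helper (my addition, for illustration only):

import re

def first_match(pattern, text):
    """Return the first capture group, or None when the pattern doesn't match."""
    m = re.search(pattern, text)
    return m.group(1) if m else None

video_url = first_match(r'ldUrl="",srcUrl="(.*?)",vdoUrl=', content)
video_name = first_match(r'<h1 class="video-tt">(.*?)</h1>', content)
if video_url is None or video_name is None:
    print('Pattern not found; the page markup may have changed.')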
===
This part downloads the video and saves it to disk.
print(f'Downloading video: {video_name[0]}')
urllib.request.urlretrieve(video_url[0], os.path.join(storage, f"{video_name[0]}.mp4"))
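One caveat: video titles can contain characters that are illegal in filenames (slashes, question marks, and so on), which would make the save fail. A small sanitizing helper, hypothetical and not part of the original code:

import re

def safe_filename(name):
    # replace characters that Windows and most filesystems reject
    return re.sub(r'[\\/:*?"<>|]', '_', name)

urllib.request.urlretrieve(video_url[0], os.path.join(storage, f"{safe_filename(video_name[0])}.mp4"))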
OK, that wraps up the code walkthrough. Pretty simple, right?~
Practice exercise
With the simple example above explained, let's reinforce it with a hands-on exercise that combines everything covered so far.
The goal: request the technology category on pearvideo.com, find the creators of the short videos on that page, visit each creator's page, and save the videos they've published. Each video is saved under the MD5 hash of its title (MD5.mp4), together with a matching MD5.txt file that records the video's title and description.
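The MD5 part boils down to hashing the title string, which hashlib makes a three-liner. A minimal sketch (the title here is just a stand-in value):

import hashlib

title = 'example video title'
md5_name = hashlib.md5(title.encode('utf-8')).hexdigest()
print(md5_name)  # 32 hex characters, stable for the same title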
The code is from my girlfriend's GitHub: 2020-03-31
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2020/4/1 9:48
@Author  : WangHuan
@Contact : hi_chengzi@126.com
@File    : 2020-03-31.py
@Software: PyCharm
@description: Crawler exercise: download all the videos from PearVideo
"""
import hashlib
import os
import re
import urllib.request

import requests

# Check whether the videos directory exists; create it if not
storage = 'videos'
if storage not in os.listdir():
    os.mkdir(storage)

seed_url = 'https://www.pearvideo.com/category_8'
prefix_url = 'https://www.pearvideo.com/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
}


# Download the videos on the technology category page
def download_pearvideo():
    # Find the video links on the page and collect them
    content = requests.get(seed_url, headers=headers).text
    vids_regex = r'<a href="(.*?)" class="vervideo-lilink actplay">'
    vids = re.findall(vids_regex, content)
    videos = []
    for _ in vids:
        _url = prefix_url + _
        videos.append(_url)
    # Visit each video page and save the video under its title
    for _ in videos:
        print(_)
        content = requests.get(_, headers=headers).text
        mp4_regex = r'ldUrl="",srcUrl="(.*?)",vdoUrl='
        video_url = re.findall(mp4_regex, content)
        title_regex = r'<h1 class="video-tt">(.*?)</h1>'
        video_title = re.findall(title_regex, content)
        print(f'Downloading video: {video_title[0]}')
        urllib.request.urlretrieve(video_url[0], os.path.join(storage, f"{video_title[0]}.mp4"))


# Step up: find all the authors on the technology page, visit each
# author's page, and save their videos
def download_authorvideo():
    content = requests.get(seed_url, headers=headers).text
    author_regex = r'<a href="(.*?)" class="column">'
    author_ids = re.findall(author_regex, content)
    author_url = []
    for _ in author_ids:
        _url = prefix_url + _
        if _url not in author_url:
            author_url.append(_url)
    videos = []
    for _ in author_url:
        content = requests.get(_, headers=headers).text
        vids_regex = r'<a href="(.*?)" class="vervideo-lilink actplay">'
        vids = re.findall(vids_regex, content)
        for v in vids:
            _url = prefix_url + v
            videos.append(_url)
    for _ in videos:
        print(_)
        content = requests.get(_, headers=headers).text
        mp4_regex = r'ldUrl="",srcUrl="(.*?)",vdoUrl='
        video_url = re.findall(mp4_regex, content)
        title_regex = r'<h1 class="video-tt">(.*?)</h1>'
        video_title = re.findall(title_regex, content)
        print(f'Downloading video: {video_title[0]}')
        urllib.request.urlretrieve(video_url[0], os.path.join(storage, f"{video_title[0]}.mp4"))


# Step up: save each video under the MD5 hash of its title, and write the
# description text to a .txt file with the same MD5 name
def md5_video():
    content = requests.get(seed_url, headers=headers).text
    vids_regex = r'<a href="(.*?)" class="vervideo-lilink actplay">'
    vids = re.findall(vids_regex, content)
    videos = []
    for _ in vids:
        _url = prefix_url + _
        videos.append(_url)
    # Visit each video page and save the video under its title's MD5 hash
    for _ in videos:
        print(_)
        content = requests.get(_, headers=headers).text
        mp4_regex = r'ldUrl="",srcUrl="(.*?)",vdoUrl='
        video_url = re.findall(mp4_regex, content)
        title_regex = r'<h1 class="video-tt">(.*?)</h1>'
        video_title = re.findall(title_regex, content)
        md5 = hashlib.md5()
        md5.update(video_title[0].encode(encoding='utf-8'))
        md5_title = md5.hexdigest()
        print(f'Downloading video: {video_title[0]}')
        urllib.request.urlretrieve(video_url[0], os.path.join(storage, f"{md5_title}.mp4"))
        sum_regex = r'<div class="summary">(.*?)</div>'
        video_sum = re.findall(sum_regex, content)
        with open(f"videos/{md5_title}.txt", 'w', encoding='utf-8') as f:
            f.write(video_sum[0])


if __name__ == '__main__':
    # download_pearvideo()
    # download_authorvideo()
    md5_video()