python 爬取影视网站下载链接
作者:GriffinLewis2001 时间:2022-07-30 00:59:42
项目地址:
https://github.com/GriffinLewis2001/Python_movie_links_scraper
运行效果(运行截图见上方项目仓库)
导入模块
import requests,re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os,pickle,threading,time
import concurrent.futures
from goto import with_goto
爬虫主代码
def get_content_url_name(url):
    """Fetch a listing page and return (href, title) tuples for each thumbnail link.

    Args:
        url: Absolute URL of the listing page to scrape.

    Returns:
        List of (href, title) tuples extracted from the page HTML; empty
        list when nothing matches.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: the second positional argument of requests.get() is `params`,
    # not `headers` -- the original sent the header dict as query parameters.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # NOTE(review): the repeated rel="external nofollow" attributes look like
    # an artifact of the article this code was copied from -- verify against
    # the live page HTML before relying on this pattern.
    reg = re.compile(r'<a href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" class="thumbnail-img" title="(.*?)"')
    return reg.findall(content)
def get_content(url):
    """Fetch a page with the site's browser-like headers and cookie; return its HTML text.

    Args:
        url: Absolute URL to request.

    Returns:
        The response body decoded as UTF-8.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: headers must be passed by keyword; the second positional
    # argument of requests.get() is `params`, so the original sent the
    # header dict as query-string parameters instead of HTTP headers.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text
def search_durl(url):
    """Resolve a detail-page URL into its list of download links.

    The detail page embeds an obfuscated parameter ({'decriptParam': ...},
    with the key hex-escaped in the page source); it is appended to a
    /downloadList request whose response contains the download anchors.

    Args:
        url: Detail-page URL; the last 5 characters (e.g. ".html") are
             stripped before building the /downloadList URL.

    Returns:
        List of download-link URLs; empty list when the obfuscated
        parameter is not present on the page.
    """
    content = get_content(url)
    reg = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
    params = reg.findall(content)
    # Robustness fix: the original indexed [0] unconditionally and raised
    # IndexError whenever the request failed or the page layout changed.
    if not params:
        return []
    download_url = url[:-5] + r'/downloadList?decriptParam=' + params[0]
    content = get_content(download_url)
    reg1 = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" ')
    return reg1.findall(content)
def get_page(url):
    """Fetch a search-results page and return (href, title, text) tuples for each result.

    Args:
        url: Absolute URL of the search-results page.

    Returns:
        List of (href, title, anchor_text) tuples; empty list when
        nothing matches.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: the second positional argument of requests.get() is `params`,
    # not `headers` -- the original sent the header dict as query parameters.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a target="_blank" class="title" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" title="(.*?)">(.*?)<\/a>')
    return reg.findall(content)
def main():
    """Interactively search the site for a title and print its download links.

    Prompts for a show name, lists the results whose title contains the
    query, asks which one to resolve, and prints every download URL found.
    Typing "quit" exits the program; entering 100 at the selection prompt
    skips the current search.
    """
    print("=========================================================")
    name = input("请输入剧名(输入quit退出):")
    if name == "quit":
        exit()
    url = "http://www.yikedy.co/search?query=" + name
    dlist = get_page(url)
    print("\n")
    # Keep only the results whose title contains the query, preserving order.
    # (Replaces the original goto-based flow, which only detected "no match"
    # when the *last* result failed to match.)
    matches = [item for item in dlist if name in item[1]]
    if not matches:
        print("没找到或不想看\n")
        return
    for num, item in enumerate(matches):
        print(f"{num} {item[1]}")
    try:
        dest = int(input("\n\n请输入剧的编号(输100跳过此次搜寻):"))
    except ValueError:
        # Non-numeric input used to crash the whole program; treat it as "skip".
        dest = 100
    if dest == 100 or not (0 <= dest < len(matches)):
        print("没找到或不想看\n")
        return
    print("\n以下为下载链接:\n")
    for durl in search_durl(matches[dest][0]):
        print(f"{durl}\n")
    print("\n")
完整代码
import requests,re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os,pickle,threading,time
import concurrent.futures
from goto import with_goto
def get_content_url_name(url):
    """Fetch a listing page and return (href, title) tuples for each thumbnail link.

    Args:
        url: Absolute URL of the listing page to scrape.

    Returns:
        List of (href, title) tuples extracted from the page HTML; empty
        list when nothing matches.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: the second positional argument of requests.get() is `params`,
    # not `headers` -- the original sent the header dict as query parameters.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # NOTE(review): the repeated rel="external nofollow" attributes look like
    # an artifact of the article this code was copied from -- verify against
    # the live page HTML before relying on this pattern.
    reg = re.compile(r'<a href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" class="thumbnail-img" title="(.*?)"')
    return reg.findall(content)
def get_content(url):
    """Fetch a page with the site's browser-like headers and cookie; return its HTML text.

    Args:
        url: Absolute URL to request.

    Returns:
        The response body decoded as UTF-8.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: headers must be passed by keyword; the second positional
    # argument of requests.get() is `params`, so the original sent the
    # header dict as query-string parameters instead of HTTP headers.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text
def search_durl(url):
    """Resolve a detail-page URL into its list of download links.

    The detail page embeds an obfuscated parameter ({'decriptParam': ...},
    with the key hex-escaped in the page source); it is appended to a
    /downloadList request whose response contains the download anchors.

    Args:
        url: Detail-page URL; the last 5 characters (e.g. ".html") are
             stripped before building the /downloadList URL.

    Returns:
        List of download-link URLs; empty list when the obfuscated
        parameter is not present on the page.
    """
    content = get_content(url)
    reg = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
    params = reg.findall(content)
    # Robustness fix: the original indexed [0] unconditionally and raised
    # IndexError whenever the request failed or the page layout changed.
    if not params:
        return []
    download_url = url[:-5] + r'/downloadList?decriptParam=' + params[0]
    content = get_content(download_url)
    reg1 = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" ')
    return reg1.findall(content)
def get_page(url):
    """Fetch a search-results page and return (href, title, text) tuples for each result.

    Args:
        url: Absolute URL of the search-results page.

    Returns:
        List of (href, title, anchor_text) tuples; empty list when
        nothing matches.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: the second positional argument of requests.get() is `params`,
    # not `headers` -- the original sent the header dict as query parameters.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a target="_blank" class="title" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" title="(.*?)">(.*?)<\/a>')
    return reg.findall(content)
def main():
    """Interactively search the site for a title and print its download links.

    Prompts for a show name, lists the results whose title contains the
    query, asks which one to resolve, and prints every download URL found.
    Typing "quit" exits the program; entering 100 at the selection prompt
    skips the current search.
    """
    print("=========================================================")
    name = input("请输入剧名(输入quit退出):")
    if name == "quit":
        exit()
    url = "http://www.yikedy.co/search?query=" + name
    dlist = get_page(url)
    print("\n")
    # Keep only the results whose title contains the query, preserving order.
    # (Replaces the original goto-based flow, which only detected "no match"
    # when the *last* result failed to match.)
    matches = [item for item in dlist if name in item[1]]
    if not matches:
        print("没找到或不想看\n")
        return
    for num, item in enumerate(matches):
        print(f"{num} {item[1]}")
    try:
        dest = int(input("\n\n请输入剧的编号(输100跳过此次搜寻):"))
    except ValueError:
        # Non-numeric input used to crash the whole program; treat it as "skip".
        dest = 100
    if dest == 100 or not (0 <= dest < len(matches)):
        print("没找到或不想看\n")
        return
    print("\n以下为下载链接:\n")
    for durl in search_durl(matches[dest][0]):
        print(f"{durl}\n")
    print("\n")
# Run the interactive loop only when executed as a script, so importing this
# module does not print the banner or start the (infinite) prompt loop.
if __name__ == "__main__":
    print("本软件由CLY.所有\n\n")
    while True:
        main()
来源:https://github.com/GriffinLewis2001/Python_movie_links_scraper
标签:python,爬虫,下载链接,影视网站
![](/images/zang.png)
![](/images/jiucuo.png)
猜你喜欢
教你一步步利用python实现贪吃蛇游戏
2023-09-21 13:23:27
![](https://img.aspxhome.com/file/2023/0/107810_0s.png)
Javascript(es2016) import和require用法和区别详解
2024-04-19 09:57:04
FrontPage 2002应用技巧四则
2008-08-17 10:57:00
PyCharm导入python项目并配置虚拟环境的教程详解
2023-08-18 13:31:41
![](https://img.aspxhome.com/file/2023/7/88927_0s.png)
如何利用Vue3管理系统实现动态路由和动态侧边菜单栏
2024-05-05 09:25:34
![](https://img.aspxhome.com/file/2023/9/128889_0s.png)
Pycharm安装scrapy及初始化爬虫项目的完整步骤
2023-04-03 10:49:43
![](https://img.aspxhome.com/file/2023/4/108114_0s.jpg)
ASP实例:幻灯片新闻代码
2008-11-21 17:40:00
制作Python数字华容道的实现(可选择关卡)
2022-12-20 19:32:18
![](https://img.aspxhome.com/file/2023/5/100805_0s.png)
快速掌握 Mysql数据库对文件操作的封装
2009-02-23 17:37:00
WEB页面工具之语言XML的定义
2008-05-29 11:29:00
jsp+ajax实现无刷新上传文件的方法
2024-04-17 10:39:30
![](https://img.aspxhome.com/file/2023/9/136189_0s.jpg)
Python 加密与解密小结
2021-04-28 00:35:47
python 微信好友特征数据分析及可视化
2021-09-10 15:45:20
![](https://img.aspxhome.com/file/2023/1/131021_0s.png)
python实现文件快照加密保护的方法
2022-08-21 20:01:29
Linux下安装PHP MSSQL扩展教程
2024-03-11 10:41:47
![](https://img.aspxhome.com/file/2023/2/80512_0s.png)
一条SQL语句修改多表多字段的信息的具体实现
2024-01-18 13:22:56
Python利用openpyxl库遍历Sheet的实例
2023-10-20 20:19:01
pyqt5 QScrollArea设置在自定义侧(任何位置)
2023-05-22 09:33:51
![](https://img.aspxhome.com/file/2023/1/70061_0s.gif)
初学vue出现空格警告的原因及其解决方案
2024-05-09 09:51:40
![](https://img.aspxhome.com/file/2023/3/127173_0s.jpg)
CentOS 6、7下mysql 5.7 详细安装教程
2024-01-24 18:00:47
![](https://img.aspxhome.com/file/2023/9/121309_0s.jpg)