python 爬取影视网站下载链接
作者:GriffinLewis2001 发布时间:2022-07-30 00:59:42
标签:python,爬虫,下载链接,影视网站
项目地址:
https://github.com/GriffinLewis2001/Python_movie_links_scraper
运行效果
导入模块
import requests,re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os,pickle,threading,time
import concurrent.futures
from goto import with_goto
爬虫主代码
def get_content_url_name(url):
    """Fetch *url* and return a list of (href, title) tuples scraped from
    the page's thumbnail links.

    NOTE(review): this helper is not called by any code visible in this
    file — possibly dead code; confirm before removing.

    Bug fix: the original called ``requests.get(url, send_headers, ...)``,
    passing the headers dict as the second positional argument, which is
    ``params`` — the headers were silently appended to the query string
    instead of being sent as HTTP headers.  Fixed with ``headers=``.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    # Session cookie the target site expects; value hard-coded upstream.
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # The repeated rel="external nofollow" runs mirror the markup the site
    # actually serves; the two groups capture (href, title).
    reg = re.compile(r'<a href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" class="thumbnail-img" title="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list
def get_content(url):
    """Fetch *url* with browser-like headers and the site cookie; return
    the response body decoded as UTF-8 text.

    Bug fix: the original called ``requests.get(url, send_headers, ...)``,
    passing the headers dict positionally as ``params`` — the headers
    were sent as query-string parameters, not HTTP headers.  Fixed with
    the ``headers=`` keyword.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    # Session cookie the target site expects; value hard-coded upstream.
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text
def search_durl(url):
    """Resolve a detail-page *url* into its list of download links.

    Returns an empty list when the page carries no download token.

    Bug fix: the original indexed ``reg.findall(content)[0]`` unguarded,
    so any page without the token crashed with IndexError; now returns
    ``[]`` in that case so callers simply print nothing.
    """
    content = get_content(url)
    # The page embeds a token under a hex-escaped key spelling
    # 'decriptParam'; capture its value.
    reg = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
    tokens = reg.findall(content)
    if not tokens:
        return []
    # Strip the trailing ".html" (5 chars) and build the download-list URL.
    download_url = url[:-5] + r'/downloadList?decriptParam=' + tokens[0]
    content = get_content(download_url)
    reg1 = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" ')
    download_list = reg1.findall(content)
    return download_list
def get_page(url):
    """Fetch a search-results page and return a list of
    (href, title, link_text) tuples for each result entry.

    Bug fix: the original called ``requests.get(url, send_headers, ...)``,
    passing the headers dict positionally as ``params`` — the headers
    were sent as query-string parameters, not HTTP headers.  Fixed with
    the ``headers=`` keyword.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    # Session cookie the target site expects; value hard-coded upstream.
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # Three capture groups: href, title attribute, anchor text.
    reg = re.compile(r'<a target="_blank" class="title" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" title="(.*?)">(.*?)<\/a>')
    url_name_list = reg.findall(content)
    return url_name_list
def main():
    """Run one interactive search round: prompt for a title, list the
    matching results, and print the download links of the chosen entry.

    Rewritten without the non-standard ``goto`` module: every jump to
    ``label .end`` was just "print the not-found message and stop", so
    early returns express the same control flow.

    Robustness fix: a non-numeric reply to the number prompt used to
    crash with ValueError; it is now treated like "skip this search".
    """
    print("=========================================================")
    name = input("请输入剧名(输入quit退出):")
    if name == "quit":
        exit()
    url = "http://www.yikedy.co/search?query=" + name
    dlist = get_page(url)
    print("\n")
    # Keep only results whose title contains the query string — the same
    # `name in i[1]` filter the original applied in both of its loops.
    matches = [item for item in dlist if name in item[1]] if dlist else []
    if not matches:
        print("没找到或不想看\n")
        return
    for num, item in enumerate(matches):
        print(f"{num} {item[1]}")
    try:
        dest = int(input("\n\n请输入剧的编号(输100跳过此次搜寻):"))
    except ValueError:
        print("没找到或不想看\n")
        return
    if dest == 100:
        print("没找到或不想看\n")
        return
    print("\n以下为下载链接:\n")
    # An out-of-range number printed nothing in the original; keep that.
    if 0 <= dest < len(matches):
        for durl in search_durl(matches[dest][0]):
            print(f"{durl}\n")
        print("\n")
完整代码
import requests,re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os,pickle,threading,time
import concurrent.futures
from goto import with_goto
def get_content_url_name(url):
    """Fetch *url* and return a list of (href, title) tuples scraped from
    the page's thumbnail links.

    NOTE(review): this helper is not called by any code visible in this
    file — possibly dead code; confirm before removing.

    Bug fix: the original called ``requests.get(url, send_headers, ...)``,
    passing the headers dict as the second positional argument, which is
    ``params`` — the headers were silently appended to the query string
    instead of being sent as HTTP headers.  Fixed with ``headers=``.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    # Session cookie the target site expects; value hard-coded upstream.
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # The repeated rel="external nofollow" runs mirror the markup the site
    # actually serves; the two groups capture (href, title).
    reg = re.compile(r'<a href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" class="thumbnail-img" title="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list
def get_content(url):
    """Fetch *url* with browser-like headers and the site cookie; return
    the response body decoded as UTF-8 text.

    Bug fix: the original called ``requests.get(url, send_headers, ...)``,
    passing the headers dict positionally as ``params`` — the headers
    were sent as query-string parameters, not HTTP headers.  Fixed with
    the ``headers=`` keyword.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    # Session cookie the target site expects; value hard-coded upstream.
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text
def search_durl(url):
    """Resolve a detail-page *url* into its list of download links.

    Returns an empty list when the page carries no download token.

    Bug fix: the original indexed ``reg.findall(content)[0]`` unguarded,
    so any page without the token crashed with IndexError; now returns
    ``[]`` in that case so callers simply print nothing.
    """
    content = get_content(url)
    # The page embeds a token under a hex-escaped key spelling
    # 'decriptParam'; capture its value.
    reg = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
    tokens = reg.findall(content)
    if not tokens:
        return []
    # Strip the trailing ".html" (5 chars) and build the download-list URL.
    download_url = url[:-5] + r'/downloadList?decriptParam=' + tokens[0]
    content = get_content(download_url)
    reg1 = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" ')
    download_list = reg1.findall(content)
    return download_list
def get_page(url):
    """Fetch a search-results page and return a list of
    (href, title, link_text) tuples for each result entry.

    Bug fix: the original called ``requests.get(url, send_headers, ...)``,
    passing the headers dict positionally as ``params`` — the headers
    were sent as query-string parameters, not HTTP headers.  Fixed with
    the ``headers=`` keyword.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }
    cookie_jar = RequestsCookieJar()
    # Session cookie the target site expects; value hard-coded upstream.
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # Three capture groups: href, title attribute, anchor text.
    reg = re.compile(r'<a target="_blank" class="title" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" title="(.*?)">(.*?)<\/a>')
    url_name_list = reg.findall(content)
    return url_name_list
def main():
    """Run one interactive search round: prompt for a title, list the
    matching results, and print the download links of the chosen entry.

    Rewritten without the non-standard ``goto`` module: every jump to
    ``label .end`` was just "print the not-found message and stop", so
    early returns express the same control flow.

    Robustness fix: a non-numeric reply to the number prompt used to
    crash with ValueError; it is now treated like "skip this search".
    """
    print("=========================================================")
    name = input("请输入剧名(输入quit退出):")
    if name == "quit":
        exit()
    url = "http://www.yikedy.co/search?query=" + name
    dlist = get_page(url)
    print("\n")
    # Keep only results whose title contains the query string — the same
    # `name in i[1]` filter the original applied in both of its loops.
    matches = [item for item in dlist if name in item[1]] if dlist else []
    if not matches:
        print("没找到或不想看\n")
        return
    for num, item in enumerate(matches):
        print(f"{num} {item[1]}")
    try:
        dest = int(input("\n\n请输入剧的编号(输100跳过此次搜寻):"))
    except ValueError:
        print("没找到或不想看\n")
        return
    if dest == 100:
        print("没找到或不想看\n")
        return
    print("\n以下为下载链接:\n")
    # An out-of-range number printed nothing in the original; keep that.
    if 0 <= dest < len(matches):
        for durl in search_durl(matches[dest][0]):
            print(f"{durl}\n")
        print("\n")
# Script entry point: banner, then repeat interactive searches until the
# user types "quit" (main() calls exit()).  Guarded so importing this
# module no longer starts the interactive loop as a side effect.
if __name__ == "__main__":
    print("本软件由CLY.所有\n\n")
    while True:
        main()
来源:https://github.com/GriffinLewis2001/Python_movie_links_scraper


猜你喜欢
- 本来这篇文章是5月份写的,今天修改了一下内容,就成今天发表的了,CSDN这是出BUG了还是什么改规则了。。。引文:决策树和基于规则的分类器都
- 接着上篇文章《解析SQL 表结构信息查询 含主外键、自增长》里面提到了INFORMATION_SCHEMA视图,其实到了SQL 2005微软
- 开发过程中,我们经常会遇到代码回滚的情况。正常人都知道,git 回滚有两大宝:git revertgit reset当我们在本地开发,还未
- Pandas是一个强大的数据处理库,它提供了高性能、易于使用的数据结构和数据分析工具。本文将介绍Pandas常用的数据结构和常用的数据分析技
- 在一个页面制作过程,突然被设计稿上的一个问题难住了,思路一时没打开,后来在费人的提醒下,用定位控制,顺利完成。这个是我做的大概的
- 天冷,人懒,事多,我就不全文翻译了。只列几个标题,很多内容完全按照我自己的理解写了一下。想读原汁原味的请移步:Icon design tre
- 导入在阅读过程中如果遇到一些带有水印的资料是比较烦心的,如下图所示,水印以及类似的内容会影响我们的阅读体验,而市面上去水印的功能有多要收费且
- 当然首先得去下载ASPupload 程序,安装后使用!官方网站下载:http://www.aspupload.com/使用ASP实现文件上载
- 引言:Python中的变量在使用中很流畅,可以不关注类型,任意赋值,对于开发来说效率得到了提升,但若不了解其中的机理,往往也会犯一些小错,让
- 最近在玩一个叫Baba is you的游戏,很羡慕里面的一个转场特效,所以试着做了一下。主要使用了JS和CSS,特效主要是用CSS实现的。H
- 一、前言Celery是一个基于python开发的分布式任务队列,而做python WEB开发最为流行的框架莫属Django,但是Django
- vue计算属性的缓存computed用法计算属性的缓存<!DOCTYPE html><html lang="en
- 1、可以控制左横向滚动还是右横向滚动。2、鼠标悬停上时暂停滚动,移开后恢复滚动。3、间歇时间按需调整,但不要低于容器向左/右移动的时间。JA
- 一、控制用户存取 1、创建修改用户Creating Users Create/alter user new_user identified
- 相比于2018年,在ICLR2019提交论文中,提及不同框架的论文数量发生了极大变化,网友发现,提及tensorflow的论文数量从2018
- 废话就不多说了,直接上内容。<form action="/home/search" method="ge
- 前言最近遇到的几个网站在提交密码时提交的已经是密文,也就是说在网络上传输的密码是密文,这样提升了密码在网络传输中的安全性。后端语言加解密已
- 一、实验内容编写一Python程序,要求实现以下功能:读入一幅图像。使用两种以上的方法分别向图像中添加噪声。输出一幅二值图像,图像中未加入噪
- 不知道大家在做页面的时候会不会遇到样式定义不生效的问题,基本的表现就是怎么改样式都没显示或只有某些浏览器正常,这时通常需要做下面的几步:确认
- 环境:前端 vue ip地址:192.168.1.205后端 springboot2.0 ip地址:192.168.1.217主要开发后端。