python 爬取影视网站下载链接
作者:GriffinLewis2001 发布时间:2022-07-30 00:59:42
标签:python,爬虫,下载链接,影视网站
项目地址:
https://github.com/GriffinLewis2001/Python_movie_links_scraper
运行效果
导入模块
import requests,re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os,pickle,threading,time
import concurrent.futures
from goto import with_goto
爬虫主代码
def get_content_url_name(url):
    """Fetch *url* and return a list of (href, title) tuples scraped from
    the page's thumbnail links.

    NOTE(review): this helper is never called in the script; kept for
    completeness.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: requests.get()'s second positional argument is `params`
    # (URL query string), not `headers`; the original call never sent the
    # headers at all. Pass them by keyword.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # NOTE(review): the six repeated rel="external nofollow" attributes look
    # like blog copy/paste mangling of the original pattern — confirm against
    # the live site's markup before changing.
    reg = re.compile(r'<a href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" class="thumbnail-img" title="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list
def get_content(url):
    """Fetch *url* with browser-like headers and the site cookie and return
    the response body decoded as UTF-8 text.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: requests.get()'s second positional argument is `params`
    # (URL query string), not `headers`; pass the headers by keyword.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text
def search_durl(url):
    """Resolve the download links for a single detail-page *url*.

    The detail page embeds an obfuscated token (hex-escaped key spelling
    'decriptParam'); extract it, build the downloadList URL from it, then
    scrape every download href from that listing page.
    """
    detail_page = get_content(url)
    token_pattern = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
    token = token_pattern.findall(detail_page)[0]
    # Drop the trailing ".html" (last 5 chars) and append the token query.
    listing_url = url[:-5] + r'/downloadList?decriptParam=' + token
    listing_page = get_content(listing_url)
    link_pattern = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" ')
    return link_pattern.findall(listing_page)
def get_page(url):
    """Fetch a search-results page and return a list of
    (href, title, link_text) tuples for each result entry.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: requests.get()'s second positional argument is `params`
    # (URL query string), not `headers`; pass the headers by keyword.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a target="_blank" class="title" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" title="(.*?)">(.*?)<\/a>')
    url_name_list = reg.findall(content)
    return url_name_list
def main():
    """Run one interactive search round: prompt for a title, list the
    matching results, then print download links for the chosen match.

    Rewritten with plain structured control flow; the original used the
    abandoned third-party `goto` module (`goto .end` / `label .end`), which
    monkey-patches bytecode and breaks on modern CPython. Observable
    behaviour is unchanged.
    """
    print("=========================================================")
    name = input("请输入剧名(输入quit退出):")
    if name == "quit":
        exit()
    url = "http://www.yikedy.co/search?query=" + name
    dlist = get_page(url)
    print("\n")
    # Keep only the results whose title contains the query string,
    # preserving their original order.
    matches = [item for item in dlist if name in item[1]]
    if matches:
        for num, item in enumerate(matches):
            print(f"{num} {item[1]}")
        dest = int(input("\n\n请输入剧的编号(输100跳过此次搜寻):"))
        if dest == 100:
            # Sentinel 100 means "skip this search" (original goto .end).
            print("没找到或不想看\n")
        else:
            print("\n以下为下载链接:\n")
            # The original scanned for x == dest, so an out-of-range or
            # negative number silently printed nothing; keep that behaviour.
            if 0 <= dest < len(matches):
                for durl in search_durl(matches[dest][0]):
                    print(f"{durl}\n")
                print("\n")
    else:
        print("没找到或不想看\n")
完整代码
import requests,re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os,pickle,threading,time
import concurrent.futures
from goto import with_goto
def get_content_url_name(url):
    """Fetch *url* and return a list of (href, title) tuples scraped from
    the page's thumbnail links.

    NOTE(review): this helper is never called in the script; kept for
    completeness.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: requests.get()'s second positional argument is `params`
    # (URL query string), not `headers`; the original call never sent the
    # headers at all. Pass them by keyword.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # NOTE(review): the six repeated rel="external nofollow" attributes look
    # like blog copy/paste mangling of the original pattern — confirm against
    # the live site's markup before changing.
    reg = re.compile(r'<a href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" class="thumbnail-img" title="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list
def get_content(url):
    """Fetch *url* with browser-like headers and the site cookie and return
    the response body decoded as UTF-8 text.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: requests.get()'s second positional argument is `params`
    # (URL query string), not `headers`; pass the headers by keyword.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text
def search_durl(url):
    """Resolve the download links for a single detail-page *url*.

    The detail page embeds an obfuscated token (hex-escaped key spelling
    'decriptParam'); extract it, build the downloadList URL from it, then
    scrape every download href from that listing page.
    """
    detail_page = get_content(url)
    token_pattern = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
    token = token_pattern.findall(detail_page)[0]
    # Drop the trailing ".html" (last 5 chars) and append the token query.
    listing_url = url[:-5] + r'/downloadList?decriptParam=' + token
    listing_page = get_content(listing_url)
    link_pattern = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" ')
    return link_pattern.findall(listing_page)
def get_page(url):
    """Fetch a search-results page and return a list of
    (href, title, link_text) tuples for each result entry.
    """
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # BUG FIX: requests.get()'s second positional argument is `params`
    # (URL query string), not `headers`; pass the headers by keyword.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a target="_blank" class="title" href="(.*?)" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" rel="external nofollow" title="(.*?)">(.*?)<\/a>')
    url_name_list = reg.findall(content)
    return url_name_list
def main():
    """Run one interactive search round: prompt for a title, list the
    matching results, then print download links for the chosen match.

    Rewritten with plain structured control flow; the original used the
    abandoned third-party `goto` module (`goto .end` / `label .end`), which
    monkey-patches bytecode and breaks on modern CPython. Observable
    behaviour is unchanged.
    """
    print("=========================================================")
    name = input("请输入剧名(输入quit退出):")
    if name == "quit":
        exit()
    url = "http://www.yikedy.co/search?query=" + name
    dlist = get_page(url)
    print("\n")
    # Keep only the results whose title contains the query string,
    # preserving their original order.
    matches = [item for item in dlist if name in item[1]]
    if matches:
        for num, item in enumerate(matches):
            print(f"{num} {item[1]}")
        dest = int(input("\n\n请输入剧的编号(输100跳过此次搜寻):"))
        if dest == 100:
            # Sentinel 100 means "skip this search" (original goto .end).
            print("没找到或不想看\n")
        else:
            print("\n以下为下载链接:\n")
            # The original scanned for x == dest, so an out-of-range or
            # negative number silently printed nothing; keep that behaviour.
            if 0 <= dest < len(matches):
                for durl in search_durl(matches[dest][0]):
                    print(f"{durl}\n")
                print("\n")
    else:
        print("没找到或不想看\n")
    print("本软件由CLY.所有\n\n")
# Run the interactive loop only when executed as a script; the original
# unguarded while-loop would also run (forever) on a mere import.
if __name__ == "__main__":
    while True:
        main()
来源:https://github.com/GriffinLewis2001/Python_movie_links_scraper
0
投稿
猜你喜欢
- 概念所有Python的对象都是扩展的PyObject,python的垃圾回收机制是计算引用,这个对象中定义了py_ssize就是用来做这个事
- 众所周知windows平台漏洞百出,补丁一个接一个,但总是补也补不净。我把我所知道的看asp源码的方法总结了一下,并且用c#写了个应用程序来
- 如何在生产上部署Django?Django的部署可以有很多方式,采用nginx+uwsgi的方式是其中比较常见的一种方式。uwsgi介绍uW
- 本文实例讲述了python函数形参用法。分享给大家供大家参考。具体如下:函数形参:函数取得的参数是你提供给函数的值,这样函数就可以利用这些值
- 上次谈到客户端和服务端的编码“陷阱”,其中对url编码只是提及带过,并没有做深入讨论,事实上由于浏览器环境的复杂和不一致性,我们也很容易掉进
- python中的集合什么是集合?集合是一个无序的不重复元素序列常用来对两个列表进行交并差的处理集合与列表一样,支持所有数据类型集合与列表的区
- 一、基本(1)利用pytorch建好的层进行搭建import torchfrom torch import nnfrom torch.nn
- 用于逐行分析文本的代码示例fileIN = open(sys.argv[1], "r")line = fileIN.re
- 我就废话不多说了,大家还是直接看代码吧try: s = socket.socket() s.bind(('127.0.0.1'
- 1. 在 Python 中 XML 文件的编码问题1.Python 使用的xml.etree.ElementTree库只支持解析和生成标准的
- 如下所示:url = u'http://tieba.baidu.com/f?kw=权利的游戏&ie=utf-8&pn
- 那是一杠还是两杠呢?最简单的方式就是拿出钱包,掏出张“新”的50或100,翻到背面的银线上,就会发现两杠的符号。这个是以前跟同事在讨论是一杠
- 许多人利用Session变量来开发ASP(Active Server Pages)。这些变量与任何编程语言中通用的变量非常相似,并且具有和通
- 如何在SQL中启用全文检索功能?本文将通过实例向你剖折这个问题。这是一个全文索引的一个例子,首先在查询分析器中使用:use pubsgo--
- 本文完整示例代码及文件已上传至我的Github仓库https://github.com/CNFeffery/PythonPracticalS
- 如下所示: m_start =date +' 09:00' m_end =date +' 13:00'rsv
- 今天终于可以用wxPython开发GUI程序了,非常高兴。把其中的一些注意点写下来以供参考。在windows XP平台下,首先需要做以下环境
- Python过滤txt文件内重复内容,并将过滤后的内容保存到新的txt中示例如下 原文件处理之后的文件 直接上代码# -*-coding:u
- 网页制作中需要把握好很多原则和细节,今天我们来谈谈网页设计中的平衡、对比、连贯和留白。一、平衡如果你的页面是平衡的,当用户浏览这个页面的时候
- 本文实例讲述了Python基类函数的重载与调用方法。分享给大家供大家参考。具体分析如下:刚接触Python语言的时间不长,对于这个语言的很多